scsi_lib.c revision 001aac257cf8adbe90cdcba6e07f8d12dfc8fa6b
/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

/*
 * The maximum number of SG segments that we will put inside a scatterlist
 * (unless chaining is used). Should ideally fit inside a single page, to
 * avoid a higher order allocation.
 */
#define SCSI_MAX_SG_SEGMENTS	128

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 16)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(128),
#endif
#endif
#endif
};
#undef SP

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd    - command that we are adding to queue.
 *		reason - why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases.  Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev: scsi device
 * @cmd: scsi command
 * @data_direction: data direction
 * @buffer: data buffer
 * @bufflen: len of buffer
 * @sense: optional sense buffer
 * @timeout: request timeout in seconds
 * @retries: number of times to retry request
 * @flags: flags to OR into the request's cmd_flags
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

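/*
 * For illustration only (hypothetical caller, not part of this file):
 * a ULD could use scsi_execute_req() above to issue a 36-byte INQUIRY
 * and look at the decoded sense data on failure.  The function name
 * "my_inquiry" and the timeout/retry values are invented.
 *
 *	static int my_inquiry(struct scsi_device *sdev)
 *	{
 *		unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *		unsigned char buf[36];
 *		struct scsi_sense_hdr sshdr;
 *		int result;
 *
 *		result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
 *					  sizeof(buf), &sshdr, 10 * HZ, 3);
 *		if (result && scsi_sense_valid(&sshdr))
 *			sdev_printk(KERN_INFO, sdev, "INQUIRY sense key %x\n",
 *				    sshdr.sense_key);
 *		return result;
 *	}
 */
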
static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	return blk_rq_append_bio(q, rq, bio);
}

static void scsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq: request to fill
 * @sgl: scatterlist
 * @nsegs: number of elements
 * @bufflen: len of buffer
 * @gfp: memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer.  We do not trust the scatterlist
 * sent to us, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = bufflen, len, bytes, off;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for_each_sg(sgl, sg, nsegs, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = bufflen;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev: scsi device
 * @cmd: scsi command
 * @cmd_len: length of scsi cdb
 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
 * @buffer: data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen: len of buffer
 * @use_sg: if buffer is a scatterlist this is the number of elements
 * @timeout: request timeout in seconds
 * @retries: number of times to retry request
 * @privdata: data passed to done()
 * @done: callback function when done
 * @gfp: memory allocation flags
 */
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer,
		       unsigned bufflen, int use_sg, int timeout, int retries,
		       void *privdata, void (*done)(void *, char *, int, int),
		       gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);

/*
 * Function:	scsi_init_cmd_errh()
 *
 * Purpose:	Initialize cmd fields related to error handling.
 *
 * Arguments:	cmd	- command that is ready to be queued.
 *
 * Notes:	This function has the job of initializing a number of
 *		fields related to error handling.  Typically this will
 *		be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	cmd->resid = 0;
	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
				 same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);


		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
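 *
 *		For example, scsi_io_completion() uses this path when the
 *		sense data says a command should simply be reissued (say,
 *		a power-on UNIT ATTENTION on a non-removable device): the
 *		request is unprepped, stuck back on the head of the queue,
 *		and the queue is goosed again.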
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:	scsi_end_request()
 *
 * Purpose:	Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:	cmd	 - command that is complete.
 *		uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *		bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	cmd if requeue required, NULL otherwise.
 *
 * Notes:	This is called for block device requests in order to
 *		mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

/*
 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
 */
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	switch (nents) {
	case 1 ... 8:
		index = 0;
		break;
	case 9 ... 16:
		index = 1;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 16)
	case 17 ... 32:
		index = 2;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 32)
	case 33 ... 64:
		index = 3;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 64)
	case 65 ... 128:
		index = 4;
		break;
#endif
#endif
#endif
	default:
		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
		BUG();
	}

	return index;
}

struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl, *prev, *ret;
	unsigned int index;
	int this, left;

	BUG_ON(!cmd->use_sg);

	left = cmd->use_sg;
	ret = prev = NULL;
	do {
		this = left;
		if (this > SCSI_MAX_SG_SEGMENTS) {
			this = SCSI_MAX_SG_SEGMENTS - 1;
			index = SG_MEMPOOL_NR - 1;
		} else
			index = scsi_sgtable_index(this);

		left -= this;

		sgp = scsi_sg_pools + index;

		sgl = mempool_alloc(sgp->pool, gfp_mask);
		if (unlikely(!sgl))
			goto enomem;

		sg_init_table(sgl, sgp->size);

		/*
		 * first loop through, set initial index and return value
		 */
		if (!ret)
			ret = sgl;

		/*
		 * chain previous sglist, if any. we know the previous
		 * sglist must be the biggest one, or we would not have
		 * ended up doing another loop.
		 */
		if (prev)
			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);

		/*
		 * if we have nothing left, mark the last segment as
		 * end-of-list
		 */
		if (!left)
			sg_mark_end(&sgl[this - 1]);

		/*
		 * don't allow subsequent mempool allocs to sleep, it would
		 * violate the mempool principle.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prev = sgl;
	} while (left);

	/*
	 * ->use_sg may get modified after dma mapping has potentially
	 * shrunk the number of segments, so keep a copy of it for free.
	 */
	cmd->__use_sg = cmd->use_sg;
	return ret;
enomem:
	if (ret) {
		/*
		 * Free entries chained off ret. Since we were trying to
		 * allocate another sglist, we know that all entries are of
		 * the max size.
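		 * (For example, a use_sg of 300 was built as 127 + 127 + 46
		 * entries; if the third allocation fails, the two max-size
		 * chunks already chained are what gets unwound here.)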
		 */
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
		prev = ret;
		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];

		while ((sgl = sg_chain_ptr(ret)) != NULL) {
			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
			mempool_free(sgl, sgp->pool);
		}

		mempool_free(prev, sgp->pool);
	}
	return NULL;
}

EXPORT_SYMBOL(scsi_alloc_sgtable);

void scsi_free_sgtable(struct scsi_cmnd *cmd)
{
	struct scatterlist *sgl = cmd->request_buffer;
	struct scsi_host_sg_pool *sgp;

	/*
	 * if this is the biggest size sglist, check if we have
	 * chained parts we need to free
	 */
	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
		unsigned short this, left;
		struct scatterlist *next;
		unsigned int index;

		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
		while (left && next) {
			sgl = next;
			this = left;
			if (this > SCSI_MAX_SG_SEGMENTS) {
				this = SCSI_MAX_SG_SEGMENTS - 1;
				index = SG_MEMPOOL_NR - 1;
			} else
				index = scsi_sgtable_index(this);

			left -= this;

			sgp = scsi_sg_pools + index;

			if (left)
				next = sg_chain_ptr(&sgl[sgp->size - 1]);

			mempool_free(sgl, sgp->pool);
		}

		/*
		 * Restore original, will be freed below
		 */
		sgl = cmd->request_buffer;
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
	} else
		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);

	mempool_free(sgl, sgp->pool);
}

EXPORT_SYMBOL(scsi_free_sgtable);

/*
 * Function:	scsi_release_buffers()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that we are bailing.
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		scsi_free_sgtable(cmd);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:	scsi_io_completion()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd        - command that is finished.
 *		good_bytes - number of bytes of completed I/O
 *
 * Lock status:	Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	This function is matched in terms of capabilities to
 *		the function that created the scatter-gather list.
 *		In other words, if there are no bounce buffers
 *		(the normal case for most drivers), we don't need
 *		the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
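 *
 *		For example, a READ(10) that transfers part of its data
 *		and then hits a medium error has the good prefix finished
 *		via scsi_end_request(); the sense data examined below then
 *		decides whether the remainder is retried or failed.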
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = cmd->request_bufflen;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	scsi_release_buffers(cmd);

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		}
		req->data_len = cmd->resid;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));
	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

	if (clear_errors)
		req->errors = 0;

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
		return;

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			     sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
				return;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
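			 * (The ASC 0x04/ASCQ pairs matched below are the
			 * common transient not-ready states; any other ASCQ
			 * falls through and the request is failed instead.)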
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET))
				scsi_cmd_print_sense_hdr(cmd,
							 "Device not ready",
							 &sshdr);

			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			/* See SSC3rXX or current. */
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, 0, this_count, !result);
}

/*
 * Function:	scsi_init_io()
 *
 * Purpose:	SCSI I/O initialize function.
 *
 * Arguments:	cmd   - Command descriptor we wish to initialize
 *
 * Returns:	0 on success
 *		BLKPREP_DEFER if the failure is retryable
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	int count;

	/*
	 * We used to not use scatter-gather for single segment requests,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!cmd->request_buffer)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	else
		cmd->request_bufflen = req->nr_sectors << 9;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
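	 * (blk_rq_map_sg() may merge physically contiguous segments, so
	 * the count it returns can be smaller than the nr_phys_segments
	 * value we started from; use_sg is trimmed to match.)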
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
	BUG_ON(count > cmd->use_sg);
	cmd->use_sg = count;
	return BLKPREP_OK;
}

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
					       struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		cmd->request_bufflen = 0;
		cmd->request_buffer = NULL;
		cmd->use_sg = 0;
		req->buffer = NULL;
	}

	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;
	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	return scsi_init_io(cmd);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				sdev_printk(KERN_INFO, sdev,
					    "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0.  We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
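 *
 * Note the starvation handling below: a device the host cannot serve
 * right now is parked on shost->starved_list, and scsi_run_queue()
 * re-runs starved queues once the host has capacity again.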
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
				       shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct Scsi_Host *shost;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __FUNCTION__);
		BUG();
	}

	sdev = cmd->device;
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	__scsi_done(cmd);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->completion_data;
	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
	}
}

/*
 * Function:	scsi_request_fn()
 *
 * Purpose:	Main strategy routine for SCSI.
 *
 * Arguments:	q	- Pointer to actual queue.
 *
 * Returns:	Nothing
 *
 * Lock status:	IO request lock assumed to be held when called.
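 *
 * Rough flow, for orientation: pull the next prepared request, check
 * device and host readiness, bump the busy counts, then dispatch to
 * the low-level driver with the queue lock dropped; any refusal sends
 * the request back onto the queue (see the not_ready path below).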
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __FUNCTION__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (scsi_target(sdev)->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
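	 * (For example, scsi_host_queue_ready() may just have parked this
	 * device on the starved list; a later command completion will call
	 * scsi_run_queue() and restart the queue.)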
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_hw_segments(q, shost->sg_tablesize);

	/*
	 * In the future, sg chaining support will be mandatory and this
	 * ifdef can then go away. Right now we don't have all archs
	 * converted, so better keep it safe.
	 */
#ifdef ARCH_HAS_SG_CHAIN
	if (shost->use_sg_chaining)
		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
	else
		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#else
	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#endif

	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:	scsi_block_requests()
 *
 * Purpose:	Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:	scsi_unblock_requests()
 *
 * Purpose:	Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:	shost	- Host in question
 *
 * Returns:	Nothing
 *
 * Lock status:	No locks are assumed held.
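 *
 * For illustration, a hypothetical LLD might bracket a firmware reset
 * with this pair (the reset helper name is invented):
 *
 *	scsi_block_requests(shost);
 *	my_hw_reset(shost);
 *	scsi_unblock_requests(shost);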
 *
 * Notes:	There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
					sizeof(struct scsi_io_context),
					0, 0, NULL);
	if (!scsi_io_context_cache) {
		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_io_context_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 * scsi_mode_select - issue a mode select
 * @sdev: SCSI device to be queried
 * @pf: Page format bit (1 == standard, 0 == vendor specific)
 * @sp: Save page bit (0 == don't save, 1 == save)
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; negative error number or scsi
 * status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 * @sdev: SCSI device to be queried
 * @dbd: set if mode sense will allow block descriptors to be returned
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; an error number or scsi status on
 * error.  On success, the header offset (either 4 or 8, depending
 * on whether a six or ten byte command was issued) is returned in
 * data->header_length.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

/**
 * scsi_test_unit_ready - test if unit is ready
 * @sdev: scsi device to change the state of.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *	returning sense. Make sure that this is cleared before passing
 *	in.
 *
 * Returns zero if successful or an error if TUR failed.  For
 * removable media, a return of NOT_READY or UNIT_ATTENTION is
 * translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries);
	} while ((driver_byte(result) & DRIVER_SENSE) &&
		 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
		 --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

		if ((scsi_sense_valid(sshdr)) &&
		    ((sshdr->sense_key == UNIT_ATTENTION) ||
		     (sshdr->sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 * scsi_device_set_state - Take the given device through the device state model.
 * @sdev: scsi device to change the state of.
 * @state: state to change to.
 *
 * Returns zero if successful or an error if the requested
 * transition is illegal.
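 *
 * For example, SDEV_CREATED -> SDEV_RUNNING is a legal transition,
 * while SDEV_OFFLINE -> SDEV_CREATED is not: per the switch below,
 * nothing ever transitions back to SDEV_CREATED.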

/**
 * scsi_test_unit_ready - test if unit is ready
 * @sdev: scsi device to test.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *     returning sense. Make sure that this is cleared before passing
 *     in.
 *
 * Returns zero if successful, or an error code if the TUR failed. For
 * removable media, a return of NOT_READY or UNIT ATTENTION is
 * translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
                     struct scsi_sense_hdr *sshdr_external)
{
        char cmd[] = {
                TEST_UNIT_READY, 0, 0, 0, 0, 0,
        };
        struct scsi_sense_hdr *sshdr;
        int result;

        if (!sshdr_external)
                sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
        else
                sshdr = sshdr_external;

        /* try to eat the UNIT_ATTENTION if there are enough retries */
        do {
                result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
                                          timeout, retries);
        } while ((driver_byte(result) & DRIVER_SENSE) &&
                 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
                 --retries);

        if (!sshdr)
                /* could not allocate sense buffer, so can't process it */
                return result;

        if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

                if ((scsi_sense_valid(sshdr)) &&
                    ((sshdr->sense_key == UNIT_ATTENTION) ||
                     (sshdr->sense_key == NOT_READY))) {
                        sdev->changed = 1;
                        result = 0;
                }
        }
        if (!sshdr_external)
                kfree(sshdr);
        return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
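
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * removable-media driver might poll for readiness like this, letting
 * the helper allocate its own sense buffer by passing NULL.  Timeout
 * and retry values are arbitrary:
 *
 *	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, NULL) == 0) {
 *		if (sdev->changed)
 *			... a NOT_READY/UNIT ATTENTION was folded into
 *			    success; revalidate the medium first ...
 *	}
 */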

/**
 * scsi_device_set_state - Take the given device through the device state model.
 * @sdev: scsi device to change the state of.
 * @state: state to change to.
 *
 * Returns zero if successful, or -EINVAL if the requested
 * transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
        enum scsi_device_state oldstate = sdev->sdev_state;

        if (state == oldstate)
                return 0;

        switch (state) {
        case SDEV_CREATED:
                /* There are no legal states that come back to
                 * created.  This is the manually initialised start
                 * state */
                goto illegal;

        case SDEV_RUNNING:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_OFFLINE:
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_QUIESCE:
                switch (oldstate) {
                case SDEV_RUNNING:
                case SDEV_OFFLINE:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_OFFLINE:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_RUNNING:
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_BLOCK:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_RUNNING:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_CANCEL:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_RUNNING:
                case SDEV_QUIESCE:
                case SDEV_OFFLINE:
                case SDEV_BLOCK:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SDEV_DEL:
                switch (oldstate) {
                case SDEV_CREATED:
                case SDEV_RUNNING:
                case SDEV_OFFLINE:
                case SDEV_CANCEL:
                        break;
                default:
                        goto illegal;
                }
                break;

        }
        sdev->sdev_state = state;
        return 0;

 illegal:
        SCSI_LOG_ERROR_RECOVERY(1,
                                sdev_printk(KERN_ERR, sdev,
                                            "Illegal state transition %s->%s\n",
                                            scsi_device_state_name(oldstate),
                                            scsi_device_state_name(state))
                                );
        return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

/**
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev: associated SCSI device
 * @evt: event to emit
 *
 * Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
        int idx = 0;
        char *envp[3];

        switch (evt->evt_type) {
        case SDEV_EVT_MEDIA_CHANGE:
                envp[idx++] = "SDEV_MEDIA_CHANGE=1";
                break;

        default:
                /* do nothing */
                break;
        }

        envp[idx++] = NULL;

        kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * scsi_evt_thread - send a uevent for each scsi event
 * @work: work struct for scsi_device
 *
 * Dispatch queued events to their associated scsi_device kobjects
 * as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
        struct scsi_device *sdev;
        LIST_HEAD(event_list);

        sdev = container_of(work, struct scsi_device, event_work);

        while (1) {
                struct scsi_event *evt;
                struct list_head *this, *tmp;
                unsigned long flags;

                spin_lock_irqsave(&sdev->list_lock, flags);
                list_splice_init(&sdev->event_list, &event_list);
                spin_unlock_irqrestore(&sdev->list_lock, flags);

                if (list_empty(&event_list))
                        break;

                list_for_each_safe(this, tmp, &event_list) {
                        evt = list_entry(this, struct scsi_event, node);
                        list_del(&evt->node);
                        scsi_evt_emit(sdev, evt);
                        kfree(evt);
                }
        }
}

/**
 * sdev_evt_send - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt: event to send
 *
 * Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
        unsigned long flags;

        if (!test_bit(evt->evt_type, sdev->supported_events)) {
                kfree(evt);
                return;
        }

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_add_tail(&evt->node, &sdev->event_list);
        schedule_work(&sdev->event_work);
        spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * sdev_evt_alloc - allocate a new scsi event
 * @evt_type: type of event to allocate
 * @gfpflags: GFP flags for allocation
 *
 * Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
                                  gfp_t gfpflags)
{
        struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
        if (!evt)
                return NULL;

        evt->evt_type = evt_type;
        INIT_LIST_HEAD(&evt->node);

        /* evt_type-specific initialization, if any */
        switch (evt_type) {
        case SDEV_EVT_MEDIA_CHANGE:
        default:
                /* do nothing */
                break;
        }

        return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt_type: type of event to send
 * @gfpflags: GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
                          enum scsi_device_event evt_type, gfp_t gfpflags)
{
        struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
        if (!evt) {
                sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
                            evt_type);
                return;
        }

        sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
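
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * driver that has just detected fresh media can notify user space with
 * one call.  GFP_ATOMIC would be used instead from interrupt context;
 * the event is silently dropped unless SDEV_EVT_MEDIA_CHANGE is set in
 * sdev->supported_events:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
 *
 * User space then sees a KOBJ_CHANGE uevent carrying
 * "SDEV_MEDIA_CHANGE=1" on the device's sysfs node.
 */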

/**
 * scsi_device_quiesce - Block user issued commands.
 * @sdev: scsi device to quiesce.
 *
 * This works by trying to transition to the SDEV_QUIESCE state
 * (which must be a legal transition).  When the device is in this
 * state, only special requests will be accepted, all others will
 * be deferred.  Since special requests may also be requeued requests,
 * a successful return doesn't guarantee the device will be
 * totally quiescent.
 *
 * Must be called with user context, may sleep.
 *
 * Returns zero if successful, or an error if the state transition
 * is not allowed.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
        int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
        if (err)
                return err;

        scsi_run_queue(sdev->request_queue);
        while (sdev->device_busy) {
                msleep_interruptible(200);
                scsi_run_queue(sdev->request_queue);
        }
        return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 * scsi_device_resume - Restart user issued commands to a quiesced device.
 * @sdev: scsi device to resume.
 *
 * Moves the device from quiesced back to running and restarts the
 * queues.
 *
 * Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
        if (scsi_device_set_state(sdev, SDEV_RUNNING))
                return;
        scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
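
/*
 * Usage sketch (illustrative only, not part of the original file):
 * quiescing a device around a sensitive maintenance sequence.  Both
 * calls may sleep, so process context is required.  Commands issued
 * via scsi_execute_req() still get through while quiesced because
 * they carry REQ_PREEMPT:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		... issue maintenance commands with scsi_execute_req() ...
 *		scsi_device_resume(sdev);
 *	}
 */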

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device. Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful, or an error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_internal_device_unblock or device_block_tmo
 *	fires.  This routine assumes the host_lock is held on entry.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;
        int err = 0;

        err = scsi_device_set_state(sdev, SDEV_BLOCK);
        if (err)
                return err;

        /*
         * The device has transitioned to SDEV_BLOCK.  Stop the
         * block layer from calling the midlayer with this device's
         * request queue.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev: device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful, or an error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;
        int err;
        unsigned long flags;

        /*
         * Try to transition the scsi device to SDEV_RUNNING
         * and goose the device queue if successful.
         */
        err = scsi_device_set_state(sdev, SDEV_RUNNING);
        if (err)
                return err;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        return 0;
}

void
scsi_target_block(struct device *dev)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        else
                device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_unblock);
        return 0;
}

void
scsi_target_unblock(struct device *dev)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_unblock);
        else
                device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
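
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * LLD fencing off every device on its host around a hard reset.
 * "shost" is the driver's Scsi_Host; passing its embedded struct
 * device reaches all child targets via the device_for_each_child()
 * branch above:
 *
 *	scsi_target_block(&shost->shost_gendev);
 *	... reset the controller ...
 *	scsi_target_unblock(&shost->shost_gendev);
 */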

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
                          size_t *offset, size_t *len)
{
        int i;
        size_t sg_len = 0, len_complete = 0;
        struct scatterlist *sg;
        struct page *page;

        WARN_ON(!irqs_disabled());

        for_each_sg(sgl, sg, sg_count, i) {
                len_complete = sg_len; /* Complete sg-entries */
                sg_len += sg->length;
                if (sg_len > *offset)
                        break;
        }

        if (unlikely(i == sg_count)) {
                printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
                        "elements %d\n",
                       __FUNCTION__, sg_len, *offset, sg_count);
                WARN_ON(1);
                return NULL;
        }

        /* Offset starting from the beginning of first page in this sg-entry */
        *offset = *offset - len_complete + sg->offset;

        /* Assumption: contiguous pages can be accessed as "page + i" */
        page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
        *offset &= ~PAGE_MASK;

        /* Bytes in this sg-entry from *offset to the end of the page */
        sg_len = PAGE_SIZE - *offset;
        if (*len > sg_len)
                *len = sg_len;

        return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
        kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
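
/*
 * Usage sketch (illustrative only, not part of the original file):
 * copying bytes out of a command's scatterlist from a context with
 * interrupts disabled (the WARN_ON above enforces this).  "cmd",
 * "dest", "byte_offset" and "count" are hypothetical; *offset and
 * *len are updated in place, so scratch copies are used.  At most one
 * page's worth is mapped per call, so a real caller would loop until
 * "count" is exhausted:
 *
 *	size_t off = byte_offset, len = count;
 *	char *vaddr;
 *
 *	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				    &off, &len);
 *	if (vaddr) {
 *		memcpy(dest, vaddr + off, len);
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */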