scsi_lib.c revision b8de16318410f6f8611a879678a531237e4aadc9
/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

/*
 * The maximum number of SG segments that we will put inside a scatterlist
 * (unless chaining is used). Should ideally fit inside a single page, to
 * avoid a higher order allocation.
 */
#define SCSI_MAX_SG_SEGMENTS	128

struct scsi_host_sg_pool {
	size_t			size;
	char			*name;
	struct kmem_cache	*slab;
	mempool_t		*pool;
};

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 16)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(128),
#endif
#endif
#endif
};
#undef SP

static struct kmem_cache *scsi_bidi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.
	 * The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags;
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

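/*
 * Illustrative sketch (hypothetical caller, not part of the SCSI midlayer):
 * one way an upper-level driver might use scsi_execute_req() above to issue
 * a TEST UNIT READY and react to the decoded sense data.  The function name,
 * timeout and retry counts are example values only.
 */
static int __maybe_unused scsi_lib_example_tur(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* no data transfer; sense is decoded into sshdr on error */
	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  10 * HZ, 3);
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		return -EAGAIN;	/* e.g. media change; caller may retry */

	return result ? -EIO : 0;
}
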
static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	return blk_rq_append_bio(q, rq, bio);
}

static void scsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to us, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = bufflen, len, bytes, off;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for_each_sg(sgl, sg, nsegs, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;
		data_len += len;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = bufflen;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @privdata:	data passed to done()
 * @done:	callback function when done
 * @gfp:	memory allocation flags
 */
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int
		       data_direction, void *buffer, unsigned bufflen,
		       int use_sg, int timeout, int retries, void *privdata,
		       void (*done)(void *, char *, int, int), gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);


		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (error && blk_noretry_request(req))
			blk_end_request(req, error, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

/*
 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
 */
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	switch (nents) {
	case 1 ... 8:
		index = 0;
		break;
	case 9 ... 16:
		index = 1;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 16)
	case 17 ... 32:
		index = 2;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 32)
	case 33 ... 64:
		index = 3;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 64)
	case 65 ...
	     128:
		index = 4;
		break;
#endif
#endif
#endif
	default:
		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
		BUG();
	}

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Bidi commands must be completed as a whole, both sides at once.
 * If part of the bytes were written and the lld returned
 * scsi_in()->resid and/or scsi_out()->resid, this information will be left
 * in req->data_len and req->next_rq->data_len. The upper-layer driver can
 * decide what to do with this information.
 */
void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned int dlen = req->data_len;
	unsigned int next_dlen = req->next_rq->data_len;

	req->data_len = scsi_out(cmd)->resid;
	req->next_rq->data_len = scsi_in(cmd)->resid;

	/* The req and req->next_rq have not been completed */
	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));

	scsi_release_buffers(cmd);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = scsi_bufflen(cmd);
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		}
		if (scsi_bidi_cmnd(cmd)) {
			/* will also release_buffers */
			scsi_end_bidi_request(cmd);
			return;
		}
		req->data_len = scsi_get_resid(cmd);
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */
	scsi_release_buffers(cmd);

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));

	if (clear_errors)
		req->errors = 0;

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
		return;

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, -EIO, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			     sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
				return;
			} else {
				scsi_end_request(cmd, -EIO, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET))
				scsi_cmd_print_sense_hdr(cmd,
							 "Device not ready",
							 &sshdr);

			scsi_end_request(cmd, -EIO, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			/* See SSC3rXX or current. */
			scsi_end_request(cmd, -EIO, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, -EIO, this_count, !result);
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;
	if (blk_pc_request(req))
		sdb->length = req->data_len;
	else
		sdb->length = req->nr_sectors << 9;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_bidi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;
	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
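 *
 * Example of the countdown below: if a QUEUE_FULL response made
 * scsi_queue_insert() set device_blocked to max_device_blocked (3 by
 * default), the first two calls here plug the queue and return 0, and
 * the third decrements the counter to zero and lets dispatch resume.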
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __FUNCTION__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	__scsi_done(cmd);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->completion_data;
	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
	}
}

/*
 * Function:   scsi_request_fn()
 *
 * Purpose:    Main strategy routine for SCSI.
 *
 * Arguments:  q       - Pointer to actual queue.
 *
 * Returns:    Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __FUNCTION__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (scsi_target(sdev)->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if(rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if(sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_hw_segments(q, shost->sg_tablesize);

	/*
	 * In the future, sg chaining support will be mandatory and this
	 * ifdef can then go away. Right now we don't have all archs
	 * converted, so better keep it safe.
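	 *
	 * Net effect of the ifdef below: hosts that set use_sg_chaining on a
	 * chaining-capable arch may use up to SCSI_MAX_SG_CHAIN_SEGMENTS
	 * (2048) physical segments per request; everyone else is capped at
	 * SCSI_MAX_SG_SEGMENTS (128).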
	 */
#ifdef ARCH_HAS_SG_CHAIN
	if (shost->use_sg_chaining)
		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
	else
		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#else
	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
#endif

	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
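 *
 *		Illustrative usage sketch (the surrounding LLD code is
 *		hypothetical):
 *
 *			scsi_block_requests(shost);
 *			... reset or reprogram the adapter ...
 *			scsi_unblock_requests(shost);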
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
					sizeof(struct scsi_io_context),
					0, 0, NULL);
	if (!scsi_io_context_cache) {
		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
		return -ENOMEM;
	}

	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
					sizeof(struct scsi_data_buffer),
					0, 0, NULL);
	if (!scsi_bidi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_io_context_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ?
				 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.
	 * MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if successful, or an error if the TUR failed.  For
 *	removable media, a return of NOT_READY or UNIT_ATTENTION is
 *	translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries);
	} while ((driver_byte(result) & DRIVER_SENSE) &&
		 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
		 --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

		if ((scsi_sense_valid(sshdr)) &&
		    ((sshdr->sense_key == UNIT_ATTENTION) ||
		     (sshdr->sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful, or an error if the requested
 *	transition is illegal.
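 *
 *	For example, per the transition table below SDEV_QUIESCE is
 *	reachable only from SDEV_RUNNING or SDEV_OFFLINE, so a quiesce
 *	attempt on a blocked device fails with -EINVAL.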
2010 */ 2011int 2012scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2013{ 2014 enum scsi_device_state oldstate = sdev->sdev_state; 2015 2016 if (state == oldstate) 2017 return 0; 2018 2019 switch (state) { 2020 case SDEV_CREATED: 2021 /* There are no legal states that come back to 2022 * created. This is the manually initialised start 2023 * state */ 2024 goto illegal; 2025 2026 case SDEV_RUNNING: 2027 switch (oldstate) { 2028 case SDEV_CREATED: 2029 case SDEV_OFFLINE: 2030 case SDEV_QUIESCE: 2031 case SDEV_BLOCK: 2032 break; 2033 default: 2034 goto illegal; 2035 } 2036 break; 2037 2038 case SDEV_QUIESCE: 2039 switch (oldstate) { 2040 case SDEV_RUNNING: 2041 case SDEV_OFFLINE: 2042 break; 2043 default: 2044 goto illegal; 2045 } 2046 break; 2047 2048 case SDEV_OFFLINE: 2049 switch (oldstate) { 2050 case SDEV_CREATED: 2051 case SDEV_RUNNING: 2052 case SDEV_QUIESCE: 2053 case SDEV_BLOCK: 2054 break; 2055 default: 2056 goto illegal; 2057 } 2058 break; 2059 2060 case SDEV_BLOCK: 2061 switch (oldstate) { 2062 case SDEV_CREATED: 2063 case SDEV_RUNNING: 2064 break; 2065 default: 2066 goto illegal; 2067 } 2068 break; 2069 2070 case SDEV_CANCEL: 2071 switch (oldstate) { 2072 case SDEV_CREATED: 2073 case SDEV_RUNNING: 2074 case SDEV_QUIESCE: 2075 case SDEV_OFFLINE: 2076 case SDEV_BLOCK: 2077 break; 2078 default: 2079 goto illegal; 2080 } 2081 break; 2082 2083 case SDEV_DEL: 2084 switch (oldstate) { 2085 case SDEV_CREATED: 2086 case SDEV_RUNNING: 2087 case SDEV_OFFLINE: 2088 case SDEV_CANCEL: 2089 break; 2090 default: 2091 goto illegal; 2092 } 2093 break; 2094 2095 } 2096 sdev->sdev_state = state; 2097 return 0; 2098 2099 illegal: 2100 SCSI_LOG_ERROR_RECOVERY(1, 2101 sdev_printk(KERN_ERR, sdev, 2102 "Illegal state transition %s->%s\n", 2103 scsi_device_state_name(oldstate), 2104 scsi_device_state_name(state)) 2105 ); 2106 return -EINVAL; 2107} 2108EXPORT_SYMBOL(scsi_device_set_state); 2109 2110/** 2111 * scsi_evt_emit - emit a single SCSI device uevent 2112 * @sdev: associated SCSI device 2113 * @evt: event to emit 2114 * 2115 * Send a single uevent (scsi_event) to the associated scsi_device. 2116 */ 2117static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2118{ 2119 int idx = 0; 2120 char *envp[3]; 2121 2122 switch (evt->evt_type) { 2123 case SDEV_EVT_MEDIA_CHANGE: 2124 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2125 break; 2126 2127 default: 2128 /* do nothing */ 2129 break; 2130 } 2131 2132 envp[idx++] = NULL; 2133 2134 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2135} 2136 2137/** 2138 * scsi_evt_thread - send a uevent for each scsi event 2139 * @work: work struct for scsi_device 2140 * 2141 * Dispatch queued events to their associated scsi_device kobjects 2142 * as uevents.
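 *
 * The work item is scheduled by sdev_evt_send(): events are queued on
 * sdev->event_list under sdev->list_lock, and this function repeatedly
 * splices that list onto a local one, emitting each event through
 * scsi_evt_emit() and freeing it, until no events remain.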
2143 */ 2144void scsi_evt_thread(struct work_struct *work) 2145{ 2146 struct scsi_device *sdev; 2147 LIST_HEAD(event_list); 2148 2149 sdev = container_of(work, struct scsi_device, event_work); 2150 2151 while (1) { 2152 struct scsi_event *evt; 2153 struct list_head *this, *tmp; 2154 unsigned long flags; 2155 2156 spin_lock_irqsave(&sdev->list_lock, flags); 2157 list_splice_init(&sdev->event_list, &event_list); 2158 spin_unlock_irqrestore(&sdev->list_lock, flags); 2159 2160 if (list_empty(&event_list)) 2161 break; 2162 2163 list_for_each_safe(this, tmp, &event_list) { 2164 evt = list_entry(this, struct scsi_event, node); 2165 list_del(&evt->node); 2166 scsi_evt_emit(sdev, evt); 2167 kfree(evt); 2168 } 2169 } 2170} 2171 2172/** 2173 * sdev_evt_send - send asserted event to uevent thread 2174 * @sdev: scsi_device event occurred on 2175 * @evt: event to send 2176 * 2177 * Assert scsi device event asynchronously. 2178 */ 2179void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2180{ 2181 unsigned long flags; 2182 2183 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2184 kfree(evt); 2185 return; 2186 } 2187 2188 spin_lock_irqsave(&sdev->list_lock, flags); 2189 list_add_tail(&evt->node, &sdev->event_list); 2190 schedule_work(&sdev->event_work); 2191 spin_unlock_irqrestore(&sdev->list_lock, flags); 2192} 2193EXPORT_SYMBOL_GPL(sdev_evt_send); 2194 2195/** 2196 * sdev_evt_alloc - allocate a new scsi event 2197 * @evt_type: type of event to allocate 2198 * @gfpflags: GFP flags for allocation 2199 * 2200 * Allocates and returns a new scsi_event. 2201 */ 2202struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 2203 gfp_t gfpflags) 2204{ 2205 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); 2206 if (!evt) 2207 return NULL; 2208 2209 evt->evt_type = evt_type; 2210 INIT_LIST_HEAD(&evt->node); 2211 2212 /* evt_type-specific initialization, if any */ 2213 switch (evt_type) { 2214 case SDEV_EVT_MEDIA_CHANGE: 2215 default: 2216 /* do nothing */ 2217 break; 2218 } 2219 2220 return evt; 2221} 2222EXPORT_SYMBOL_GPL(sdev_evt_alloc); 2223 2224/** 2225 * sdev_evt_send_simple - send asserted event to uevent thread 2226 * @sdev: scsi_device event occurred on 2227 * @evt_type: type of event to send 2228 * @gfpflags: GFP flags for allocation 2229 * 2230 * Assert scsi device event asynchronously, given an event type. 2231 */ 2232void sdev_evt_send_simple(struct scsi_device *sdev, 2233 enum scsi_device_event evt_type, gfp_t gfpflags) 2234{ 2235 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); 2236 if (!evt) { 2237 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", 2238 evt_type); 2239 return; 2240 } 2241 2242 sdev_evt_send(sdev, evt); 2243} 2244EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2245 2246/** 2247 * scsi_device_quiesce - Block user issued commands. 2248 * @sdev: scsi device to quiesce. 2249 * 2250 * This works by trying to transition to the SDEV_QUIESCE state 2251 * (which must be a legal transition). When the device is in this 2252 * state, only special requests will be accepted, all others will 2253 * be deferred. Since special requests may also be requeued requests, 2254 * a successful return doesn't guarantee the device will be 2255 * totally quiescent. 2256 * 2257 * Must be called with user context, may sleep. 2258 * 2259 * Returns zero if successful or an error if not.
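 *
 * Typical pairing (editor's sketch, not taken from this file): quiesce
 * the device, issue the internal commands that must not compete with
 * normal I/O, then resume:
 *
 *	err = scsi_device_quiesce(sdev);
 *	if (err)
 *		return err;
 *	(issue internal commands, e.g. via scsi_execute_req(), which
 *	 are still accepted while the device is in SDEV_QUIESCE)
 *	scsi_device_resume(sdev);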
2260 */ 2261int 2262scsi_device_quiesce(struct scsi_device *sdev) 2263{ 2264 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); 2265 if (err) 2266 return err; 2267 2268 scsi_run_queue(sdev->request_queue); 2269 while (sdev->device_busy) { 2270 msleep_interruptible(200); 2271 scsi_run_queue(sdev->request_queue); 2272 } 2273 return 0; 2274} 2275EXPORT_SYMBOL(scsi_device_quiesce); 2276 2277/** 2278 * scsi_device_resume - Restart user issued commands to a quiesced device. 2279 * @sdev: scsi device to resume. 2280 * 2281 * Moves the device from quiesced back to running and restarts the 2282 * queues. 2283 * 2284 * Must be called with user context, may sleep. 2285 */ 2286void 2287scsi_device_resume(struct scsi_device *sdev) 2288{ 2289 if(scsi_device_set_state(sdev, SDEV_RUNNING)) 2290 return; 2291 scsi_run_queue(sdev->request_queue); 2292} 2293EXPORT_SYMBOL(scsi_device_resume); 2294 2295static void 2296device_quiesce_fn(struct scsi_device *sdev, void *data) 2297{ 2298 scsi_device_quiesce(sdev); 2299} 2300 2301void 2302scsi_target_quiesce(struct scsi_target *starget) 2303{ 2304 starget_for_each_device(starget, NULL, device_quiesce_fn); 2305} 2306EXPORT_SYMBOL(scsi_target_quiesce); 2307 2308static void 2309device_resume_fn(struct scsi_device *sdev, void *data) 2310{ 2311 scsi_device_resume(sdev); 2312} 2313 2314void 2315scsi_target_resume(struct scsi_target *starget) 2316{ 2317 starget_for_each_device(starget, NULL, device_resume_fn); 2318} 2319EXPORT_SYMBOL(scsi_target_resume); 2320 2321/** 2322 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2323 * @sdev: device to block 2324 * 2325 * Block request made by scsi lld's to temporarily stop all 2326 * scsi commands on the specified device. Called from interrupt 2327 * or normal process context. 2328 * 2329 * Returns zero if successful or error if not 2330 * 2331 * Notes: 2332 * This routine transitions the device to the SDEV_BLOCK state 2333 * (which must be a legal transition). When the device is in this 2334 * state, all commands are deferred until the scsi lld reenables 2335 * the device with scsi_device_unblock or device_block_tmo fires. 2336 * This routine assumes the host_lock is held on entry. 2337 */ 2338int 2339scsi_internal_device_block(struct scsi_device *sdev) 2340{ 2341 struct request_queue *q = sdev->request_queue; 2342 unsigned long flags; 2343 int err = 0; 2344 2345 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2346 if (err) 2347 return err; 2348 2349 /* 2350 * The device has transitioned to SDEV_BLOCK. Stop the 2351 * block layer from calling the midlayer with this device's 2352 * request queue. 2353 */ 2354 spin_lock_irqsave(q->queue_lock, flags); 2355 blk_stop_queue(q); 2356 spin_unlock_irqrestore(q->queue_lock, flags); 2357 2358 return 0; 2359} 2360EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2361 2362/** 2363 * scsi_internal_device_unblock - resume a device after a block request 2364 * @sdev: device to resume 2365 * 2366 * Called by scsi lld's or the midlayer to restart the device queue 2367 * for the previously suspended scsi device. Called from interrupt or 2368 * normal process context. 2369 * 2370 * Returns zero if successful or error if not. 2371 * 2372 * Notes: 2373 * This routine transitions the device to the SDEV_RUNNING state 2374 * (which must be a legal transition) allowing the midlayer to 2375 * goose the queue for this device. This routine assumes the 2376 * host_lock is held upon entry. 
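 *
 * Illustrative pairing (editor's sketch, not taken from this file): an
 * LLD that needs to keep commands away from a device for a short time,
 * say across an internal reset, might do, with the host_lock held as
 * both routines expect:
 *
 *	scsi_internal_device_block(sdev);
 *	(perform the disruptive operation)
 *	scsi_internal_device_unblock(sdev);
 *
 * While blocked the request queue is stopped, so no new commands reach
 * the LLD for this device until the unblock restarts the queue.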
2377 */ 2378int 2379scsi_internal_device_unblock(struct scsi_device *sdev) 2380{ 2381 struct request_queue *q = sdev->request_queue; 2382 int err; 2383 unsigned long flags; 2384 2385 /* 2386 * Try to transition the scsi device to SDEV_RUNNING 2387 * and goose the device queue if successful. 2388 */ 2389 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2390 if (err) 2391 return err; 2392 2393 spin_lock_irqsave(q->queue_lock, flags); 2394 blk_start_queue(q); 2395 spin_unlock_irqrestore(q->queue_lock, flags); 2396 2397 return 0; 2398} 2399EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); 2400 2401static void 2402device_block(struct scsi_device *sdev, void *data) 2403{ 2404 scsi_internal_device_block(sdev); 2405} 2406 2407static int 2408target_block(struct device *dev, void *data) 2409{ 2410 if (scsi_is_target_device(dev)) 2411 starget_for_each_device(to_scsi_target(dev), NULL, 2412 device_block); 2413 return 0; 2414} 2415 2416void 2417scsi_target_block(struct device *dev) 2418{ 2419 if (scsi_is_target_device(dev)) 2420 starget_for_each_device(to_scsi_target(dev), NULL, 2421 device_block); 2422 else 2423 device_for_each_child(dev, NULL, target_block); 2424} 2425EXPORT_SYMBOL_GPL(scsi_target_block); 2426 2427static void 2428device_unblock(struct scsi_device *sdev, void *data) 2429{ 2430 scsi_internal_device_unblock(sdev); 2431} 2432 2433static int 2434target_unblock(struct device *dev, void *data) 2435{ 2436 if (scsi_is_target_device(dev)) 2437 starget_for_each_device(to_scsi_target(dev), NULL, 2438 device_unblock); 2439 return 0; 2440} 2441 2442void 2443scsi_target_unblock(struct device *dev) 2444{ 2445 if (scsi_is_target_device(dev)) 2446 starget_for_each_device(to_scsi_target(dev), NULL, 2447 device_unblock); 2448 else 2449 device_for_each_child(dev, NULL, target_unblock); 2450} 2451EXPORT_SYMBOL_GPL(scsi_target_unblock); 2452 2453/** 2454 * scsi_kmap_atomic_sg - find and atomically map an sg element 2455 * @sgl: scatter-gather list 2456 * @sg_count: number of segments in sg 2457 * @offset: offset in bytes into sg, on return offset into the mapped area 2458 * @len: bytes to map, on return number of bytes mapped 2459 * 2460 * Returns virtual address of the start of the mapped page 2461 */ 2462void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, 2463 size_t *offset, size_t *len) 2464{ 2465 int i; 2466 size_t sg_len = 0, len_complete = 0; 2467 struct scatterlist *sg; 2468 struct page *page; 2469 2470 WARN_ON(!irqs_disabled()); 2471 2472 for_each_sg(sgl, sg, sg_count, i) { 2473 len_complete = sg_len; /* Complete sg-entries */ 2474 sg_len += sg->length; 2475 if (sg_len > *offset) 2476 break; 2477 } 2478 2479 if (unlikely(i == sg_count)) { 2480 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 2481 "elements %d\n", 2482 __FUNCTION__, sg_len, *offset, sg_count); 2483 WARN_ON(1); 2484 return NULL; 2485 } 2486 2487 /* Offset starting from the beginning of first page in this sg-entry */ 2488 *offset = *offset - len_complete + sg->offset; 2489 2490 /* Assumption: contiguous pages can be accessed as "page + i" */ 2491 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); 2492 *offset &= ~PAGE_MASK; 2493 2494 /* Bytes in this sg-entry from *offset to the end of the page */ 2495 sg_len = PAGE_SIZE - *offset; 2496 if (*len > sg_len) 2497 *len = sg_len; 2498 2499 return kmap_atomic(page, KM_BIO_SRC_IRQ); 2500} 2501EXPORT_SYMBOL(scsi_kmap_atomic_sg); 2502 2503/** 2504 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 2505 * @virt:
virtual address to be unmapped 2506 */ 2507void scsi_kunmap_atomic_sg(void *virt) 2508{ 2509 kunmap_atomic(virt, KM_BIO_SRC_IRQ); 2510} 2511EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 2512
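/*
 * Usage sketch (editor's illustration, not part of the original file):
 * copying a small amount of data out of a scatterlist with the two
 * helpers above. The function name and its parameters are hypothetical.
 * Interrupts must be disabled around the atomic mapping, and at most
 * the bytes up to the end of the mapped page are copied, so a real
 * caller may need to loop until the full count has been transferred.
 */
#if 0	/* illustrative only */
static size_t example_copy_from_sg(struct scatterlist *sgl, int sg_count,
				   size_t offset, void *dest, size_t count)
{
	size_t len = count;
	unsigned long flags;
	void *vaddr;

	local_irq_save(flags);
	vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
	if (vaddr) {
		/* offset is now relative to the mapped page and len is
		 * clamped to the end of that page */
		memcpy(dest, vaddr + offset, len);
		scsi_kunmap_atomic_sg(vaddr);
	} else {
		len = 0;
	}
	local_irq_restore(flags);

	return len;
}
#endif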