scsi_lib.c revision 3b00315799d78f76531b71435fbc2643cd71ae4c
/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

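/*
 * Example (illustrative sketch, not part of the mid-layer API): issue a
 * simple CDB and decode the sense data with scsi_execute_req().  The
 * timeout value is arbitrary and "sdev" is assumed to be a valid,
 * referenced scsi_device:
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
 *				  10 * HZ, 3);
 *
 * A nonzero result with scsi_sense_valid(&sshdr) true means the sense
 * key/asc/ascq fields of sshdr describe the failure.
 */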
struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static kmem_cache_t *scsi_io_context_cache;

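/*
 * Completion callback for requests started by scsi_execute_async(): run
 * the caller-supplied done() callback with the sense buffer, the request
 * result and the residual byte count, then free the io context and drop
 * the request.
 */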
static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!q->back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->hard_nr_sectors += bio_sectors(bio);
		rq->nr_sectors = rq->hard_nr_sectors;
	}

	return 0;
}

static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer.  We do not trust the scatterlist
 * sent to us, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for (i = 0; i < nsegs; i++) {
		page = sgl[i].page;
		off = sgl[i].offset;
		len = sgl[i].length;
		data_len += len;

		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @privdata:	data passed to done()
 * @done:	callback function when done
 * @gfp:	memory allocation flags
 **/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer,
		       unsigned bufflen, int use_sg, int timeout, int retries,
		       void *privdata, void (*done)(void *, char *, int, int),
		       gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;
	memset(sioc, 0, sizeof(*sioc));

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);

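/*
 * Example (illustrative sketch): scsi_execute_async() callers supply a
 * completion callback and a private cookie instead of blocking.  The
 * names my_done(), my_ctx and MY_TIMEOUT below are hypothetical:
 *
 *	static void my_done(void *data, char *sense, int result, int resid)
 *	{
 *		struct my_ctx *ctx = data;
 *		...inspect result and sense, then complete ctx...
 *	}
 *
 *	err = scsi_execute_async(sdev, cdb, COMMAND_SIZE(cdb[0]),
 *				 DMA_FROM_DEVICE, buf, buflen, 0,
 *				 MY_TIMEOUT, 3, ctx, my_done, GFP_KERNEL);
 *
 * A DRIVER_ERROR return means the request could not even be queued;
 * otherwise my_done() eventually runs from request completion.
 */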
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

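/*
 * scsi_next_command - free the given command and kick the queue so the
 * next request, if any, gets dispatched.  A device reference is held
 * across scsi_put_command() because releasing the command may drop the
 * last reference to the device.
 */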
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

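/*
 * scsi_alloc_sgtable - allocate a scatterlist from the smallest sg
 * mempool that can hold cmd->use_sg entries, recording the pool index in
 * cmd->sglist_len so that scsi_free_sgtable() can return the list to the
 * right pool.  Returns NULL if use_sg exceeds SCSI_MAX_PHYS_SEGMENTS.
 */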
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = cmd->request_bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	scsi_release_buffers(cmd);

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));
	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

	if (clear_errors)
		req->errors = 0;

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
		return;

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
				return;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Device not ready: ");
				scsi_print_sense_hdr("", &sshdr);
			}
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			/* See SSC3rXX or current. */
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scmd_printk(KERN_INFO, cmd,
				    "SCSI error: return code = 0x%08x\n",
				    result);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, 0, this_count, !result);
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * We used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;
	cmd->request_buffer = (char *) sgpnt;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	else
		cmd->request_bufflen = req->nr_sectors << 9;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return BLKPREP_OK;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	return cmd;
}

static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	/*
	 * This will complete the whole command with uptodate=1 so
	 * as far as the block layer is concerned the command completed
	 * successfully. Since this is a REQ_BLOCK_PC command the
	 * caller should check the request's errors value
	 */
	scsi_io_completion(cmd, cmd->request_bufflen);
}

static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI
	 * command that does not transfer data, in which case they may
	 * optionally submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		cmd->request_bufflen = 0;
		cmd->request_buffer = NULL;
		cmd->use_sg = 0;
		req->buffer = NULL;
	}

	BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	cmd->done = scsi_blk_pc_done;
	return BLKPREP_OK;
}

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	struct scsi_driver *drv;
	int ret;

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	ret = scsi_init_io(cmd);
	if (unlikely(ret))
		return ret;

	/*
	 * Initialize the actual SCSI command for this request.
	 */
	drv = *(struct scsi_driver **)req->rq_disk->private_data;
	if (unlikely(!drv->init_command(cmd))) {
		scsi_release_buffers(cmd);
		scsi_put_command(cmd);
		return BLKPREP_KILL;
	}

	return BLKPREP_OK;
}

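/*
 * scsi_prep_fn - prep_rq_fn for scsi request queues.  Rejects or defers
 * requests according to the current device state, then builds a
 * scsi_cmnd for REQ_TYPE_BLOCK_PC and REQ_TYPE_FS requests.  Returns one
 * of the BLKPREP_* codes.
 */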
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}

		if (ret != BLKPREP_OK)
			goto out;
	}

	switch (req->cmd_type) {
	case REQ_TYPE_BLOCK_PC:
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
		break;
	case REQ_TYPE_FS:
		ret = scsi_setup_fs_cmnd(sdev, req);
		break;
	default:
		/*
		 * All other command types are not supported.
		 *
		 * Note that these days the SCSI subsystem does not use
		 * REQ_TYPE_SPECIAL requests anymore.  These are only used
		 * (directly or via blk_insert_request) by non-SCSI drivers.
		 */
		blk_dump_rq_flags(req, "SCSI bad req");
		ret = BLKPREP_KILL;
		break;
	}

 out:
	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				sdev_printk(KERN_INFO, sdev,
					    "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
				       shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct Scsi_Host *shost;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __FUNCTION__);
		BUG();
	}

	sdev = cmd->device;
	shost = sdev->host;

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	__scsi_done(cmd);
}

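/*
 * scsi_softirq_done - softirq completion handler: decide the disposition
 * of the completed command (finish, retry or requeue), forcing SUCCESS
 * if the command has already used up its allotted retry time.
 */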
static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->completion_data;
	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_retry_command(cmd);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __FUNCTION__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

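/*
 * scsi_calculate_bounce_limit - highest address the host can DMA to,
 * used to decide when the block layer must bounce buffer: ISA-DMA-only
 * hosts are limited to the ISA region, hosts behind a DMA-translating
 * IOMMU have no practical limit, and everything else is governed by the
 * host device's dma_mask.
 */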
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

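/*
 * scsi_alloc_queue - allocate a request queue for the given scsi device
 * and apply the host's DMA and scatter-gather limits to it.  Returns
 * NULL on allocation failure.
 */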
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

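/*
 * Example (illustrative sketch): a low-level driver typically brackets a
 * window in which its hardware cannot accept commands, e.g. around a
 * firmware download in some hypothetical my_download_firmware():
 *
 *	scsi_block_requests(shost);
 *	err = my_download_firmware(shost);
 *	scsi_unblock_requests(shost);
 */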
int __init scsi_init_queue(void)
{
	int i;

	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
					sizeof(struct scsi_io_context),
					0, 0, NULL, NULL);
	if (!scsi_io_context_cache) {
		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_io_context_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 **/
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

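/*
 * Example (illustrative sketch): changing a mode page usually means
 * reading it first with scsi_mode_sense() (below), modifying the page
 * bytes in the returned buffer, and then writing it back with
 * scsi_mode_select(), reusing the scsi_mode_data that the sense call
 * filled in so that the header written back matches the device.
 */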
/**
 *	scsi_mode_sense - issue a mode sense, falling back from a ten byte
 *		to a six byte command if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful.  On success the mode header is decoded
 *	into @data; @data->header_length gives the header offset (four or
 *	eight bytes, depending on whether a six or ten byte command was
 *	issued).
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

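/*
 * scsi_test_unit_ready - issue TEST UNIT READY.  For removable media a
 * UNIT ATTENTION or NOT READY sense result is treated as a media change:
 * sdev->changed is set and success is returned.
 */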
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  timeout, retries);

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

		if ((scsi_sense_valid(&sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

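/*
 * Example (illustrative sketch): suspend-style users pair the two calls
 * around a window in which only internally generated (REQ_PREEMPT)
 * requests can reach the device:
 *
 *	err = scsi_device_quiesce(sdev);
 *	if (!err) {
 *		...issue special requests via scsi_execute()...
 *		scsi_device_resume(sdev);
 *	}
 */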
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
        if (scsi_device_set_state(sdev, SDEV_RUNNING))
                return;
        scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
        scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
        starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block requests made by SCSI LLDs to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the SCSI LLD re-enables
 *	the device with scsi_internal_device_unblock or device_block_tmo
 *	fires.  This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
        request_queue_t *q = sdev->request_queue;
        unsigned long flags;
        int err = 0;

        err = scsi_device_set_state(sdev, SDEV_BLOCK);
        if (err)
                return err;

        /*
         * The device has transitioned to SDEV_BLOCK.  Stop the
         * block layer from calling the midlayer with this device's
         * request queue.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
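
/*
 * Illustrative sketch (not part of scsi_lib.c): how an LLD might bracket a
 * hardware reset with scsi_internal_device_block() and
 * scsi_internal_device_unblock() (defined just below).  The function name
 * and reset_hardware() callback are hypothetical; both helpers assume the
 * caller holds host_lock, hence the explicit locking here.
 */
#if 0	/* example only */
static void example_reset_with_block(struct Scsi_Host *shost,
                                     struct scsi_device *sdev,
                                     void (*reset_hardware)(struct Scsi_Host *))
{
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        scsi_internal_device_block(sdev);	/* defer all commands */
        spin_unlock_irqrestore(shost->host_lock, flags);

        reset_hardware(shost);		/* no new commands arrive here */

        spin_lock_irqsave(shost->host_lock, flags);
        scsi_internal_device_unblock(sdev);	/* restart the queue */
        spin_unlock_irqrestore(shost->host_lock, flags);
}
#endif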
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by SCSI LLDs or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
        request_queue_t *q = sdev->request_queue;
        int err;
        unsigned long flags;

        /*
         * Try to transition the scsi device to SDEV_RUNNING
         * and goose the device queue if successful.
         */
        err = scsi_device_set_state(sdev, SDEV_RUNNING);
        if (err)
                return err;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        return 0;
}

void
scsi_target_block(struct device *dev)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_block);
        else
                device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
        scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_unblock);
        return 0;
}

void
scsi_target_unblock(struct device *dev)
{
        if (scsi_is_target_device(dev))
                starget_for_each_device(to_scsi_target(dev), NULL,
                                        device_unblock);
        else
                device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sg:		scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
                          size_t *offset, size_t *len)
{
        int i;
        size_t sg_len = 0, len_complete = 0;
        struct page *page;

        for (i = 0; i < sg_count; i++) {
                len_complete = sg_len; /* Complete sg-entries */
                sg_len += sg[i].length;
                if (sg_len > *offset)
                        break;
        }

        if (unlikely(i == sg_count)) {
                printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
                        "elements %d\n",
                       __FUNCTION__, sg_len, *offset, sg_count);
                WARN_ON(1);
                return NULL;
        }

        /* Offset starting from the beginning of first page in this sg-entry */
        *offset = *offset - len_complete + sg[i].offset;

        /* Assumption: contiguous pages can be accessed as "page + i" */
        page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT));
        *offset &= ~PAGE_MASK;

        /* Bytes in this sg-entry from *offset to the end of the page */
        sg_len = PAGE_SIZE - *offset;
        if (*len > sg_len)
                *len = sg_len;

        return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 *			   mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
        kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
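
/*
 * Illustrative sketch (not part of scsi_lib.c): copying bytes out of a
 * scatter-gather list with the two helpers above.  The function name and
 * parameters are hypothetical.  Note that *offset and *len are in/out
 * parameters and at most one page is mapped per call, so a caller loops
 * until it has copied everything it needs.
 */
#if 0	/* example only */
static size_t example_copy_from_sg(struct scatterlist *sg, int sg_count,
                                   size_t src_offset, void *dst, size_t want)
{
        size_t copied = 0;

        while (copied < want) {
                size_t offset = src_offset + copied;	/* in: offset into sg */
                size_t len = want - copied;		/* in: bytes wanted */
                char *vaddr = scsi_kmap_atomic_sg(sg, sg_count, &offset, &len);

                if (!vaddr)
                        break;		/* offset lies past the end of sg */
                /* out: offset is now into the mapped page, len <= one page */
                memcpy(dst + copied, vaddr + offset, len);
                scsi_kunmap_atomic_sg(vaddr);
                copied += len;
        }
        return copied;
}
#endif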