scsi_lib.c revision 33659ebbae262228eef4e0fe990f393d1f0ed941
/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *	Initial versions: Eric Youngdale (eric@andante.org).
 *	Based upon conversations with large numbers
 *	of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.
	 * The mid-layer will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd	- command that we are adding to queue.
 *		reason	- why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases.  Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
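/*
 * For illustration (not part of this file): scsi_queue_insert() is how the
 * mid-layer reacts when a low-level driver refuses a command.  A
 * hypothetical driver that has run out of internal slots would return
 * SCSI_MLQUEUE_HOST_BUSY from its queuecommand, and scsi_dispatch_cmd()
 * then requeues the command via scsi_queue_insert().  Sketch only;
 * "example_slots_free" is a made-up driver helper:
 *
 *	static int example_queuecommand(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		struct example_adapter *a = shost_priv(cmd->device->host);
 *
 *		if (!example_slots_free(a))		// hypothetical
 *			return SCSI_MLQUEUE_HOST_BUSY;	// mid-layer requeues
 *		...
 *		return 0;
 *	}
 */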
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
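/*
 * For illustration (not part of this file): a caller that wants to issue
 * a TEST UNIT READY and inspect the decoded sense data can use
 * scsi_execute_req() roughly like this ("sdev" is a device reference the
 * caller already holds):
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
 *				  30 * HZ, 3, NULL);
 *	if (result && scsi_sense_valid(&sshdr))
 *		scsi_print_sense_hdr("example", &sshdr);
 *
 * scsi_test_unit_ready() later in this file implements this exact
 * pattern, with retry handling for UNIT ATTENTION.
 */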
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion.  Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}
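/*
 * For illustration (not part of this file): scsi_sgtable_index() maps a
 * segment count onto the smallest scsi_sg_pools[] entry that can hold
 * it, since each pool above 8 entries doubles in size.  A few worked
 * values, assuming SCSI_MAX_SG_SEGMENTS == 128:
 *
 *	nents = 5   -> index 0 -> "sgpool-8"
 *	nents = 25  -> get_count_order(25) = 5 -> index 2 -> "sgpool-32"
 *	nents = 128 -> get_count_order(128) = 7 -> index 4 -> "sgpool-128"
 */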
static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.
	 * For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current.
			 */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
					       struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
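/*
 * For illustration (not part of this file): an upper-level driver's
 * prep_fn follows the same pattern as scsi_prep_fn() above, but builds
 * its own CDB.  A hypothetical sketch, loosely modelled on what sd.c
 * does for REQ_TYPE_FS requests:
 *
 *	static int example_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		int ret = scsi_setup_fs_cmnd(sdev, rq);
 *
 *		if (ret == BLKPREP_OK) {
 *			struct scsi_cmnd *cmd = rq->special;
 *			// fill in cmd->cmnd[] with a READ/WRITE CDB here
 *		}
 *		return scsi_prep_return(q, rq, ret);
 *	}
 */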
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}


/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry)) {
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
			return 0;
		}
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
				 __func__);
		BUG();
	}

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/*
 * Function:	scsi_request_fn()
 *
 * Purpose:	Main strategy routine for SCSI.
 *
 * Arguments:	q	- Pointer to actual queue.
 *
 * Returns:	Nothing
 *
 * Lock status:	IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* New queue, no concurrency on queue_flags */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
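/*
 * For illustration (not part of this file): a low-level driver typically
 * brackets an operation that must not race with new commands, such as a
 * firmware reset, with this pair.  "example_reset_firmware" is a
 * hypothetical driver routine:
 *
 *	scsi_block_requests(shost);
 *	example_reset_firmware(adapter);	// hypothetical
 *	scsi_unblock_requests(shost);
 *
 * Nothing unblocks the host automatically; forgetting the second call
 * stalls all I/O to the host forever (see the note above).
 */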
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 * scsi_mode_select - issue a mode select
 * @sdev:	SCSI device to be queried
 * @pf:		Page format bit (1 == standard, 0 == vendor specific)
 * @sp:		Save page bit (0 == don't save, 1 == save)
 * @modepage:	mode page being requested
 * @buffer:	request buffer (may not be smaller than eight bytes)
 * @len:	length of request buffer.
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 * @data:	returns a structure abstracting the mode header data
 * @sshdr:	place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; negative error number or scsi
 * status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 * scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
 * @sdev:	SCSI device to be queried
 * @dbd:	set if mode sense will allow block descriptors to be returned
 * @modepage:	mode page being requested
 * @buffer:	request buffer (may not be smaller than eight bytes)
 * @len:	length of request buffer.
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 * @data:	returns a structure abstracting the mode header data
 * @sshdr:	place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if unsuccessful, or the header offset (either 4
 * or 8 depending on whether a six or ten byte command was
 * issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.
	 * MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

/**
 * scsi_test_unit_ready - test if unit is ready
 * @sdev:	scsi device to change the state of.
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 * Returns zero if successful, or an error if the TUR failed.  For
 * removable media, a return of NOT_READY or UNIT_ATTENTION is
 * translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if (sdev->removable && scsi_sense_valid(sshdr) &&
	    (sshdr->sense_key == UNIT_ATTENTION ||
	     sshdr->sense_key == NOT_READY)) {
		sdev->changed = 1;
		result = 0;
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
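/*
 * For illustration (not part of this file): a typical caller of
 * scsi_mode_sense() above reads one page and then uses the returned
 * scsi_mode_data to step past the mode header and block descriptors,
 * roughly the way sd reads the caching page:
 *
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char buffer[512];
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res)) {
 *		unsigned char *page = buffer + data.header_length +
 *				      data.block_descriptor_length;
 *		// parse mode page 0x08 (caching) here
 *	}
 */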
2030/**
2031 * scsi_device_set_state - Take the given device through the device state model.
2032 * @sdev:	scsi device to change the state of.
2033 * @state:	state to change to.
2034 *
2035 * Returns zero if successful or an error if the requested
2036 * transition is illegal.
2037 */
2038int
2039scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2040{
2041	enum scsi_device_state oldstate = sdev->sdev_state;
2042
2043	if (state == oldstate)
2044		return 0;
2045
2046	switch (state) {
2047	case SDEV_CREATED:
2048		switch (oldstate) {
2049		case SDEV_CREATED_BLOCK:
2050			break;
2051		default:
2052			goto illegal;
2053		}
2054		break;
2055
2056	case SDEV_RUNNING:
2057		switch (oldstate) {
2058		case SDEV_CREATED:
2059		case SDEV_OFFLINE:
2060		case SDEV_QUIESCE:
2061		case SDEV_BLOCK:
2062			break;
2063		default:
2064			goto illegal;
2065		}
2066		break;
2067
2068	case SDEV_QUIESCE:
2069		switch (oldstate) {
2070		case SDEV_RUNNING:
2071		case SDEV_OFFLINE:
2072			break;
2073		default:
2074			goto illegal;
2075		}
2076		break;
2077
2078	case SDEV_OFFLINE:
2079		switch (oldstate) {
2080		case SDEV_CREATED:
2081		case SDEV_RUNNING:
2082		case SDEV_QUIESCE:
2083		case SDEV_BLOCK:
2084			break;
2085		default:
2086			goto illegal;
2087		}
2088		break;
2089
2090	case SDEV_BLOCK:
2091		switch (oldstate) {
2092		case SDEV_RUNNING:
2093		case SDEV_CREATED_BLOCK:
2094			break;
2095		default:
2096			goto illegal;
2097		}
2098		break;
2099
2100	case SDEV_CREATED_BLOCK:
2101		switch (oldstate) {
2102		case SDEV_CREATED:
2103			break;
2104		default:
2105			goto illegal;
2106		}
2107		break;
2108
2109	case SDEV_CANCEL:
2110		switch (oldstate) {
2111		case SDEV_CREATED:
2112		case SDEV_RUNNING:
2113		case SDEV_QUIESCE:
2114		case SDEV_OFFLINE:
2115		case SDEV_BLOCK:
2116			break;
2117		default:
2118			goto illegal;
2119		}
2120		break;
2121
2122	case SDEV_DEL:
2123		switch (oldstate) {
2124		case SDEV_CREATED:
2125		case SDEV_RUNNING:
2126		case SDEV_OFFLINE:
2127		case SDEV_CANCEL:
2128			break;
2129		default:
2130			goto illegal;
2131		}
2132		break;
2133
2134	}
2135	sdev->sdev_state = state;
2136	return 0;
2137
2138 illegal:
2139	SCSI_LOG_ERROR_RECOVERY(1,
2140				sdev_printk(KERN_ERR, sdev,
2141					    "Illegal state transition %s->%s\n",
2142					    scsi_device_state_name(oldstate),
2143					    scsi_device_state_name(state))
2144				);
2145	return -EINVAL;
2146}
2147EXPORT_SYMBOL(scsi_device_set_state);
2148
2149/**
2150 * scsi_evt_emit - emit a single SCSI device uevent
2151 * @sdev: associated SCSI device
2152 * @evt: event to emit
2153 *
2154 * Send a single uevent (scsi_event) to the associated scsi_device.
2155 */
2156static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2157{
2158	int idx = 0;
2159	char *envp[3];
2160
2161	switch (evt->evt_type) {
2162	case SDEV_EVT_MEDIA_CHANGE:
2163		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2164		break;
2165
2166	default:
2167		/* do nothing */
2168		break;
2169	}
2170
2171	envp[idx++] = NULL;
2172
2173	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2174}
2175
2176/**
2177 * scsi_evt_thread - send a uevent for each scsi event
2178 * @work: work struct for scsi_device
2179 *
2180 * Dispatch queued events to their associated scsi_device kobjects
2181 * as uevents.
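 *
 * Editorial note (not in the original file): drivers feed this worker
 * through the helpers below.  Asserting, say, a media change from
 * completion context with
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *
 * queues event_work, and this function then drains the list and emits
 * SDEV_MEDIA_CHANGE=1 as a uevent.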
2182 */
2183void scsi_evt_thread(struct work_struct *work)
2184{
2185	struct scsi_device *sdev;
2186	LIST_HEAD(event_list);
2187
2188	sdev = container_of(work, struct scsi_device, event_work);
2189
2190	while (1) {
2191		struct scsi_event *evt;
2192		struct list_head *this, *tmp;
2193		unsigned long flags;
2194
2195		spin_lock_irqsave(&sdev->list_lock, flags);
2196		list_splice_init(&sdev->event_list, &event_list);
2197		spin_unlock_irqrestore(&sdev->list_lock, flags);
2198
2199		if (list_empty(&event_list))
2200			break;
2201
2202		list_for_each_safe(this, tmp, &event_list) {
2203			evt = list_entry(this, struct scsi_event, node);
2204			list_del(&evt->node);
2205			scsi_evt_emit(sdev, evt);
2206			kfree(evt);
2207		}
2208	}
2209}
2210
2211/**
2212 * sdev_evt_send - send asserted event to uevent thread
2213 * @sdev: scsi_device event occurred on
2214 * @evt: event to send
2215 *
2216 * Assert scsi device event asynchronously.
2217 */
2218void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2219{
2220	unsigned long flags;
2221
2222#if 0
2223	/* FIXME: currently this check eliminates all media change events
2224	 * for polled devices.  Need to update to discriminate between AN
2225	 * and polled events */
2226	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2227		kfree(evt);
2228		return;
2229	}
2230#endif
2231
2232	spin_lock_irqsave(&sdev->list_lock, flags);
2233	list_add_tail(&evt->node, &sdev->event_list);
2234	schedule_work(&sdev->event_work);
2235	spin_unlock_irqrestore(&sdev->list_lock, flags);
2236}
2237EXPORT_SYMBOL_GPL(sdev_evt_send);
2238
2239/**
2240 * sdev_evt_alloc - allocate a new scsi event
2241 * @evt_type: type of event to allocate
2242 * @gfpflags: GFP flags for allocation
2243 *
2244 * Allocates and returns a new scsi_event.
2245 */
2246struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2247				  gfp_t gfpflags)
2248{
2249	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2250	if (!evt)
2251		return NULL;
2252
2253	evt->evt_type = evt_type;
2254	INIT_LIST_HEAD(&evt->node);
2255
2256	/* evt_type-specific initialization, if any */
2257	switch (evt_type) {
2258	case SDEV_EVT_MEDIA_CHANGE:
2259	default:
2260		/* do nothing */
2261		break;
2262	}
2263
2264	return evt;
2265}
2266EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2267
2268/**
2269 * sdev_evt_send_simple - send asserted event to uevent thread
2270 * @sdev: scsi_device event occurred on
2271 * @evt_type: type of event to send
2272 * @gfpflags: GFP flags for allocation
2273 *
2274 * Assert scsi device event asynchronously, given an event type.
2275 */
2276void sdev_evt_send_simple(struct scsi_device *sdev,
2277			  enum scsi_device_event evt_type, gfp_t gfpflags)
2278{
2279	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2280	if (!evt) {
2281		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2282			    evt_type);
2283		return;
2284	}
2285
2286	sdev_evt_send(sdev, evt);
2287}
2288EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
2289
2290/**
2291 * scsi_device_quiesce - Block user issued commands.
2292 * @sdev:	scsi device to quiesce.
2293 *
2294 * This works by trying to transition to the SDEV_QUIESCE state
2295 * (which must be a legal transition).  When the device is in this
2296 * state, only special requests will be accepted, all others will
2297 * be deferred.  Since special requests may also be requeued requests,
2298 * a successful return doesn't guarantee the device will be
2299 * totally quiescent.
2300 *
2301 * Must be called with user context, may sleep.
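 *
 * Editorial example (not in the original file): a transport class doing
 * domain validation would bracket its internal commands like
 *
 *	if (scsi_device_quiesce(sdev))
 *		return;
 *	(... issue commands via scsi_execute() ...)
 *	scsi_device_resume(sdev);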
2302 *
2303 * Returns zero if successful or an error if not.
2304 */
2305int
2306scsi_device_quiesce(struct scsi_device *sdev)
2307{
2308	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2309	if (err)
2310		return err;
2311
2312	scsi_run_queue(sdev->request_queue);
2313	while (sdev->device_busy) {
2314		msleep_interruptible(200);
2315		scsi_run_queue(sdev->request_queue);
2316	}
2317	return 0;
2318}
2319EXPORT_SYMBOL(scsi_device_quiesce);
2320
2321/**
2322 * scsi_device_resume - Restart user issued commands to a quiesced device.
2323 * @sdev:	scsi device to resume.
2324 *
2325 * Moves the device from quiesced back to running and restarts the
2326 * queues.
2327 *
2328 * Must be called with user context, may sleep.
2329 */
2330void
2331scsi_device_resume(struct scsi_device *sdev)
2332{
2333	if (scsi_device_set_state(sdev, SDEV_RUNNING))
2334		return;
2335	scsi_run_queue(sdev->request_queue);
2336}
2337EXPORT_SYMBOL(scsi_device_resume);
2338
2339static void
2340device_quiesce_fn(struct scsi_device *sdev, void *data)
2341{
2342	scsi_device_quiesce(sdev);
2343}
2344
2345void
2346scsi_target_quiesce(struct scsi_target *starget)
2347{
2348	starget_for_each_device(starget, NULL, device_quiesce_fn);
2349}
2350EXPORT_SYMBOL(scsi_target_quiesce);
2351
2352static void
2353device_resume_fn(struct scsi_device *sdev, void *data)
2354{
2355	scsi_device_resume(sdev);
2356}
2357
2358void
2359scsi_target_resume(struct scsi_target *starget)
2360{
2361	starget_for_each_device(starget, NULL, device_resume_fn);
2362}
2363EXPORT_SYMBOL(scsi_target_resume);
2364
2365/**
2366 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2367 * @sdev:	device to block
2368 *
2369 * Block request made by SCSI LLDs to temporarily stop all
2370 * scsi commands on the specified device.  Called from interrupt
2371 * or normal process context.
2372 *
2373 * Returns zero if successful or error if not.
2374 *
2375 * Notes:
2376 *	This routine transitions the device to the SDEV_BLOCK state
2377 *	(which must be a legal transition).  When the device is in this
2378 *	state, all commands are deferred until the scsi lld reenables
2379 *	the device with scsi_internal_device_unblock or device_block_tmo fires.
2380 *	This routine assumes the host_lock is held on entry.
2381 */
2382int
2383scsi_internal_device_block(struct scsi_device *sdev)
2384{
2385	struct request_queue *q = sdev->request_queue;
2386	unsigned long flags;
2387	int err = 0;
2388
2389	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2390	if (err) {
2391		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2392
2393		if (err)
2394			return err;
2395	}
2396
2397	/*
2398	 * The device has transitioned to SDEV_BLOCK.  Stop the
2399	 * block layer from calling the midlayer with this device's
2400	 * request queue.
2401	 */
2402	spin_lock_irqsave(q->queue_lock, flags);
2403	blk_stop_queue(q);
2404	spin_unlock_irqrestore(q->queue_lock, flags);
2405
2406	return 0;
2407}
2408EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2409
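/*
 * Editorial example (not part of the original file): an LLD reacting to
 * a transient transport outage might block the device and unblock it
 * once the path returns.  Both helpers expect host_lock to be held;
 * "flags" is a local unsigned long.
 *
 *	spin_lock_irqsave(sdev->host->host_lock, flags);
 *	scsi_internal_device_block(sdev);
 *	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 *
 *	(later, when the path is restored)
 *
 *	spin_lock_irqsave(sdev->host->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);
 *	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 */
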
2410/**
2411 * scsi_internal_device_unblock - resume a device after a block request
2412 * @sdev:	device to resume
2413 *
2414 * Called by SCSI LLDs or the midlayer to restart the device queue
2415 * for the previously suspended scsi device.  Called from interrupt or
2416 * normal process context.
2417 *
2418 * Returns zero if successful or error if not.
2419 *
2420 * Notes:
2421 *	This routine transitions the device to the SDEV_RUNNING state
2422 *	(which must be a legal transition) allowing the midlayer to
2423 *	goose the queue for this device.  This routine assumes the
2424 *	host_lock is held upon entry.
2425 */
2426int
2427scsi_internal_device_unblock(struct scsi_device *sdev)
2428{
2429	struct request_queue *q = sdev->request_queue;
2430	unsigned long flags;
2431
2432	/*
2433	 * Try to transition the scsi device to SDEV_RUNNING
2434	 * and goose the device queue if successful.
2435	 */
2436	if (sdev->sdev_state == SDEV_BLOCK)
2437		sdev->sdev_state = SDEV_RUNNING;
2438	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2439		sdev->sdev_state = SDEV_CREATED;
2440	else
2441		return -EINVAL;
2442
2443	spin_lock_irqsave(q->queue_lock, flags);
2444	blk_start_queue(q);
2445	spin_unlock_irqrestore(q->queue_lock, flags);
2446
2447	return 0;
2448}
2449EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2450
2451static void
2452device_block(struct scsi_device *sdev, void *data)
2453{
2454	scsi_internal_device_block(sdev);
2455}
2456
2457static int
2458target_block(struct device *dev, void *data)
2459{
2460	if (scsi_is_target_device(dev))
2461		starget_for_each_device(to_scsi_target(dev), NULL,
2462					device_block);
2463	return 0;
2464}
2465
2466void
2467scsi_target_block(struct device *dev)
2468{
2469	if (scsi_is_target_device(dev))
2470		starget_for_each_device(to_scsi_target(dev), NULL,
2471					device_block);
2472	else
2473		device_for_each_child(dev, NULL, target_block);
2474}
2475EXPORT_SYMBOL_GPL(scsi_target_block);
2476
2477static void
2478device_unblock(struct scsi_device *sdev, void *data)
2479{
2480	scsi_internal_device_unblock(sdev);
2481}
2482
2483static int
2484target_unblock(struct device *dev, void *data)
2485{
2486	if (scsi_is_target_device(dev))
2487		starget_for_each_device(to_scsi_target(dev), NULL,
2488					device_unblock);
2489	return 0;
2490}
2491
2492void
2493scsi_target_unblock(struct device *dev)
2494{
2495	if (scsi_is_target_device(dev))
2496		starget_for_each_device(to_scsi_target(dev), NULL,
2497					device_unblock);
2498	else
2499		device_for_each_child(dev, NULL, target_unblock);
2500}
2501EXPORT_SYMBOL_GPL(scsi_target_unblock);
2502
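/*
 * Editorial example (not part of the original file): transport classes
 * normally use the target-wide wrappers above.  The FC transport, for
 * instance, blocks all devices of a remote port while the port is gone
 * and unblocks them on reconnect or when dev_loss_tmo expires:
 *
 *	scsi_target_block(&rport->dev);
 *	(... port goes away and later returns ...)
 *	scsi_target_unblock(&rport->dev);
 */
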
2503/**
2504 * scsi_kmap_atomic_sg - find and atomically map an sg element
2505 * @sgl:	scatter-gather list
2506 * @sg_count:	number of segments in sg
2507 * @offset:	offset in bytes into sg, on return offset into the mapped area
2508 * @len:	bytes to map, on return number of bytes mapped
2509 *
2510 * Returns virtual address of the start of the mapped page
2511 */
2512void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2513			  size_t *offset, size_t *len)
2514{
2515	int i;
2516	size_t sg_len = 0, len_complete = 0;
2517	struct scatterlist *sg;
2518	struct page *page;
2519
2520	WARN_ON(!irqs_disabled());
2521
2522	for_each_sg(sgl, sg, sg_count, i) {
2523		len_complete = sg_len; /* Complete sg-entries */
2524		sg_len += sg->length;
2525		if (sg_len > *offset)
2526			break;
2527	}
2528
2529	if (unlikely(i == sg_count)) {
2530		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2531			"elements %d\n",
2532		       __func__, sg_len, *offset, sg_count);
2533		WARN_ON(1);
2534		return NULL;
2535	}
2536
2537	/* Offset starting from the beginning of first page in this sg-entry */
2538	*offset = *offset - len_complete + sg->offset;
2539
2540	/* Assumption: contiguous pages can be accessed as "page + i" */
2541	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2542	*offset &= ~PAGE_MASK;
2543
2544	/* Bytes in this sg-entry from *offset to the end of the page */
2545	sg_len = PAGE_SIZE - *offset;
2546	if (*len > sg_len)
2547		*len = sg_len;
2548
2549	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2550}
2551EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2552
2553/**
2554 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2555 * @virt:	virtual address to be unmapped
2556 */
2557void scsi_kunmap_atomic_sg(void *virt)
2558{
2559	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2560}
2561EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
2562
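/*
 * Editorial example (not part of the original file): copying the first
 * bytes of a command's data buffer through the atomic mapping above.
 * "tmp" and the 16-byte length are illustrative; interrupts must be off
 * around the map/unmap pair, as the WARN_ON() in scsi_kmap_atomic_sg()
 * checks.
 *
 *	unsigned long flags;
 *	size_t off = 0, len = 16;
 *	char *p;
 *
 *	local_irq_save(flags);
 *	p = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				&off, &len);
 *	if (p) {
 *		memcpy(tmp, p + off, len);
 *		scsi_kunmap_atomic_sg(p);
 *	}
 *	local_irq_restore(flags);
 */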