scsi_lib.c revision c3a4d78c580de4edc9ef0f7c59812fb02ceb037f
/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user-specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	length of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

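/*
 * Editor's illustrative sketch (not part of scsi_lib.c): how a caller
 * might use scsi_execute_req() above to issue a 36-byte standard INQUIRY.
 * The helper name and the timeout/retry values are invented for the
 * example; real callers choose their own.  Wrapped in "#if 0" so it is
 * never built.
 */
#if 0
static int example_inquiry(struct scsi_device *sdev, unsigned char *buf)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* DMA_FROM_DEVICE: the device returns 36 bytes of INQUIRY data */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
				  &sshdr, 10 * HZ, 3, NULL);
	if (result)
		sdev_printk(KERN_INFO, sdev,
			    "example INQUIRY failed: 0x%x\n", result);
	return result;
}
#endif
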
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev.  We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first.  Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue.  scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->resid_len;

		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request(req, error, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

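/*
 * Editor's illustrative note (not part of scsi_lib.c): with
 * get_count_order(), scsi_sgtable_index() above picks the smallest
 * sgpool whose size covers the segment count, so scsi_sg_alloc() and
 * scsi_sg_free() always hit the same mempool for a given nents.
 * Assuming the common SCSI_MAX_SG_SEGMENTS of 128, the mapping is:
 *
 *	nents  1..8	-> index 0 (sgpool-8)
 *	nents  9..16	-> index 1 (sgpool-16)
 *	nents 17..32	-> index 2 (sgpool-32)
 *	nents 33..64	-> index 3 (sgpool-64)
 *	nents 65..128	-> index 4 (sgpool-128)
 *
 * e.g. a request mapped to 20 segments allocates from and frees back
 * to the sgpool-32 mempool.
 */
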
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Bidi commands must be completed as a whole, both sides at once.  If
 * part of the bytes were written and the lld returned scsi_in()->resid
 * and/or scsi_out()->resid, this information will be left in
 * req->resid_len and req->next_rq->resid_len.  The upper-layer driver
 * can decide what to do with this information.
 */
static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned int dlen = req->data_len;
	unsigned int next_dlen = req->next_rq->data_len;

	req->resid_len = scsi_out(cmd)->resid;
	req->next_rq->resid_len = scsi_in(cmd)->resid;

	/* The req and req->next_rq have not been completed */
	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));

	scsi_release_buffers(cmd);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}
		if (scsi_bidi_cmnd(cmd)) {
			/* will also release_buffers */
			scsi_end_bidi_request(cmd);
			return;
		}
		req->resid_len = scsi_get_resid(cmd);
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
		if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;
	this_count = blk_rq_bytes(req);

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		blk_end_request_all(req, -EIO);
		scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	if (blk_pc_request(req))
		sdb->length = req->data_len;
	else
		sdb->length = req->nr_sectors << 9;
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must have
	 * a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}


/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else {
			blk_plug_device(sdev->request_queue);
			return 0;
		}
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry)) {
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
			return 0;
		}
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

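/*
 * Editor's illustrative note (not part of scsi_lib.c): the three "ready"
 * checks above gate dispatch in scsi_request_fn() in the order device ->
 * target -> host.  A worked example of the host_blocked throttle: after
 * a driver returns SCSI_MLQUEUE_HOST_BUSY, __scsi_queue_insert() sets
 * host_blocked = max_host_blocked (say 7).  With no commands in flight,
 * each subsequent scsi_host_queue_ready() call just decrements the
 * counter and returns 0; the seventh call brings it to zero, logs
 * "unblocking host at zero depth" and lets dispatch resume.
 */
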
/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __func__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
	}
}

/*
 * Function:   scsi_request_fn()
 *
 * Purpose:    Main strategy routine for SCSI.
 *
 * Arguments:  q       - Pointer to actual queue.
 *
 * Returns:    Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);

	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* New queue, no concurrency on queue_flags */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if successful, or an error code if the TUR failed.  For
 *	removable media, a return of NOT_READY or UNIT_ATTENTION is
 *	translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if (sdev->removable && scsi_sense_valid(sshdr) &&
	    (sshdr->sense_key == UNIT_ATTENTION ||
	     sshdr->sense_key == NOT_READY)) {
		sdev->changed = 1;
		result = 0;
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

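/*
 * Editor's illustrative sketch (not part of scsi_lib.c): a typical way a
 * caller might use scsi_test_unit_ready() above from a media-polling
 * path.  The helper name and the timeout/retry values are invented for
 * the example.  Wrapped in "#if 0" so it is never built.
 */
#if 0
static int example_media_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr = { };

	/* zero return means the unit answered TEST UNIT READY with GOOD
	 * status (or, for removable media, a NOT_READY/UNIT_ATTENTION
	 * that was translated to success with sdev->changed set) */
	return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0;
}
#endif
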
2052 */
2053int
2054scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2055{
2056	enum scsi_device_state oldstate = sdev->sdev_state;
2057
2058	if (state == oldstate)
2059		return 0;
2060
2061	switch (state) {
2062	case SDEV_CREATED:
2063		switch (oldstate) {
2064		case SDEV_CREATED_BLOCK:
2065			break;
2066		default:
2067			goto illegal;
2068		}
2069		break;
2070
2071	case SDEV_RUNNING:
2072		switch (oldstate) {
2073		case SDEV_CREATED:
2074		case SDEV_OFFLINE:
2075		case SDEV_QUIESCE:
2076		case SDEV_BLOCK:
2077			break;
2078		default:
2079			goto illegal;
2080		}
2081		break;
2082
2083	case SDEV_QUIESCE:
2084		switch (oldstate) {
2085		case SDEV_RUNNING:
2086		case SDEV_OFFLINE:
2087			break;
2088		default:
2089			goto illegal;
2090		}
2091		break;
2092
2093	case SDEV_OFFLINE:
2094		switch (oldstate) {
2095		case SDEV_CREATED:
2096		case SDEV_RUNNING:
2097		case SDEV_QUIESCE:
2098		case SDEV_BLOCK:
2099			break;
2100		default:
2101			goto illegal;
2102		}
2103		break;
2104
2105	case SDEV_BLOCK:
2106		switch (oldstate) {
2107		case SDEV_RUNNING:
2108		case SDEV_CREATED_BLOCK:
2109			break;
2110		default:
2111			goto illegal;
2112		}
2113		break;
2114
2115	case SDEV_CREATED_BLOCK:
2116		switch (oldstate) {
2117		case SDEV_CREATED:
2118			break;
2119		default:
2120			goto illegal;
2121		}
2122		break;
2123
2124	case SDEV_CANCEL:
2125		switch (oldstate) {
2126		case SDEV_CREATED:
2127		case SDEV_RUNNING:
2128		case SDEV_QUIESCE:
2129		case SDEV_OFFLINE:
2130		case SDEV_BLOCK:
2131			break;
2132		default:
2133			goto illegal;
2134		}
2135		break;
2136
2137	case SDEV_DEL:
2138		switch (oldstate) {
2139		case SDEV_CREATED:
2140		case SDEV_RUNNING:
2141		case SDEV_OFFLINE:
2142		case SDEV_CANCEL:
2143			break;
2144		default:
2145			goto illegal;
2146		}
2147		break;
2148
2149	}
2150	sdev->sdev_state = state;
2151	return 0;
2152
2153 illegal:
2154	SCSI_LOG_ERROR_RECOVERY(1,
2155				sdev_printk(KERN_ERR, sdev,
2156					    "Illegal state transition %s->%s\n",
2157					    scsi_device_state_name(oldstate),
2158					    scsi_device_state_name(state))
2159				);
2160	return -EINVAL;
2161}
2162EXPORT_SYMBOL(scsi_device_set_state);
2163
2164/**
2165 * scsi_evt_emit - emit a single SCSI device uevent
2166 * @sdev: associated SCSI device
2167 * @evt: event to emit
2168 *
2169 * Send a single uevent (scsi_event) to the associated scsi_device.
2170 */
2171static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2172{
2173	int idx = 0;
2174	char *envp[3];
2175
2176	switch (evt->evt_type) {
2177	case SDEV_EVT_MEDIA_CHANGE:
2178		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2179		break;
2180
2181	default:
2182		/* do nothing */
2183		break;
2184	}
2185
2186	envp[idx++] = NULL;
2187
2188	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2189}
2190
2191/**
2192 * scsi_evt_thread - send a uevent for each scsi event
2193 * @work: work struct for scsi_device
2194 *
2195 * Dispatch queued events to their associated scsi_device kobjects
2196 * as uevents.
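 *
 * Rough picture of the flow (editorial sketch; the helpers named here are
 * the ones defined elsewhere in this file): a caller asserts an event with
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *
 * which queues it on sdev->event_list and schedules sdev->event_work;
 * this work function then drains the list and emits one uevent per entry
 * via scsi_evt_emit().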
2197 */
2198void scsi_evt_thread(struct work_struct *work)
2199{
2200	struct scsi_device *sdev;
2201	LIST_HEAD(event_list);
2202
2203	sdev = container_of(work, struct scsi_device, event_work);
2204
2205	while (1) {
2206		struct scsi_event *evt;
2207		struct list_head *this, *tmp;
2208		unsigned long flags;
2209
2210		spin_lock_irqsave(&sdev->list_lock, flags);
2211		list_splice_init(&sdev->event_list, &event_list);
2212		spin_unlock_irqrestore(&sdev->list_lock, flags);
2213
2214		if (list_empty(&event_list))
2215			break;
2216
2217		list_for_each_safe(this, tmp, &event_list) {
2218			evt = list_entry(this, struct scsi_event, node);
2219			list_del(&evt->node);
2220			scsi_evt_emit(sdev, evt);
2221			kfree(evt);
2222		}
2223	}
2224}
2225
2226/**
2227 * sdev_evt_send - send asserted event to uevent thread
2228 * @sdev: scsi_device event occurred on
2229 * @evt: event to send
2230 *
2231 * Assert scsi device event asynchronously.
2232 */
2233void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2234{
2235	unsigned long flags;
2236
2237#if 0
2238	/* FIXME: currently this check eliminates all media change events
2239	 * for polled devices. Need to update to discriminate between AN
2240	 * and polled events */
2241	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2242		kfree(evt);
2243		return;
2244	}
2245#endif
2246
2247	spin_lock_irqsave(&sdev->list_lock, flags);
2248	list_add_tail(&evt->node, &sdev->event_list);
2249	schedule_work(&sdev->event_work);
2250	spin_unlock_irqrestore(&sdev->list_lock, flags);
2251}
2252EXPORT_SYMBOL_GPL(sdev_evt_send);
2253
2254/**
2255 * sdev_evt_alloc - allocate a new scsi event
2256 * @evt_type: type of event to allocate
2257 * @gfpflags: GFP flags for allocation
2258 *
2259 * Allocates and returns a new scsi_event.
2260 */
2261struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2262				  gfp_t gfpflags)
2263{
2264	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2265	if (!evt)
2266		return NULL;
2267
2268	evt->evt_type = evt_type;
2269	INIT_LIST_HEAD(&evt->node);
2270
2271	/* evt_type-specific initialization, if any */
2272	switch (evt_type) {
2273	case SDEV_EVT_MEDIA_CHANGE:
2274	default:
2275		/* do nothing */
2276		break;
2277	}
2278
2279	return evt;
2280}
2281EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2282
2283/**
2284 * sdev_evt_send_simple - send asserted event to uevent thread
2285 * @sdev: scsi_device event occurred on
2286 * @evt_type: type of event to send
2287 * @gfpflags: GFP flags for allocation
2288 *
2289 * Assert scsi device event asynchronously, given an event type.
2290 */
2291void sdev_evt_send_simple(struct scsi_device *sdev,
2292			  enum scsi_device_event evt_type, gfp_t gfpflags)
2293{
2294	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2295	if (!evt) {
2296		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2297			    evt_type);
2298		return;
2299	}
2300
2301	sdev_evt_send(sdev, evt);
2302}
2303EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
2304
2305/**
2306 * scsi_device_quiesce - Block user issued commands.
2307 * @sdev: scsi device to quiesce.
2308 *
2309 * This works by trying to transition to the SDEV_QUIESCE state
2310 * (which must be a legal transition). When the device is in this
2311 * state, only special requests will be accepted, all others will
2312 * be deferred. Since special requests may also be requeued requests,
2313 * a successful return doesn't guarantee the device will be
2314 * totally quiescent.
2315 *
2316 * Must be called with user context, may sleep.
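 *
 * A minimal usage sketch (editorial addition): callers normally pair this
 * with scsi_device_resume(), defined below, e.g.
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		... issue internal commands, e.g. via scsi_execute() ...
 *		scsi_device_resume(sdev);
 *	}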
2317 *
2318 * Returns zero if successful or an error if not.
2319 */
2320int
2321scsi_device_quiesce(struct scsi_device *sdev)
2322{
2323	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2324	if (err)
2325		return err;
2326
2327	scsi_run_queue(sdev->request_queue);
2328	while (sdev->device_busy) {
2329		msleep_interruptible(200);
2330		scsi_run_queue(sdev->request_queue);
2331	}
2332	return 0;
2333}
2334EXPORT_SYMBOL(scsi_device_quiesce);
2335
2336/**
2337 * scsi_device_resume - Restart user issued commands to a quiesced device.
2338 * @sdev: scsi device to resume.
2339 *
2340 * Moves the device from quiesced back to running and restarts the
2341 * queues.
2342 *
2343 * Must be called with user context, may sleep.
2344 */
2345void
2346scsi_device_resume(struct scsi_device *sdev)
2347{
2348	if(scsi_device_set_state(sdev, SDEV_RUNNING))
2349		return;
2350	scsi_run_queue(sdev->request_queue);
2351}
2352EXPORT_SYMBOL(scsi_device_resume);
2353
2354static void
2355device_quiesce_fn(struct scsi_device *sdev, void *data)
2356{
2357	scsi_device_quiesce(sdev);
2358}
2359
2360void
2361scsi_target_quiesce(struct scsi_target *starget)
2362{
2363	starget_for_each_device(starget, NULL, device_quiesce_fn);
2364}
2365EXPORT_SYMBOL(scsi_target_quiesce);
2366
2367static void
2368device_resume_fn(struct scsi_device *sdev, void *data)
2369{
2370	scsi_device_resume(sdev);
2371}
2372
2373void
2374scsi_target_resume(struct scsi_target *starget)
2375{
2376	starget_for_each_device(starget, NULL, device_resume_fn);
2377}
2378EXPORT_SYMBOL(scsi_target_resume);
2379
2380/**
2381 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2382 * @sdev: device to block
2383 *
2384 * Block request made by scsi lld's to temporarily stop all
2385 * scsi commands on the specified device. Called from interrupt
2386 * or normal process context.
2387 *
2388 * Returns zero if successful or error if not
2389 *
2390 * Notes:
2391 *	This routine transitions the device to the SDEV_BLOCK state
2392 *	(which must be a legal transition). When the device is in this
2393 *	state, all commands are deferred until the scsi lld reenables
2394 *	the device with scsi_internal_device_unblock or device_block_tmo fires.
2395 *	This routine assumes the host_lock is held on entry.
2396 */
2397int
2398scsi_internal_device_block(struct scsi_device *sdev)
2399{
2400	struct request_queue *q = sdev->request_queue;
2401	unsigned long flags;
2402	int err = 0;
2403
2404	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2405	if (err) {
2406		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2407
2408		if (err)
2409			return err;
2410	}
2411
2412	/*
2413	 * The device has transitioned to SDEV_BLOCK. Stop the
2414	 * block layer from calling the midlayer with this device's
2415	 * request queue.
2416	 */
2417	spin_lock_irqsave(q->queue_lock, flags);
2418	blk_stop_queue(q);
2419	spin_unlock_irqrestore(q->queue_lock, flags);
2420
2421	return 0;
2422}
2423EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2424
2425/**
2426 * scsi_internal_device_unblock - resume a device after a block request
2427 * @sdev: device to resume
2428 *
2429 * Called by scsi lld's or the midlayer to restart the device queue
2430 * for the previously suspended scsi device. Called from interrupt or
2431 * normal process context.
2432 *
2433 * Returns zero if successful or error if not.
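 *
 * A hypothetical LLD-side sketch (editorial addition): a driver that needs
 * to pause I/O around a firmware reset might, with the host_lock held as
 * described in the Notes of both routines, do
 *
 *	scsi_internal_device_block(sdev);
 *	... reset the hardware, rediscover state ...
 *	scsi_internal_device_unblock(sdev);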
2434 *
2435 * Notes:
2436 *	This routine transitions the device to the SDEV_RUNNING state
2437 *	(which must be a legal transition) allowing the midlayer to
2438 *	goose the queue for this device. This routine assumes the
2439 *	host_lock is held upon entry.
2440 */
2441int
2442scsi_internal_device_unblock(struct scsi_device *sdev)
2443{
2444	struct request_queue *q = sdev->request_queue;
2445	int err;
2446	unsigned long flags;
2447
2448	/*
2449	 * Try to transition the scsi device to SDEV_RUNNING
2450	 * and goose the device queue if successful.
2451	 */
2452	err = scsi_device_set_state(sdev, SDEV_RUNNING);
2453	if (err) {
2454		err = scsi_device_set_state(sdev, SDEV_CREATED);
2455
2456		if (err)
2457			return err;
2458	}
2459
2460	spin_lock_irqsave(q->queue_lock, flags);
2461	blk_start_queue(q);
2462	spin_unlock_irqrestore(q->queue_lock, flags);
2463
2464	return 0;
2465}
2466EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2467
2468static void
2469device_block(struct scsi_device *sdev, void *data)
2470{
2471	scsi_internal_device_block(sdev);
2472}
2473
2474static int
2475target_block(struct device *dev, void *data)
2476{
2477	if (scsi_is_target_device(dev))
2478		starget_for_each_device(to_scsi_target(dev), NULL,
2479					device_block);
2480	return 0;
2481}
2482
2483void
2484scsi_target_block(struct device *dev)
2485{
2486	if (scsi_is_target_device(dev))
2487		starget_for_each_device(to_scsi_target(dev), NULL,
2488					device_block);
2489	else
2490		device_for_each_child(dev, NULL, target_block);
2491}
2492EXPORT_SYMBOL_GPL(scsi_target_block);
2493
2494static void
2495device_unblock(struct scsi_device *sdev, void *data)
2496{
2497	scsi_internal_device_unblock(sdev);
2498}
2499
2500static int
2501target_unblock(struct device *dev, void *data)
2502{
2503	if (scsi_is_target_device(dev))
2504		starget_for_each_device(to_scsi_target(dev), NULL,
2505					device_unblock);
2506	return 0;
2507}
2508
2509void
2510scsi_target_unblock(struct device *dev)
2511{
2512	if (scsi_is_target_device(dev))
2513		starget_for_each_device(to_scsi_target(dev), NULL,
2514					device_unblock);
2515	else
2516		device_for_each_child(dev, NULL, target_unblock);
2517}
2518EXPORT_SYMBOL_GPL(scsi_target_unblock);
2519
2520/**
2521 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2522 * @sgl: scatter-gather list
2523 * @sg_count: number of segments in sg
2524 * @offset: offset in bytes into sg, on return offset into the mapped area
2525 * @len: bytes to map, on return number of bytes mapped
2526 *
2527 * Returns virtual address of the start of the mapped page
2528 */
2529void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2530			  size_t *offset, size_t *len)
2531{
2532	int i;
2533	size_t sg_len = 0, len_complete = 0;
2534	struct scatterlist *sg;
2535	struct page *page;
2536
2537	WARN_ON(!irqs_disabled());
2538
2539	for_each_sg(sgl, sg, sg_count, i) {
2540		len_complete = sg_len; /* Complete sg-entries */
2541		sg_len += sg->length;
2542		if (sg_len > *offset)
2543			break;
2544	}
2545
2546	if (unlikely(i == sg_count)) {
2547		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2548			"elements %d\n",
2549		       __func__, sg_len, *offset, sg_count);
2550		WARN_ON(1);
2551		return NULL;
2552	}
2553
2554	/* Offset starting from the beginning of first page in this sg-entry */
2555	*offset = *offset - len_complete + sg->offset;
2556
2557	/* Assumption: contiguous pages can be accessed as "page + i" */
2558	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2559	*offset &= ~PAGE_MASK;
2560
2561	/* Bytes in this sg-entry from *offset to the end of the page */
2562	sg_len = PAGE_SIZE - *offset;
2563	if (*len > sg_len)
2564		*len = sg_len;
2565
2566	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2567}
2568EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2569
2570/**
2571 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2572 * @virt: virtual address to be unmapped
2573 */
2574void scsi_kunmap_atomic_sg(void *virt)
2575{
2576	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2577}
2578EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
2579
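
/*
 * Editorial sketch (not part of the original file): one plausible way a
 * driver could use the atomic-kmap helpers above to peek at the start of a
 * command's data buffer. scsi_sglist() and scsi_sg_count() are the standard
 * scsi_cmnd accessors; the function and its name are illustrative only.
 */
static inline void scsi_example_peek_buffer(struct scsi_cmnd *cmd)
{
	size_t offset = 0;	/* in: byte offset into the sg list;
				 * out: offset into the mapped page */
	size_t len = 8;		/* in: bytes wanted; out: bytes mapped */
	unsigned long flags;
	char *vaddr;

	local_irq_save(flags);	/* scsi_kmap_atomic_sg() expects irqs off */
	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (vaddr) {
		/* at most 'len' bytes are valid starting at vaddr + offset */
		scsi_kunmap_atomic_sg(vaddr);
	}
	local_irq_restore(flags);
}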