scsi_lib.c revision a9bddd74630b2a1f2dedc537417c372b2d9edc76
1/* 2 * scsi_lib.c Copyright (C) 1999 Eric Youngdale 3 * 4 * SCSI queueing library. 5 * Initial versions: Eric Youngdale (eric@andante.org). 6 * Based upon conversations with large numbers 7 * of people at Linux Expo. 8 */ 9 10#include <linux/bio.h> 11#include <linux/bitops.h> 12#include <linux/blkdev.h> 13#include <linux/completion.h> 14#include <linux/kernel.h> 15#include <linux/mempool.h> 16#include <linux/slab.h> 17#include <linux/init.h> 18#include <linux/pci.h> 19#include <linux/delay.h> 20#include <linux/hardirq.h> 21#include <linux/scatterlist.h> 22 23#include <scsi/scsi.h> 24#include <scsi/scsi_cmnd.h> 25#include <scsi/scsi_dbg.h> 26#include <scsi/scsi_device.h> 27#include <scsi/scsi_driver.h> 28#include <scsi/scsi_eh.h> 29#include <scsi/scsi_host.h> 30 31#include "scsi_priv.h" 32#include "scsi_logging.h" 33 34 35#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) 36#define SG_MEMPOOL_SIZE 2 37 38struct scsi_host_sg_pool { 39 size_t size; 40 char *name; 41 struct kmem_cache *slab; 42 mempool_t *pool; 43}; 44 45#define SP(x) { x, "sgpool-" __stringify(x) } 46#if (SCSI_MAX_SG_SEGMENTS < 32) 47#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) 48#endif 49static struct scsi_host_sg_pool scsi_sg_pools[] = { 50 SP(8), 51 SP(16), 52#if (SCSI_MAX_SG_SEGMENTS > 32) 53 SP(32), 54#if (SCSI_MAX_SG_SEGMENTS > 64) 55 SP(64), 56#if (SCSI_MAX_SG_SEGMENTS > 128) 57 SP(128), 58#if (SCSI_MAX_SG_SEGMENTS > 256) 59#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX) 60#endif 61#endif 62#endif 63#endif 64 SP(SCSI_MAX_SG_SEGMENTS) 65}; 66#undef SP 67 68struct kmem_cache *scsi_sdb_cache; 69 70static void scsi_run_queue(struct request_queue *q); 71 72/* 73 * Function: scsi_unprep_request() 74 * 75 * Purpose: Remove all preparation done for a request, including its 76 * associated scsi_cmnd, so that it can be requeued. 77 * 78 * Arguments: req - request to unprepare 79 * 80 * Lock status: Assumed that no locks are held upon entry. 81 * 82 * Returns: Nothing. 83 */ 84static void scsi_unprep_request(struct request *req) 85{ 86 struct scsi_cmnd *cmd = req->special; 87 88 req->cmd_flags &= ~REQ_DONTPREP; 89 req->special = NULL; 90 91 scsi_put_command(cmd); 92} 93 94/** 95 * __scsi_queue_insert - private queue insertion 96 * @cmd: The SCSI command being requeued 97 * @reason: The reason for the requeue 98 * @unbusy: Whether the queue should be unbusied 99 * 100 * This is a private queue insertion. The public interface 101 * scsi_queue_insert() always assumes the queue should be unbusied 102 * because it's always called before the completion. This function is 103 * for a requeue after completion, which should only occur in this 104 * file. 105 */ 106static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) 107{ 108 struct Scsi_Host *host = cmd->device->host; 109 struct scsi_device *device = cmd->device; 110 struct scsi_target *starget = scsi_target(device); 111 struct request_queue *q = device->request_queue; 112 unsigned long flags; 113 114 SCSI_LOG_MLQUEUE(1, 115 printk("Inserting command %p into mlqueue\n", cmd)); 116 117 /* 118 * Set the appropriate busy bit for the device/host. 119 * 120 * If the host/device isn't busy, assume that something actually 121 * completed, and that we should be able to queue a command now. 122 * 123 * Note that the prior mid-layer assumption that any host could 124 * always queue at least one command is now broken. The mid-layer 125 * will implement a user specifiable stall (see 126 * scsi_host.max_host_blocked and scsi_device.max_device_blocked) 127 * if a command is requeued with no other commands outstanding 128 * either for the device or for the host. 129 */ 130 switch (reason) { 131 case SCSI_MLQUEUE_HOST_BUSY: 132 host->host_blocked = host->max_host_blocked; 133 break; 134 case SCSI_MLQUEUE_DEVICE_BUSY: 135 device->device_blocked = device->max_device_blocked; 136 break; 137 case SCSI_MLQUEUE_TARGET_BUSY: 138 starget->target_blocked = starget->max_target_blocked; 139 break; 140 } 141 142 /* 143 * Decrement the counters, since these commands are no longer 144 * active on the host/device. 145 */ 146 if (unbusy) 147 scsi_device_unbusy(device); 148 149 /* 150 * Requeue this command. It will go before all other commands 151 * that are already in the queue. 152 * 153 * NOTE: there is magic here about the way the queue is plugged if 154 * we have no outstanding commands. 155 * 156 * Although we *don't* plug the queue, we call the request 157 * function. The SCSI request function detects the blocked condition 158 * and plugs the queue appropriately. 159 */ 160 spin_lock_irqsave(q->queue_lock, flags); 161 blk_requeue_request(q, cmd->request); 162 spin_unlock_irqrestore(q->queue_lock, flags); 163 164 scsi_run_queue(q); 165 166 return 0; 167} 168 169/* 170 * Function: scsi_queue_insert() 171 * 172 * Purpose: Insert a command in the midlevel queue. 173 * 174 * Arguments: cmd - command that we are adding to queue. 175 * reason - why we are inserting command to queue. 176 * 177 * Lock status: Assumed that lock is not held upon entry. 178 * 179 * Returns: Nothing. 180 * 181 * Notes: We do this for one of two cases. Either the host is busy 182 * and it cannot accept any more commands for the time being, 183 * or the device returned QUEUE_FULL and can accept no more 184 * commands. 185 * Notes: This could be called either from an interrupt context or a 186 * normal process context. 187 */ 188int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 189{ 190 return __scsi_queue_insert(cmd, reason, 1); 191} 192/** 193 * scsi_execute - insert request and wait for the result 194 * @sdev: scsi device 195 * @cmd: scsi command 196 * @data_direction: data direction 197 * @buffer: data buffer 198 * @bufflen: len of buffer 199 * @sense: optional sense buffer 200 * @timeout: request timeout in seconds 201 * @retries: number of times to retry request 202 * @flags: or into request flags; 203 * @resid: optional residual length 204 * 205 * returns the req->errors value which is the scsi_cmnd result 206 * field. 207 */ 208int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 209 int data_direction, void *buffer, unsigned bufflen, 210 unsigned char *sense, int timeout, int retries, int flags, 211 int *resid) 212{ 213 struct request *req; 214 int write = (data_direction == DMA_TO_DEVICE); 215 int ret = DRIVER_ERROR << 24; 216 217 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); 218 219 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, 220 buffer, bufflen, __GFP_WAIT)) 221 goto out; 222 223 req->cmd_len = COMMAND_SIZE(cmd[0]); 224 memcpy(req->cmd, cmd, req->cmd_len); 225 req->sense = sense; 226 req->sense_len = 0; 227 req->retries = retries; 228 req->timeout = timeout; 229 req->cmd_type = REQ_TYPE_BLOCK_PC; 230 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; 231 232 /* 233 * head injection *required* here otherwise quiesce won't work 234 */ 235 blk_execute_rq(req->q, NULL, req, 1); 236 237 /* 238 * Some devices (USB mass-storage in particular) may transfer 239 * garbage data together with a residue indicating that the data 240 * is invalid. Prevent the garbage from being misinterpreted 241 * and prevent security leaks by zeroing out the excess data. 242 */ 243 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 244 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 245 246 if (resid) 247 *resid = req->data_len; 248 ret = req->errors; 249 out: 250 blk_put_request(req); 251 252 return ret; 253} 254EXPORT_SYMBOL(scsi_execute); 255 256 257int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, 258 int data_direction, void *buffer, unsigned bufflen, 259 struct scsi_sense_hdr *sshdr, int timeout, int retries, 260 int *resid) 261{ 262 char *sense = NULL; 263 int result; 264 265 if (sshdr) { 266 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); 267 if (!sense) 268 return DRIVER_ERROR << 24; 269 } 270 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 271 sense, timeout, retries, 0, resid); 272 if (sshdr) 273 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 274 275 kfree(sense); 276 return result; 277} 278EXPORT_SYMBOL(scsi_execute_req); 279 280/* 281 * Function: scsi_init_cmd_errh() 282 * 283 * Purpose: Initialize cmd fields related to error handling. 284 * 285 * Arguments: cmd - command that is ready to be queued. 286 * 287 * Notes: This function has the job of initializing a number of 288 * fields related to error handling. Typically this will 289 * be called once for each command, as required. 290 */ 291static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) 292{ 293 cmd->serial_number = 0; 294 scsi_set_resid(cmd, 0); 295 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 296 if (cmd->cmd_len == 0) 297 cmd->cmd_len = scsi_command_size(cmd->cmnd); 298} 299 300void scsi_device_unbusy(struct scsi_device *sdev) 301{ 302 struct Scsi_Host *shost = sdev->host; 303 struct scsi_target *starget = scsi_target(sdev); 304 unsigned long flags; 305 306 spin_lock_irqsave(shost->host_lock, flags); 307 shost->host_busy--; 308 starget->target_busy--; 309 if (unlikely(scsi_host_in_recovery(shost) && 310 (shost->host_failed || shost->host_eh_scheduled))) 311 scsi_eh_wakeup(shost); 312 spin_unlock(shost->host_lock); 313 spin_lock(sdev->request_queue->queue_lock); 314 sdev->device_busy--; 315 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 316} 317 318/* 319 * Called for single_lun devices on IO completion. Clear starget_sdev_user, 320 * and call blk_run_queue for all the scsi_devices on the target - 321 * including current_sdev first. 322 * 323 * Called with *no* scsi locks held. 324 */ 325static void scsi_single_lun_run(struct scsi_device *current_sdev) 326{ 327 struct Scsi_Host *shost = current_sdev->host; 328 struct scsi_device *sdev, *tmp; 329 struct scsi_target *starget = scsi_target(current_sdev); 330 unsigned long flags; 331 332 spin_lock_irqsave(shost->host_lock, flags); 333 starget->starget_sdev_user = NULL; 334 spin_unlock_irqrestore(shost->host_lock, flags); 335 336 /* 337 * Call blk_run_queue for all LUNs on the target, starting with 338 * current_sdev. We race with others (to set starget_sdev_user), 339 * but in most cases, we will be first. Ideally, each LU on the 340 * target would get some limited time or requests on the target. 341 */ 342 blk_run_queue(current_sdev->request_queue); 343 344 spin_lock_irqsave(shost->host_lock, flags); 345 if (starget->starget_sdev_user) 346 goto out; 347 list_for_each_entry_safe(sdev, tmp, &starget->devices, 348 same_target_siblings) { 349 if (sdev == current_sdev) 350 continue; 351 if (scsi_device_get(sdev)) 352 continue; 353 354 spin_unlock_irqrestore(shost->host_lock, flags); 355 blk_run_queue(sdev->request_queue); 356 spin_lock_irqsave(shost->host_lock, flags); 357 358 scsi_device_put(sdev); 359 } 360 out: 361 spin_unlock_irqrestore(shost->host_lock, flags); 362} 363 364static inline int scsi_device_is_busy(struct scsi_device *sdev) 365{ 366 if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked) 367 return 1; 368 369 return 0; 370} 371 372static inline int scsi_target_is_busy(struct scsi_target *starget) 373{ 374 return ((starget->can_queue > 0 && 375 starget->target_busy >= starget->can_queue) || 376 starget->target_blocked); 377} 378 379static inline int scsi_host_is_busy(struct Scsi_Host *shost) 380{ 381 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || 382 shost->host_blocked || shost->host_self_blocked) 383 return 1; 384 385 return 0; 386} 387 388/* 389 * Function: scsi_run_queue() 390 * 391 * Purpose: Select a proper request queue to serve next 392 * 393 * Arguments: q - last request's queue 394 * 395 * Returns: Nothing 396 * 397 * Notes: The previous command was completely finished, start 398 * a new one if possible. 399 */ 400static void scsi_run_queue(struct request_queue *q) 401{ 402 struct scsi_device *sdev = q->queuedata; 403 struct Scsi_Host *shost = sdev->host; 404 LIST_HEAD(starved_list); 405 unsigned long flags; 406 407 if (scsi_target(sdev)->single_lun) 408 scsi_single_lun_run(sdev); 409 410 spin_lock_irqsave(shost->host_lock, flags); 411 list_splice_init(&shost->starved_list, &starved_list); 412 413 while (!list_empty(&starved_list)) { 414 int flagset; 415 416 /* 417 * As long as shost is accepting commands and we have 418 * starved queues, call blk_run_queue. scsi_request_fn 419 * drops the queue_lock and can add us back to the 420 * starved_list. 421 * 422 * host_lock protects the starved_list and starved_entry. 423 * scsi_request_fn must get the host_lock before checking 424 * or modifying starved_list or starved_entry. 425 */ 426 if (scsi_host_is_busy(shost)) 427 break; 428 429 sdev = list_entry(starved_list.next, 430 struct scsi_device, starved_entry); 431 list_del_init(&sdev->starved_entry); 432 if (scsi_target_is_busy(scsi_target(sdev))) { 433 list_move_tail(&sdev->starved_entry, 434 &shost->starved_list); 435 continue; 436 } 437 438 spin_unlock(shost->host_lock); 439 440 spin_lock(sdev->request_queue->queue_lock); 441 flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && 442 !test_bit(QUEUE_FLAG_REENTER, 443 &sdev->request_queue->queue_flags); 444 if (flagset) 445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 446 __blk_run_queue(sdev->request_queue); 447 if (flagset) 448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 449 spin_unlock(sdev->request_queue->queue_lock); 450 451 spin_lock(shost->host_lock); 452 } 453 /* put any unprocessed entries back */ 454 list_splice(&starved_list, &shost->starved_list); 455 spin_unlock_irqrestore(shost->host_lock, flags); 456 457 blk_run_queue(q); 458} 459 460/* 461 * Function: scsi_requeue_command() 462 * 463 * Purpose: Handle post-processing of completed commands. 464 * 465 * Arguments: q - queue to operate on 466 * cmd - command that may need to be requeued. 467 * 468 * Returns: Nothing 469 * 470 * Notes: After command completion, there may be blocks left 471 * over which weren't finished by the previous command 472 * this can be for a number of reasons - the main one is 473 * I/O errors in the middle of the request, in which case 474 * we need to request the blocks that come after the bad 475 * sector. 476 * Notes: Upon return, cmd is a stale pointer. 477 */ 478static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) 479{ 480 struct request *req = cmd->request; 481 unsigned long flags; 482 483 spin_lock_irqsave(q->queue_lock, flags); 484 scsi_unprep_request(req); 485 blk_requeue_request(q, req); 486 spin_unlock_irqrestore(q->queue_lock, flags); 487 488 scsi_run_queue(q); 489} 490 491void scsi_next_command(struct scsi_cmnd *cmd) 492{ 493 struct scsi_device *sdev = cmd->device; 494 struct request_queue *q = sdev->request_queue; 495 496 /* need to hold a reference on the device before we let go of the cmd */ 497 get_device(&sdev->sdev_gendev); 498 499 scsi_put_command(cmd); 500 scsi_run_queue(q); 501 502 /* ok to remove device now */ 503 put_device(&sdev->sdev_gendev); 504} 505 506void scsi_run_host_queues(struct Scsi_Host *shost) 507{ 508 struct scsi_device *sdev; 509 510 shost_for_each_device(sdev, shost) 511 scsi_run_queue(sdev->request_queue); 512} 513 514static void __scsi_release_buffers(struct scsi_cmnd *, int); 515 516/* 517 * Function: scsi_end_request() 518 * 519 * Purpose: Post-processing of completed commands (usually invoked at end 520 * of upper level post-processing and scsi_io_completion). 521 * 522 * Arguments: cmd - command that is complete. 523 * error - 0 if I/O indicates success, < 0 for I/O error. 524 * bytes - number of bytes of completed I/O 525 * requeue - indicates whether we should requeue leftovers. 526 * 527 * Lock status: Assumed that lock is not held upon entry. 528 * 529 * Returns: cmd if requeue required, NULL otherwise. 530 * 531 * Notes: This is called for block device requests in order to 532 * mark some number of sectors as complete. 533 * 534 * We are guaranteeing that the request queue will be goosed 535 * at some point during this call. 536 * Notes: If cmd was requeued, upon return it will be a stale pointer. 537 */ 538static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, 539 int bytes, int requeue) 540{ 541 struct request_queue *q = cmd->device->request_queue; 542 struct request *req = cmd->request; 543 544 /* 545 * If there are blocks left over at the end, set up the command 546 * to queue the remainder of them. 547 */ 548 if (blk_end_request(req, error, bytes)) { 549 int leftover = (req->hard_nr_sectors << 9); 550 551 if (blk_pc_request(req)) 552 leftover = req->data_len; 553 554 /* kill remainder if no retrys */ 555 if (error && scsi_noretry_cmd(cmd)) 556 blk_end_request(req, error, leftover); 557 else { 558 if (requeue) { 559 /* 560 * Bleah. Leftovers again. Stick the 561 * leftovers in the front of the 562 * queue, and goose the queue again. 563 */ 564 scsi_release_buffers(cmd); 565 scsi_requeue_command(q, cmd); 566 cmd = NULL; 567 } 568 return cmd; 569 } 570 } 571 572 /* 573 * This will goose the queue request function at the end, so we don't 574 * need to worry about launching another command. 575 */ 576 __scsi_release_buffers(cmd, 0); 577 scsi_next_command(cmd); 578 return NULL; 579} 580 581static inline unsigned int scsi_sgtable_index(unsigned short nents) 582{ 583 unsigned int index; 584 585 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); 586 587 if (nents <= 8) 588 index = 0; 589 else 590 index = get_count_order(nents) - 3; 591 592 return index; 593} 594 595static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) 596{ 597 struct scsi_host_sg_pool *sgp; 598 599 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 600 mempool_free(sgl, sgp->pool); 601} 602 603static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) 604{ 605 struct scsi_host_sg_pool *sgp; 606 607 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 608 return mempool_alloc(sgp->pool, gfp_mask); 609} 610 611static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, 612 gfp_t gfp_mask) 613{ 614 int ret; 615 616 BUG_ON(!nents); 617 618 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, 619 gfp_mask, scsi_sg_alloc); 620 if (unlikely(ret)) 621 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, 622 scsi_sg_free); 623 624 return ret; 625} 626 627static void scsi_free_sgtable(struct scsi_data_buffer *sdb) 628{ 629 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); 630} 631 632static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) 633{ 634 635 if (cmd->sdb.table.nents) 636 scsi_free_sgtable(&cmd->sdb); 637 638 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 639 640 if (do_bidi_check && scsi_bidi_cmnd(cmd)) { 641 struct scsi_data_buffer *bidi_sdb = 642 cmd->request->next_rq->special; 643 scsi_free_sgtable(bidi_sdb); 644 kmem_cache_free(scsi_sdb_cache, bidi_sdb); 645 cmd->request->next_rq->special = NULL; 646 } 647 648 if (scsi_prot_sg_count(cmd)) 649 scsi_free_sgtable(cmd->prot_sdb); 650} 651 652/* 653 * Function: scsi_release_buffers() 654 * 655 * Purpose: Completion processing for block device I/O requests. 656 * 657 * Arguments: cmd - command that we are bailing. 658 * 659 * Lock status: Assumed that no lock is held upon entry. 660 * 661 * Returns: Nothing 662 * 663 * Notes: In the event that an upper level driver rejects a 664 * command, we must release resources allocated during 665 * the __init_io() function. Primarily this would involve 666 * the scatter-gather table, and potentially any bounce 667 * buffers. 668 */ 669void scsi_release_buffers(struct scsi_cmnd *cmd) 670{ 671 __scsi_release_buffers(cmd, 1); 672} 673EXPORT_SYMBOL(scsi_release_buffers); 674 675/* 676 * Bidi commands Must be complete as a whole, both sides at once. 677 * If part of the bytes were written and lld returned 678 * scsi_in()->resid and/or scsi_out()->resid this information will be left 679 * in req->data_len and req->next_rq->data_len. The upper-layer driver can 680 * decide what to do with this information. 681 */ 682static void scsi_end_bidi_request(struct scsi_cmnd *cmd) 683{ 684 struct request *req = cmd->request; 685 unsigned int dlen = req->data_len; 686 unsigned int next_dlen = req->next_rq->data_len; 687 688 req->data_len = scsi_out(cmd)->resid; 689 req->next_rq->data_len = scsi_in(cmd)->resid; 690 691 /* The req and req->next_rq have not been completed */ 692 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen)); 693 694 scsi_release_buffers(cmd); 695 696 /* 697 * This will goose the queue request function at the end, so we don't 698 * need to worry about launching another command. 699 */ 700 scsi_next_command(cmd); 701} 702 703/* 704 * Function: scsi_io_completion() 705 * 706 * Purpose: Completion processing for block device I/O requests. 707 * 708 * Arguments: cmd - command that is finished. 709 * 710 * Lock status: Assumed that no lock is held upon entry. 711 * 712 * Returns: Nothing 713 * 714 * Notes: This function is matched in terms of capabilities to 715 * the function that created the scatter-gather list. 716 * In other words, if there are no bounce buffers 717 * (the normal case for most drivers), we don't need 718 * the logic to deal with cleaning up afterwards. 719 * 720 * We must call scsi_end_request(). This will finish off 721 * the specified number of sectors. If we are done, the 722 * command block will be released and the queue function 723 * will be goosed. If we are not done then we have to 724 * figure out what to do next: 725 * 726 * a) We can call scsi_requeue_command(). The request 727 * will be unprepared and put back on the queue. Then 728 * a new command will be created for it. This should 729 * be used if we made forward progress, or if we want 730 * to switch from READ(10) to READ(6) for example. 731 * 732 * b) We can call scsi_queue_insert(). The request will 733 * be put back on the queue and retried using the same 734 * command as before, possibly after a delay. 735 * 736 * c) We can call blk_end_request() with -EIO to fail 737 * the remainder of the request. 738 */ 739void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 740{ 741 int result = cmd->result; 742 int this_count; 743 struct request_queue *q = cmd->device->request_queue; 744 struct request *req = cmd->request; 745 int error = 0; 746 struct scsi_sense_hdr sshdr; 747 int sense_valid = 0; 748 int sense_deferred = 0; 749 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, 750 ACTION_DELAYED_RETRY} action; 751 char *description = NULL; 752 753 if (result) { 754 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 755 if (sense_valid) 756 sense_deferred = scsi_sense_is_deferred(&sshdr); 757 } 758 759 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 760 req->errors = result; 761 if (result) { 762 if (sense_valid && req->sense) { 763 /* 764 * SG_IO wants current and deferred errors 765 */ 766 int len = 8 + cmd->sense_buffer[7]; 767 768 if (len > SCSI_SENSE_BUFFERSIZE) 769 len = SCSI_SENSE_BUFFERSIZE; 770 memcpy(req->sense, cmd->sense_buffer, len); 771 req->sense_len = len; 772 } 773 if (!sense_deferred) 774 error = -EIO; 775 } 776 if (scsi_bidi_cmnd(cmd)) { 777 /* will also release_buffers */ 778 scsi_end_bidi_request(cmd); 779 return; 780 } 781 req->data_len = scsi_get_resid(cmd); 782 } 783 784 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 785 786 /* 787 * Next deal with any sectors which we were able to correctly 788 * handle. 789 */ 790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 791 "%d bytes done.\n", 792 req->nr_sectors, good_bytes)); 793 794 /* 795 * Recovered errors need reporting, but they're always treated 796 * as success, so fiddle the result code here. For BLOCK_PC 797 * we already took a copy of the original into rq->errors which 798 * is what gets returned to the user 799 */ 800 if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) { 801 if (!(req->cmd_flags & REQ_QUIET)) 802 scsi_print_sense("", cmd); 803 result = 0; 804 /* BLOCK_PC may have set error */ 805 error = 0; 806 } 807 808 /* 809 * A number of bytes were successfully read. If there 810 * are leftovers and there is some kind of error 811 * (result != 0), retry the rest. 812 */ 813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 814 return; 815 this_count = blk_rq_bytes(req); 816 817 error = -EIO; 818 819 if (host_byte(result) == DID_RESET) { 820 /* Third party bus reset or reset for error recovery 821 * reasons. Just retry the command and see what 822 * happens. 823 */ 824 action = ACTION_RETRY; 825 } else if (sense_valid && !sense_deferred) { 826 switch (sshdr.sense_key) { 827 case UNIT_ATTENTION: 828 if (cmd->device->removable) { 829 /* Detected disc change. Set a bit 830 * and quietly refuse further access. 831 */ 832 cmd->device->changed = 1; 833 description = "Media Changed"; 834 action = ACTION_FAIL; 835 } else { 836 /* Must have been a power glitch, or a 837 * bus reset. Could not have been a 838 * media change, so we just retry the 839 * command and see what happens. 840 */ 841 action = ACTION_RETRY; 842 } 843 break; 844 case ILLEGAL_REQUEST: 845 /* If we had an ILLEGAL REQUEST returned, then 846 * we may have performed an unsupported 847 * command. The only thing this should be 848 * would be a ten byte read where only a six 849 * byte read was supported. Also, on a system 850 * where READ CAPACITY failed, we may have 851 * read past the end of the disk. 852 */ 853 if ((cmd->device->use_10_for_rw && 854 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 855 (cmd->cmnd[0] == READ_10 || 856 cmd->cmnd[0] == WRITE_10)) { 857 /* This will issue a new 6-byte command. */ 858 cmd->device->use_10_for_rw = 0; 859 action = ACTION_REPREP; 860 } else if (sshdr.asc == 0x10) /* DIX */ { 861 description = "Host Data Integrity Failure"; 862 action = ACTION_FAIL; 863 error = -EILSEQ; 864 } else 865 action = ACTION_FAIL; 866 break; 867 case ABORTED_COMMAND: 868 action = ACTION_FAIL; 869 if (sshdr.asc == 0x10) { /* DIF */ 870 description = "Target Data Integrity Failure"; 871 error = -EILSEQ; 872 } 873 break; 874 case NOT_READY: 875 /* If the device is in the process of becoming 876 * ready, or has a temporary blockage, retry. 877 */ 878 if (sshdr.asc == 0x04) { 879 switch (sshdr.ascq) { 880 case 0x01: /* becoming ready */ 881 case 0x04: /* format in progress */ 882 case 0x05: /* rebuild in progress */ 883 case 0x06: /* recalculation in progress */ 884 case 0x07: /* operation in progress */ 885 case 0x08: /* Long write in progress */ 886 case 0x09: /* self test in progress */ 887 action = ACTION_DELAYED_RETRY; 888 break; 889 default: 890 description = "Device not ready"; 891 action = ACTION_FAIL; 892 break; 893 } 894 } else { 895 description = "Device not ready"; 896 action = ACTION_FAIL; 897 } 898 break; 899 case VOLUME_OVERFLOW: 900 /* See SSC3rXX or current. */ 901 action = ACTION_FAIL; 902 break; 903 default: 904 description = "Unhandled sense code"; 905 action = ACTION_FAIL; 906 break; 907 } 908 } else { 909 description = "Unhandled error code"; 910 action = ACTION_FAIL; 911 } 912 913 switch (action) { 914 case ACTION_FAIL: 915 /* Give up and fail the remainder of the request */ 916 scsi_release_buffers(cmd); 917 if (!(req->cmd_flags & REQ_QUIET)) { 918 if (description) 919 scmd_printk(KERN_INFO, cmd, "%s\n", 920 description); 921 scsi_print_result(cmd); 922 if (driver_byte(result) & DRIVER_SENSE) 923 scsi_print_sense("", cmd); 924 } 925 blk_end_request(req, -EIO, blk_rq_bytes(req)); 926 scsi_next_command(cmd); 927 break; 928 case ACTION_REPREP: 929 /* Unprep the request and put it back at the head of the queue. 930 * A new command will be prepared and issued. 931 */ 932 scsi_release_buffers(cmd); 933 scsi_requeue_command(q, cmd); 934 break; 935 case ACTION_RETRY: 936 /* Retry the same command immediately */ 937 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0); 938 break; 939 case ACTION_DELAYED_RETRY: 940 /* Retry the same command after a delay */ 941 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); 942 break; 943 } 944} 945 946static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, 947 gfp_t gfp_mask) 948{ 949 int count; 950 951 /* 952 * If sg table allocation fails, requeue request later. 953 */ 954 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, 955 gfp_mask))) { 956 return BLKPREP_DEFER; 957 } 958 959 req->buffer = NULL; 960 961 /* 962 * Next, walk the list, and fill in the addresses and sizes of 963 * each segment. 964 */ 965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 966 BUG_ON(count > sdb->table.nents); 967 sdb->table.nents = count; 968 if (blk_pc_request(req)) 969 sdb->length = req->data_len; 970 else 971 sdb->length = req->nr_sectors << 9; 972 return BLKPREP_OK; 973} 974 975/* 976 * Function: scsi_init_io() 977 * 978 * Purpose: SCSI I/O initialize function. 979 * 980 * Arguments: cmd - Command descriptor we wish to initialize 981 * 982 * Returns: 0 on success 983 * BLKPREP_DEFER if the failure is retryable 984 * BLKPREP_KILL if the failure is fatal 985 */ 986int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) 987{ 988 int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask); 989 if (error) 990 goto err_exit; 991 992 if (blk_bidi_rq(cmd->request)) { 993 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( 994 scsi_sdb_cache, GFP_ATOMIC); 995 if (!bidi_sdb) { 996 error = BLKPREP_DEFER; 997 goto err_exit; 998 } 999 1000 cmd->request->next_rq->special = bidi_sdb; 1001 error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb, 1002 GFP_ATOMIC); 1003 if (error) 1004 goto err_exit; 1005 } 1006 1007 if (blk_integrity_rq(cmd->request)) { 1008 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; 1009 int ivecs, count; 1010 1011 BUG_ON(prot_sdb == NULL); 1012 ivecs = blk_rq_count_integrity_sg(cmd->request); 1013 1014 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { 1015 error = BLKPREP_DEFER; 1016 goto err_exit; 1017 } 1018 1019 count = blk_rq_map_integrity_sg(cmd->request, 1020 prot_sdb->table.sgl); 1021 BUG_ON(unlikely(count > ivecs)); 1022 1023 cmd->prot_sdb = prot_sdb; 1024 cmd->prot_sdb->table.nents = count; 1025 } 1026 1027 return BLKPREP_OK ; 1028 1029err_exit: 1030 scsi_release_buffers(cmd); 1031 if (error == BLKPREP_KILL) 1032 scsi_put_command(cmd); 1033 else /* BLKPREP_DEFER */ 1034 scsi_unprep_request(cmd->request); 1035 1036 return error; 1037} 1038EXPORT_SYMBOL(scsi_init_io); 1039 1040static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, 1041 struct request *req) 1042{ 1043 struct scsi_cmnd *cmd; 1044 1045 if (!req->special) { 1046 cmd = scsi_get_command(sdev, GFP_ATOMIC); 1047 if (unlikely(!cmd)) 1048 return NULL; 1049 req->special = cmd; 1050 } else { 1051 cmd = req->special; 1052 } 1053 1054 /* pull a tag out of the request if we have one */ 1055 cmd->tag = req->tag; 1056 cmd->request = req; 1057 1058 cmd->cmnd = req->cmd; 1059 1060 return cmd; 1061} 1062 1063int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) 1064{ 1065 struct scsi_cmnd *cmd; 1066 int ret = scsi_prep_state_check(sdev, req); 1067 1068 if (ret != BLKPREP_OK) 1069 return ret; 1070 1071 cmd = scsi_get_cmd_from_req(sdev, req); 1072 if (unlikely(!cmd)) 1073 return BLKPREP_DEFER; 1074 1075 /* 1076 * BLOCK_PC requests may transfer data, in which case they must 1077 * a bio attached to them. Or they might contain a SCSI command 1078 * that does not transfer data, in which case they may optionally 1079 * submit a request without an attached bio. 1080 */ 1081 if (req->bio) { 1082 int ret; 1083 1084 BUG_ON(!req->nr_phys_segments); 1085 1086 ret = scsi_init_io(cmd, GFP_ATOMIC); 1087 if (unlikely(ret)) 1088 return ret; 1089 } else { 1090 BUG_ON(req->data_len); 1091 BUG_ON(req->data); 1092 1093 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1094 req->buffer = NULL; 1095 } 1096 1097 cmd->cmd_len = req->cmd_len; 1098 if (!req->data_len) 1099 cmd->sc_data_direction = DMA_NONE; 1100 else if (rq_data_dir(req) == WRITE) 1101 cmd->sc_data_direction = DMA_TO_DEVICE; 1102 else 1103 cmd->sc_data_direction = DMA_FROM_DEVICE; 1104 1105 cmd->transfersize = req->data_len; 1106 cmd->allowed = req->retries; 1107 return BLKPREP_OK; 1108} 1109EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); 1110 1111/* 1112 * Setup a REQ_TYPE_FS command. These are simple read/write request 1113 * from filesystems that still need to be translated to SCSI CDBs from 1114 * the ULD. 1115 */ 1116int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) 1117{ 1118 struct scsi_cmnd *cmd; 1119 int ret = scsi_prep_state_check(sdev, req); 1120 1121 if (ret != BLKPREP_OK) 1122 return ret; 1123 1124 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh 1125 && sdev->scsi_dh_data->scsi_dh->prep_fn)) { 1126 ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); 1127 if (ret != BLKPREP_OK) 1128 return ret; 1129 } 1130 1131 /* 1132 * Filesystem requests must transfer data. 1133 */ 1134 BUG_ON(!req->nr_phys_segments); 1135 1136 cmd = scsi_get_cmd_from_req(sdev, req); 1137 if (unlikely(!cmd)) 1138 return BLKPREP_DEFER; 1139 1140 memset(cmd->cmnd, 0, BLK_MAX_CDB); 1141 return scsi_init_io(cmd, GFP_ATOMIC); 1142} 1143EXPORT_SYMBOL(scsi_setup_fs_cmnd); 1144 1145int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) 1146{ 1147 int ret = BLKPREP_OK; 1148 1149 /* 1150 * If the device is not in running state we will reject some 1151 * or all commands. 1152 */ 1153 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1154 switch (sdev->sdev_state) { 1155 case SDEV_OFFLINE: 1156 /* 1157 * If the device is offline we refuse to process any 1158 * commands. The device must be brought online 1159 * before trying any recovery commands. 1160 */ 1161 sdev_printk(KERN_ERR, sdev, 1162 "rejecting I/O to offline device\n"); 1163 ret = BLKPREP_KILL; 1164 break; 1165 case SDEV_DEL: 1166 /* 1167 * If the device is fully deleted, we refuse to 1168 * process any commands as well. 1169 */ 1170 sdev_printk(KERN_ERR, sdev, 1171 "rejecting I/O to dead device\n"); 1172 ret = BLKPREP_KILL; 1173 break; 1174 case SDEV_QUIESCE: 1175 case SDEV_BLOCK: 1176 case SDEV_CREATED_BLOCK: 1177 /* 1178 * If the devices is blocked we defer normal commands. 1179 */ 1180 if (!(req->cmd_flags & REQ_PREEMPT)) 1181 ret = BLKPREP_DEFER; 1182 break; 1183 default: 1184 /* 1185 * For any other not fully online state we only allow 1186 * special commands. In particular any user initiated 1187 * command is not allowed. 1188 */ 1189 if (!(req->cmd_flags & REQ_PREEMPT)) 1190 ret = BLKPREP_KILL; 1191 break; 1192 } 1193 } 1194 return ret; 1195} 1196EXPORT_SYMBOL(scsi_prep_state_check); 1197 1198int scsi_prep_return(struct request_queue *q, struct request *req, int ret) 1199{ 1200 struct scsi_device *sdev = q->queuedata; 1201 1202 switch (ret) { 1203 case BLKPREP_KILL: 1204 req->errors = DID_NO_CONNECT << 16; 1205 /* release the command and kill it */ 1206 if (req->special) { 1207 struct scsi_cmnd *cmd = req->special; 1208 scsi_release_buffers(cmd); 1209 scsi_put_command(cmd); 1210 req->special = NULL; 1211 } 1212 break; 1213 case BLKPREP_DEFER: 1214 /* 1215 * If we defer, the elv_next_request() returns NULL, but the 1216 * queue must be restarted, so we plug here if no returning 1217 * command will automatically do that. 1218 */ 1219 if (sdev->device_busy == 0) 1220 blk_plug_device(q); 1221 break; 1222 default: 1223 req->cmd_flags |= REQ_DONTPREP; 1224 } 1225 1226 return ret; 1227} 1228EXPORT_SYMBOL(scsi_prep_return); 1229 1230int scsi_prep_fn(struct request_queue *q, struct request *req) 1231{ 1232 struct scsi_device *sdev = q->queuedata; 1233 int ret = BLKPREP_KILL; 1234 1235 if (req->cmd_type == REQ_TYPE_BLOCK_PC) 1236 ret = scsi_setup_blk_pc_cmnd(sdev, req); 1237 return scsi_prep_return(q, req, ret); 1238} 1239 1240/* 1241 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else 1242 * return 0. 1243 * 1244 * Called with the queue_lock held. 1245 */ 1246static inline int scsi_dev_queue_ready(struct request_queue *q, 1247 struct scsi_device *sdev) 1248{ 1249 if (sdev->device_busy == 0 && sdev->device_blocked) { 1250 /* 1251 * unblock after device_blocked iterates to zero 1252 */ 1253 if (--sdev->device_blocked == 0) { 1254 SCSI_LOG_MLQUEUE(3, 1255 sdev_printk(KERN_INFO, sdev, 1256 "unblocking device at zero depth\n")); 1257 } else { 1258 blk_plug_device(q); 1259 return 0; 1260 } 1261 } 1262 if (scsi_device_is_busy(sdev)) 1263 return 0; 1264 1265 return 1; 1266} 1267 1268 1269/* 1270 * scsi_target_queue_ready: checks if there we can send commands to target 1271 * @sdev: scsi device on starget to check. 1272 * 1273 * Called with the host lock held. 1274 */ 1275static inline int scsi_target_queue_ready(struct Scsi_Host *shost, 1276 struct scsi_device *sdev) 1277{ 1278 struct scsi_target *starget = scsi_target(sdev); 1279 1280 if (starget->single_lun) { 1281 if (starget->starget_sdev_user && 1282 starget->starget_sdev_user != sdev) 1283 return 0; 1284 starget->starget_sdev_user = sdev; 1285 } 1286 1287 if (starget->target_busy == 0 && starget->target_blocked) { 1288 /* 1289 * unblock after target_blocked iterates to zero 1290 */ 1291 if (--starget->target_blocked == 0) { 1292 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, 1293 "unblocking target at zero depth\n")); 1294 } else { 1295 blk_plug_device(sdev->request_queue); 1296 return 0; 1297 } 1298 } 1299 1300 if (scsi_target_is_busy(starget)) { 1301 if (list_empty(&sdev->starved_entry)) { 1302 list_add_tail(&sdev->starved_entry, 1303 &shost->starved_list); 1304 return 0; 1305 } 1306 } 1307 1308 /* We're OK to process the command, so we can't be starved */ 1309 if (!list_empty(&sdev->starved_entry)) 1310 list_del_init(&sdev->starved_entry); 1311 return 1; 1312} 1313 1314/* 1315 * scsi_host_queue_ready: if we can send requests to shost, return 1 else 1316 * return 0. We must end up running the queue again whenever 0 is 1317 * returned, else IO can hang. 1318 * 1319 * Called with host_lock held. 1320 */ 1321static inline int scsi_host_queue_ready(struct request_queue *q, 1322 struct Scsi_Host *shost, 1323 struct scsi_device *sdev) 1324{ 1325 if (scsi_host_in_recovery(shost)) 1326 return 0; 1327 if (shost->host_busy == 0 && shost->host_blocked) { 1328 /* 1329 * unblock after host_blocked iterates to zero 1330 */ 1331 if (--shost->host_blocked == 0) { 1332 SCSI_LOG_MLQUEUE(3, 1333 printk("scsi%d unblocking host at zero depth\n", 1334 shost->host_no)); 1335 } else { 1336 return 0; 1337 } 1338 } 1339 if (scsi_host_is_busy(shost)) { 1340 if (list_empty(&sdev->starved_entry)) 1341 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1342 return 0; 1343 } 1344 1345 /* We're OK to process the command, so we can't be starved */ 1346 if (!list_empty(&sdev->starved_entry)) 1347 list_del_init(&sdev->starved_entry); 1348 1349 return 1; 1350} 1351 1352/* 1353 * Busy state exporting function for request stacking drivers. 1354 * 1355 * For efficiency, no lock is taken to check the busy state of 1356 * shost/starget/sdev, since the returned value is not guaranteed and 1357 * may be changed after request stacking drivers call the function, 1358 * regardless of taking lock or not. 1359 * 1360 * When scsi can't dispatch I/Os anymore and needs to kill I/Os 1361 * (e.g. !sdev), scsi needs to return 'not busy'. 1362 * Otherwise, request stacking drivers may hold requests forever. 1363 */ 1364static int scsi_lld_busy(struct request_queue *q) 1365{ 1366 struct scsi_device *sdev = q->queuedata; 1367 struct Scsi_Host *shost; 1368 struct scsi_target *starget; 1369 1370 if (!sdev) 1371 return 0; 1372 1373 shost = sdev->host; 1374 starget = scsi_target(sdev); 1375 1376 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || 1377 scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) 1378 return 1; 1379 1380 return 0; 1381} 1382 1383/* 1384 * Kill a request for a dead device 1385 */ 1386static void scsi_kill_request(struct request *req, struct request_queue *q) 1387{ 1388 struct scsi_cmnd *cmd = req->special; 1389 struct scsi_device *sdev = cmd->device; 1390 struct scsi_target *starget = scsi_target(sdev); 1391 struct Scsi_Host *shost = sdev->host; 1392 1393 blkdev_dequeue_request(req); 1394 1395 if (unlikely(cmd == NULL)) { 1396 printk(KERN_CRIT "impossible request in %s.\n", 1397 __func__); 1398 BUG(); 1399 } 1400 1401 scsi_init_cmd_errh(cmd); 1402 cmd->result = DID_NO_CONNECT << 16; 1403 atomic_inc(&cmd->device->iorequest_cnt); 1404 1405 /* 1406 * SCSI request completion path will do scsi_device_unbusy(), 1407 * bump busy counts. To bump the counters, we need to dance 1408 * with the locks as normal issue path does. 1409 */ 1410 sdev->device_busy++; 1411 spin_unlock(sdev->request_queue->queue_lock); 1412 spin_lock(shost->host_lock); 1413 shost->host_busy++; 1414 starget->target_busy++; 1415 spin_unlock(shost->host_lock); 1416 spin_lock(sdev->request_queue->queue_lock); 1417 1418 blk_complete_request(req); 1419} 1420 1421static void scsi_softirq_done(struct request *rq) 1422{ 1423 struct scsi_cmnd *cmd = rq->special; 1424 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; 1425 int disposition; 1426 1427 INIT_LIST_HEAD(&cmd->eh_entry); 1428 1429 /* 1430 * Set the serial numbers back to zero 1431 */ 1432 cmd->serial_number = 0; 1433 1434 atomic_inc(&cmd->device->iodone_cnt); 1435 if (cmd->result) 1436 atomic_inc(&cmd->device->ioerr_cnt); 1437 1438 disposition = scsi_decide_disposition(cmd); 1439 if (disposition != SUCCESS && 1440 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1441 sdev_printk(KERN_ERR, cmd->device, 1442 "timing out command, waited %lus\n", 1443 wait_for/HZ); 1444 disposition = SUCCESS; 1445 } 1446 1447 scsi_log_completion(cmd, disposition); 1448 1449 switch (disposition) { 1450 case SUCCESS: 1451 scsi_finish_command(cmd); 1452 break; 1453 case NEEDS_RETRY: 1454 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); 1455 break; 1456 case ADD_TO_MLQUEUE: 1457 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1458 break; 1459 default: 1460 if (!scsi_eh_scmd_add(cmd, 0)) 1461 scsi_finish_command(cmd); 1462 } 1463} 1464 1465/* 1466 * Function: scsi_request_fn() 1467 * 1468 * Purpose: Main strategy routine for SCSI. 1469 * 1470 * Arguments: q - Pointer to actual queue. 1471 * 1472 * Returns: Nothing 1473 * 1474 * Lock status: IO request lock assumed to be held when called. 1475 */ 1476static void scsi_request_fn(struct request_queue *q) 1477{ 1478 struct scsi_device *sdev = q->queuedata; 1479 struct Scsi_Host *shost; 1480 struct scsi_cmnd *cmd; 1481 struct request *req; 1482 1483 if (!sdev) { 1484 printk("scsi: killing requests for dead queue\n"); 1485 while ((req = elv_next_request(q)) != NULL) 1486 scsi_kill_request(req, q); 1487 return; 1488 } 1489 1490 if(!get_device(&sdev->sdev_gendev)) 1491 /* We must be tearing the block queue down already */ 1492 return; 1493 1494 /* 1495 * To start with, we keep looping until the queue is empty, or until 1496 * the host is no longer able to accept any more requests. 1497 */ 1498 shost = sdev->host; 1499 while (!blk_queue_plugged(q)) { 1500 int rtn; 1501 /* 1502 * get next queueable request. We do this early to make sure 1503 * that the request is fully prepared even if we cannot 1504 * accept it. 1505 */ 1506 req = elv_next_request(q); 1507 if (!req || !scsi_dev_queue_ready(q, sdev)) 1508 break; 1509 1510 if (unlikely(!scsi_device_online(sdev))) { 1511 sdev_printk(KERN_ERR, sdev, 1512 "rejecting I/O to offline device\n"); 1513 scsi_kill_request(req, q); 1514 continue; 1515 } 1516 1517 1518 /* 1519 * Remove the request from the request list. 1520 */ 1521 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1522 blkdev_dequeue_request(req); 1523 sdev->device_busy++; 1524 1525 spin_unlock(q->queue_lock); 1526 cmd = req->special; 1527 if (unlikely(cmd == NULL)) { 1528 printk(KERN_CRIT "impossible request in %s.\n" 1529 "please mail a stack trace to " 1530 "linux-scsi@vger.kernel.org\n", 1531 __func__); 1532 blk_dump_rq_flags(req, "foo"); 1533 BUG(); 1534 } 1535 spin_lock(shost->host_lock); 1536 1537 /* 1538 * We hit this when the driver is using a host wide 1539 * tag map. For device level tag maps the queue_depth check 1540 * in the device ready fn would prevent us from trying 1541 * to allocate a tag. Since the map is a shared host resource 1542 * we add the dev to the starved list so it eventually gets 1543 * a run when a tag is freed. 1544 */ 1545 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { 1546 if (list_empty(&sdev->starved_entry)) 1547 list_add_tail(&sdev->starved_entry, 1548 &shost->starved_list); 1549 goto not_ready; 1550 } 1551 1552 if (!scsi_target_queue_ready(shost, sdev)) 1553 goto not_ready; 1554 1555 if (!scsi_host_queue_ready(q, shost, sdev)) 1556 goto not_ready; 1557 1558 scsi_target(sdev)->target_busy++; 1559 shost->host_busy++; 1560 1561 /* 1562 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will 1563 * take the lock again. 1564 */ 1565 spin_unlock_irq(shost->host_lock); 1566 1567 /* 1568 * Finally, initialize any error handling parameters, and set up 1569 * the timers for timeouts. 1570 */ 1571 scsi_init_cmd_errh(cmd); 1572 1573 /* 1574 * Dispatch the command to the low-level driver. 1575 */ 1576 rtn = scsi_dispatch_cmd(cmd); 1577 spin_lock_irq(q->queue_lock); 1578 if(rtn) { 1579 /* we're refusing the command; because of 1580 * the way locks get dropped, we need to 1581 * check here if plugging is required */ 1582 if(sdev->device_busy == 0) 1583 blk_plug_device(q); 1584 1585 break; 1586 } 1587 } 1588 1589 goto out; 1590 1591 not_ready: 1592 spin_unlock_irq(shost->host_lock); 1593 1594 /* 1595 * lock q, handle tag, requeue req, and decrement device_busy. We 1596 * must return with queue_lock held. 1597 * 1598 * Decrementing device_busy without checking it is OK, as all such 1599 * cases (host limits or settings) should run the queue at some 1600 * later time. 1601 */ 1602 spin_lock_irq(q->queue_lock); 1603 blk_requeue_request(q, req); 1604 sdev->device_busy--; 1605 if(sdev->device_busy == 0) 1606 blk_plug_device(q); 1607 out: 1608 /* must be careful here...if we trigger the ->remove() function 1609 * we cannot be holding the q lock */ 1610 spin_unlock_irq(q->queue_lock); 1611 put_device(&sdev->sdev_gendev); 1612 spin_lock_irq(q->queue_lock); 1613} 1614 1615u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 1616{ 1617 struct device *host_dev; 1618 u64 bounce_limit = 0xffffffff; 1619 1620 if (shost->unchecked_isa_dma) 1621 return BLK_BOUNCE_ISA; 1622 /* 1623 * Platforms with virtual-DMA translation 1624 * hardware have no practical limit. 1625 */ 1626 if (!PCI_DMA_BUS_IS_PHYS) 1627 return BLK_BOUNCE_ANY; 1628 1629 host_dev = scsi_get_device(shost); 1630 if (host_dev && host_dev->dma_mask) 1631 bounce_limit = *host_dev->dma_mask; 1632 1633 return bounce_limit; 1634} 1635EXPORT_SYMBOL(scsi_calculate_bounce_limit); 1636 1637struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 1638 request_fn_proc *request_fn) 1639{ 1640 struct request_queue *q; 1641 struct device *dev = shost->shost_gendev.parent; 1642 1643 q = blk_init_queue(request_fn, NULL); 1644 if (!q) 1645 return NULL; 1646 1647 /* 1648 * this limit is imposed by hardware restrictions 1649 */ 1650 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1651 blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); 1652 1653 blk_queue_max_sectors(q, shost->max_sectors); 1654 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1655 blk_queue_segment_boundary(q, shost->dma_boundary); 1656 dma_set_seg_boundary(dev, shost->dma_boundary); 1657 1658 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1659 1660 /* New queue, no concurrency on queue_flags */ 1661 if (!shost->use_clustering) 1662 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 1663 1664 /* 1665 * set a reasonable default alignment on word boundaries: the 1666 * host and device may alter it using 1667 * blk_queue_update_dma_alignment() later. 1668 */ 1669 blk_queue_dma_alignment(q, 0x03); 1670 1671 return q; 1672} 1673EXPORT_SYMBOL(__scsi_alloc_queue); 1674 1675struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1676{ 1677 struct request_queue *q; 1678 1679 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 1680 if (!q) 1681 return NULL; 1682 1683 blk_queue_prep_rq(q, scsi_prep_fn); 1684 blk_queue_softirq_done(q, scsi_softirq_done); 1685 blk_queue_rq_timed_out(q, scsi_times_out); 1686 blk_queue_lld_busy(q, scsi_lld_busy); 1687 return q; 1688} 1689 1690void scsi_free_queue(struct request_queue *q) 1691{ 1692 blk_cleanup_queue(q); 1693} 1694 1695/* 1696 * Function: scsi_block_requests() 1697 * 1698 * Purpose: Utility function used by low-level drivers to prevent further 1699 * commands from being queued to the device. 1700 * 1701 * Arguments: shost - Host in question 1702 * 1703 * Returns: Nothing 1704 * 1705 * Lock status: No locks are assumed held. 1706 * 1707 * Notes: There is no timer nor any other means by which the requests 1708 * get unblocked other than the low-level driver calling 1709 * scsi_unblock_requests(). 1710 */ 1711void scsi_block_requests(struct Scsi_Host *shost) 1712{ 1713 shost->host_self_blocked = 1; 1714} 1715EXPORT_SYMBOL(scsi_block_requests); 1716 1717/* 1718 * Function: scsi_unblock_requests() 1719 * 1720 * Purpose: Utility function used by low-level drivers to allow further 1721 * commands from being queued to the device. 1722 * 1723 * Arguments: shost - Host in question 1724 * 1725 * Returns: Nothing 1726 * 1727 * Lock status: No locks are assumed held. 1728 * 1729 * Notes: There is no timer nor any other means by which the requests 1730 * get unblocked other than the low-level driver calling 1731 * scsi_unblock_requests(). 1732 * 1733 * This is done as an API function so that changes to the 1734 * internals of the scsi mid-layer won't require wholesale 1735 * changes to drivers that use this feature. 1736 */ 1737void scsi_unblock_requests(struct Scsi_Host *shost) 1738{ 1739 shost->host_self_blocked = 0; 1740 scsi_run_host_queues(shost); 1741} 1742EXPORT_SYMBOL(scsi_unblock_requests); 1743 1744int __init scsi_init_queue(void) 1745{ 1746 int i; 1747 1748 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 1749 sizeof(struct scsi_data_buffer), 1750 0, 0, NULL); 1751 if (!scsi_sdb_cache) { 1752 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 1753 return -ENOMEM; 1754 } 1755 1756 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1757 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1758 int size = sgp->size * sizeof(struct scatterlist); 1759 1760 sgp->slab = kmem_cache_create(sgp->name, size, 0, 1761 SLAB_HWCACHE_ALIGN, NULL); 1762 if (!sgp->slab) { 1763 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1764 sgp->name); 1765 goto cleanup_sdb; 1766 } 1767 1768 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 1769 sgp->slab); 1770 if (!sgp->pool) { 1771 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1772 sgp->name); 1773 goto cleanup_sdb; 1774 } 1775 } 1776 1777 return 0; 1778 1779cleanup_sdb: 1780 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1781 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1782 if (sgp->pool) 1783 mempool_destroy(sgp->pool); 1784 if (sgp->slab) 1785 kmem_cache_destroy(sgp->slab); 1786 } 1787 kmem_cache_destroy(scsi_sdb_cache); 1788 1789 return -ENOMEM; 1790} 1791 1792void scsi_exit_queue(void) 1793{ 1794 int i; 1795 1796 kmem_cache_destroy(scsi_sdb_cache); 1797 1798 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1799 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1800 mempool_destroy(sgp->pool); 1801 kmem_cache_destroy(sgp->slab); 1802 } 1803} 1804 1805/** 1806 * scsi_mode_select - issue a mode select 1807 * @sdev: SCSI device to be queried 1808 * @pf: Page format bit (1 == standard, 0 == vendor specific) 1809 * @sp: Save page bit (0 == don't save, 1 == save) 1810 * @modepage: mode page being requested 1811 * @buffer: request buffer (may not be smaller than eight bytes) 1812 * @len: length of request buffer. 1813 * @timeout: command timeout 1814 * @retries: number of retries before failing 1815 * @data: returns a structure abstracting the mode header data 1816 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1817 * must be SCSI_SENSE_BUFFERSIZE big. 1818 * 1819 * Returns zero if successful; negative error number or scsi 1820 * status on error 1821 * 1822 */ 1823int 1824scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 1825 unsigned char *buffer, int len, int timeout, int retries, 1826 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1827{ 1828 unsigned char cmd[10]; 1829 unsigned char *real_buffer; 1830 int ret; 1831 1832 memset(cmd, 0, sizeof(cmd)); 1833 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 1834 1835 if (sdev->use_10_for_ms) { 1836 if (len > 65535) 1837 return -EINVAL; 1838 real_buffer = kmalloc(8 + len, GFP_KERNEL); 1839 if (!real_buffer) 1840 return -ENOMEM; 1841 memcpy(real_buffer + 8, buffer, len); 1842 len += 8; 1843 real_buffer[0] = 0; 1844 real_buffer[1] = 0; 1845 real_buffer[2] = data->medium_type; 1846 real_buffer[3] = data->device_specific; 1847 real_buffer[4] = data->longlba ? 0x01 : 0; 1848 real_buffer[5] = 0; 1849 real_buffer[6] = data->block_descriptor_length >> 8; 1850 real_buffer[7] = data->block_descriptor_length; 1851 1852 cmd[0] = MODE_SELECT_10; 1853 cmd[7] = len >> 8; 1854 cmd[8] = len; 1855 } else { 1856 if (len > 255 || data->block_descriptor_length > 255 || 1857 data->longlba) 1858 return -EINVAL; 1859 1860 real_buffer = kmalloc(4 + len, GFP_KERNEL); 1861 if (!real_buffer) 1862 return -ENOMEM; 1863 memcpy(real_buffer + 4, buffer, len); 1864 len += 4; 1865 real_buffer[0] = 0; 1866 real_buffer[1] = data->medium_type; 1867 real_buffer[2] = data->device_specific; 1868 real_buffer[3] = data->block_descriptor_length; 1869 1870 1871 cmd[0] = MODE_SELECT; 1872 cmd[4] = len; 1873 } 1874 1875 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 1876 sshdr, timeout, retries, NULL); 1877 kfree(real_buffer); 1878 return ret; 1879} 1880EXPORT_SYMBOL_GPL(scsi_mode_select); 1881 1882/** 1883 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 1884 * @sdev: SCSI device to be queried 1885 * @dbd: set if mode sense will allow block descriptors to be returned 1886 * @modepage: mode page being requested 1887 * @buffer: request buffer (may not be smaller than eight bytes) 1888 * @len: length of request buffer. 1889 * @timeout: command timeout 1890 * @retries: number of retries before failing 1891 * @data: returns a structure abstracting the mode header data 1892 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1893 * must be SCSI_SENSE_BUFFERSIZE big. 1894 * 1895 * Returns zero if unsuccessful, or the header offset (either 4 1896 * or 8 depending on whether a six or ten byte command was 1897 * issued) if successful. 1898 */ 1899int 1900scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 1901 unsigned char *buffer, int len, int timeout, int retries, 1902 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1903{ 1904 unsigned char cmd[12]; 1905 int use_10_for_ms; 1906 int header_length; 1907 int result; 1908 struct scsi_sense_hdr my_sshdr; 1909 1910 memset(data, 0, sizeof(*data)); 1911 memset(&cmd[0], 0, 12); 1912 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 1913 cmd[2] = modepage; 1914 1915 /* caller might not be interested in sense, but we need it */ 1916 if (!sshdr) 1917 sshdr = &my_sshdr; 1918 1919 retry: 1920 use_10_for_ms = sdev->use_10_for_ms; 1921 1922 if (use_10_for_ms) { 1923 if (len < 8) 1924 len = 8; 1925 1926 cmd[0] = MODE_SENSE_10; 1927 cmd[8] = len; 1928 header_length = 8; 1929 } else { 1930 if (len < 4) 1931 len = 4; 1932 1933 cmd[0] = MODE_SENSE; 1934 cmd[4] = len; 1935 header_length = 4; 1936 } 1937 1938 memset(buffer, 0, len); 1939 1940 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 1941 sshdr, timeout, retries, NULL); 1942 1943 /* This code looks awful: what it's doing is making sure an 1944 * ILLEGAL REQUEST sense return identifies the actual command 1945 * byte as the problem. MODE_SENSE commands can return 1946 * ILLEGAL REQUEST if the code page isn't supported */ 1947 1948 if (use_10_for_ms && !scsi_status_is_good(result) && 1949 (driver_byte(result) & DRIVER_SENSE)) { 1950 if (scsi_sense_valid(sshdr)) { 1951 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 1952 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 1953 /* 1954 * Invalid command operation code 1955 */ 1956 sdev->use_10_for_ms = 0; 1957 goto retry; 1958 } 1959 } 1960 } 1961 1962 if(scsi_status_is_good(result)) { 1963 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && 1964 (modepage == 6 || modepage == 8))) { 1965 /* Initio breakage? */ 1966 header_length = 0; 1967 data->length = 13; 1968 data->medium_type = 0; 1969 data->device_specific = 0; 1970 data->longlba = 0; 1971 data->block_descriptor_length = 0; 1972 } else if(use_10_for_ms) { 1973 data->length = buffer[0]*256 + buffer[1] + 2; 1974 data->medium_type = buffer[2]; 1975 data->device_specific = buffer[3]; 1976 data->longlba = buffer[4] & 0x01; 1977 data->block_descriptor_length = buffer[6]*256 1978 + buffer[7]; 1979 } else { 1980 data->length = buffer[0] + 1; 1981 data->medium_type = buffer[1]; 1982 data->device_specific = buffer[2]; 1983 data->block_descriptor_length = buffer[3]; 1984 } 1985 data->header_length = header_length; 1986 } 1987 1988 return result; 1989} 1990EXPORT_SYMBOL(scsi_mode_sense); 1991 1992/** 1993 * scsi_test_unit_ready - test if unit is ready 1994 * @sdev: scsi device to change the state of. 1995 * @timeout: command timeout 1996 * @retries: number of retries before failing 1997 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for 1998 * returning sense. Make sure that this is cleared before passing 1999 * in. 2000 * 2001 * Returns zero if unsuccessful or an error if TUR failed. For 2002 * removable media, a return of NOT_READY or UNIT_ATTENTION is 2003 * translated to success, with the ->changed flag updated. 2004 **/ 2005int 2006scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, 2007 struct scsi_sense_hdr *sshdr_external) 2008{ 2009 char cmd[] = { 2010 TEST_UNIT_READY, 0, 0, 0, 0, 0, 2011 }; 2012 struct scsi_sense_hdr *sshdr; 2013 int result; 2014 2015 if (!sshdr_external) 2016 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 2017 else 2018 sshdr = sshdr_external; 2019 2020 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2021 do { 2022 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2023 timeout, retries, NULL); 2024 if (sdev->removable && scsi_sense_valid(sshdr) && 2025 sshdr->sense_key == UNIT_ATTENTION) 2026 sdev->changed = 1; 2027 } while (scsi_sense_valid(sshdr) && 2028 sshdr->sense_key == UNIT_ATTENTION && --retries); 2029 2030 if (!sshdr) 2031 /* could not allocate sense buffer, so can't process it */ 2032 return result; 2033 2034 if (sdev->removable && scsi_sense_valid(sshdr) && 2035 (sshdr->sense_key == UNIT_ATTENTION || 2036 sshdr->sense_key == NOT_READY)) { 2037 sdev->changed = 1; 2038 result = 0; 2039 } 2040 if (!sshdr_external) 2041 kfree(sshdr); 2042 return result; 2043} 2044EXPORT_SYMBOL(scsi_test_unit_ready); 2045 2046/** 2047 * scsi_device_set_state - Take the given device through the device state model. 2048 * @sdev: scsi device to change the state of. 2049 * @state: state to change to. 2050 * 2051 * Returns zero if unsuccessful or an error if the requested 2052 * transition is illegal. 2053 */ 2054int 2055scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2056{ 2057 enum scsi_device_state oldstate = sdev->sdev_state; 2058 2059 if (state == oldstate) 2060 return 0; 2061 2062 switch (state) { 2063 case SDEV_CREATED: 2064 switch (oldstate) { 2065 case SDEV_CREATED_BLOCK: 2066 break; 2067 default: 2068 goto illegal; 2069 } 2070 break; 2071 2072 case SDEV_RUNNING: 2073 switch (oldstate) { 2074 case SDEV_CREATED: 2075 case SDEV_OFFLINE: 2076 case SDEV_QUIESCE: 2077 case SDEV_BLOCK: 2078 break; 2079 default: 2080 goto illegal; 2081 } 2082 break; 2083 2084 case SDEV_QUIESCE: 2085 switch (oldstate) { 2086 case SDEV_RUNNING: 2087 case SDEV_OFFLINE: 2088 break; 2089 default: 2090 goto illegal; 2091 } 2092 break; 2093 2094 case SDEV_OFFLINE: 2095 switch (oldstate) { 2096 case SDEV_CREATED: 2097 case SDEV_RUNNING: 2098 case SDEV_QUIESCE: 2099 case SDEV_BLOCK: 2100 break; 2101 default: 2102 goto illegal; 2103 } 2104 break; 2105 2106 case SDEV_BLOCK: 2107 switch (oldstate) { 2108 case SDEV_RUNNING: 2109 case SDEV_CREATED_BLOCK: 2110 break; 2111 default: 2112 goto illegal; 2113 } 2114 break; 2115 2116 case SDEV_CREATED_BLOCK: 2117 switch (oldstate) { 2118 case SDEV_CREATED: 2119 break; 2120 default: 2121 goto illegal; 2122 } 2123 break; 2124 2125 case SDEV_CANCEL: 2126 switch (oldstate) { 2127 case SDEV_CREATED: 2128 case SDEV_RUNNING: 2129 case SDEV_QUIESCE: 2130 case SDEV_OFFLINE: 2131 case SDEV_BLOCK: 2132 break; 2133 default: 2134 goto illegal; 2135 } 2136 break; 2137 2138 case SDEV_DEL: 2139 switch (oldstate) { 2140 case SDEV_CREATED: 2141 case SDEV_RUNNING: 2142 case SDEV_OFFLINE: 2143 case SDEV_CANCEL: 2144 break; 2145 default: 2146 goto illegal; 2147 } 2148 break; 2149 2150 } 2151 sdev->sdev_state = state; 2152 return 0; 2153 2154 illegal: 2155 SCSI_LOG_ERROR_RECOVERY(1, 2156 sdev_printk(KERN_ERR, sdev, 2157 "Illegal state transition %s->%s\n", 2158 scsi_device_state_name(oldstate), 2159 scsi_device_state_name(state)) 2160 ); 2161 return -EINVAL; 2162} 2163EXPORT_SYMBOL(scsi_device_set_state); 2164 2165/** 2166 * sdev_evt_emit - emit a single SCSI device uevent 2167 * @sdev: associated SCSI device 2168 * @evt: event to emit 2169 * 2170 * Send a single uevent (scsi_event) to the associated scsi_device. 2171 */ 2172static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2173{ 2174 int idx = 0; 2175 char *envp[3]; 2176 2177 switch (evt->evt_type) { 2178 case SDEV_EVT_MEDIA_CHANGE: 2179 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2180 break; 2181 2182 default: 2183 /* do nothing */ 2184 break; 2185 } 2186 2187 envp[idx++] = NULL; 2188 2189 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2190} 2191 2192/** 2193 * sdev_evt_thread - send a uevent for each scsi event 2194 * @work: work struct for scsi_device 2195 * 2196 * Dispatch queued events to their associated scsi_device kobjects 2197 * as uevents. 2198 */ 2199void scsi_evt_thread(struct work_struct *work) 2200{ 2201 struct scsi_device *sdev; 2202 LIST_HEAD(event_list); 2203 2204 sdev = container_of(work, struct scsi_device, event_work); 2205 2206 while (1) { 2207 struct scsi_event *evt; 2208 struct list_head *this, *tmp; 2209 unsigned long flags; 2210 2211 spin_lock_irqsave(&sdev->list_lock, flags); 2212 list_splice_init(&sdev->event_list, &event_list); 2213 spin_unlock_irqrestore(&sdev->list_lock, flags); 2214 2215 if (list_empty(&event_list)) 2216 break; 2217 2218 list_for_each_safe(this, tmp, &event_list) { 2219 evt = list_entry(this, struct scsi_event, node); 2220 list_del(&evt->node); 2221 scsi_evt_emit(sdev, evt); 2222 kfree(evt); 2223 } 2224 } 2225} 2226 2227/** 2228 * sdev_evt_send - send asserted event to uevent thread 2229 * @sdev: scsi_device event occurred on 2230 * @evt: event to send 2231 * 2232 * Assert scsi device event asynchronously. 2233 */ 2234void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2235{ 2236 unsigned long flags; 2237 2238#if 0 2239 /* FIXME: currently this check eliminates all media change events 2240 * for polled devices. Need to update to discriminate between AN 2241 * and polled events */ 2242 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2243 kfree(evt); 2244 return; 2245 } 2246#endif 2247 2248 spin_lock_irqsave(&sdev->list_lock, flags); 2249 list_add_tail(&evt->node, &sdev->event_list); 2250 schedule_work(&sdev->event_work); 2251 spin_unlock_irqrestore(&sdev->list_lock, flags); 2252} 2253EXPORT_SYMBOL_GPL(sdev_evt_send); 2254 2255/** 2256 * sdev_evt_alloc - allocate a new scsi event 2257 * @evt_type: type of event to allocate 2258 * @gfpflags: GFP flags for allocation 2259 * 2260 * Allocates and returns a new scsi_event. 2261 */ 2262struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 2263 gfp_t gfpflags) 2264{ 2265 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); 2266 if (!evt) 2267 return NULL; 2268 2269 evt->evt_type = evt_type; 2270 INIT_LIST_HEAD(&evt->node); 2271 2272 /* evt_type-specific initialization, if any */ 2273 switch (evt_type) { 2274 case SDEV_EVT_MEDIA_CHANGE: 2275 default: 2276 /* do nothing */ 2277 break; 2278 } 2279 2280 return evt; 2281} 2282EXPORT_SYMBOL_GPL(sdev_evt_alloc); 2283 2284/** 2285 * sdev_evt_send_simple - send asserted event to uevent thread 2286 * @sdev: scsi_device event occurred on 2287 * @evt_type: type of event to send 2288 * @gfpflags: GFP flags for allocation 2289 * 2290 * Assert scsi device event asynchronously, given an event type. 2291 */ 2292void sdev_evt_send_simple(struct scsi_device *sdev, 2293 enum scsi_device_event evt_type, gfp_t gfpflags) 2294{ 2295 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); 2296 if (!evt) { 2297 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", 2298 evt_type); 2299 return; 2300 } 2301 2302 sdev_evt_send(sdev, evt); 2303} 2304EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2305 2306/** 2307 * scsi_device_quiesce - Block user issued commands. 2308 * @sdev: scsi device to quiesce. 2309 * 2310 * This works by trying to transition to the SDEV_QUIESCE state 2311 * (which must be a legal transition). When the device is in this 2312 * state, only special requests will be accepted, all others will 2313 * be deferred. Since special requests may also be requeued requests, 2314 * a successful return doesn't guarantee the device will be 2315 * totally quiescent. 2316 * 2317 * Must be called with user context, may sleep. 2318 * 2319 * Returns zero if unsuccessful or an error if not. 2320 */ 2321int 2322scsi_device_quiesce(struct scsi_device *sdev) 2323{ 2324 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); 2325 if (err) 2326 return err; 2327 2328 scsi_run_queue(sdev->request_queue); 2329 while (sdev->device_busy) { 2330 msleep_interruptible(200); 2331 scsi_run_queue(sdev->request_queue); 2332 } 2333 return 0; 2334} 2335EXPORT_SYMBOL(scsi_device_quiesce); 2336 2337/** 2338 * scsi_device_resume - Restart user issued commands to a quiesced device. 2339 * @sdev: scsi device to resume. 2340 * 2341 * Moves the device from quiesced back to running and restarts the 2342 * queues. 2343 * 2344 * Must be called with user context, may sleep. 2345 */ 2346void 2347scsi_device_resume(struct scsi_device *sdev) 2348{ 2349 if(scsi_device_set_state(sdev, SDEV_RUNNING)) 2350 return; 2351 scsi_run_queue(sdev->request_queue); 2352} 2353EXPORT_SYMBOL(scsi_device_resume); 2354 2355static void 2356device_quiesce_fn(struct scsi_device *sdev, void *data) 2357{ 2358 scsi_device_quiesce(sdev); 2359} 2360 2361void 2362scsi_target_quiesce(struct scsi_target *starget) 2363{ 2364 starget_for_each_device(starget, NULL, device_quiesce_fn); 2365} 2366EXPORT_SYMBOL(scsi_target_quiesce); 2367 2368static void 2369device_resume_fn(struct scsi_device *sdev, void *data) 2370{ 2371 scsi_device_resume(sdev); 2372} 2373 2374void 2375scsi_target_resume(struct scsi_target *starget) 2376{ 2377 starget_for_each_device(starget, NULL, device_resume_fn); 2378} 2379EXPORT_SYMBOL(scsi_target_resume); 2380 2381/** 2382 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2383 * @sdev: device to block 2384 * 2385 * Block request made by scsi lld's to temporarily stop all 2386 * scsi commands on the specified device. Called from interrupt 2387 * or normal process context. 2388 * 2389 * Returns zero if successful or error if not 2390 * 2391 * Notes: 2392 * This routine transitions the device to the SDEV_BLOCK state 2393 * (which must be a legal transition). When the device is in this 2394 * state, all commands are deferred until the scsi lld reenables 2395 * the device with scsi_device_unblock or device_block_tmo fires. 2396 * This routine assumes the host_lock is held on entry. 2397 */ 2398int 2399scsi_internal_device_block(struct scsi_device *sdev) 2400{ 2401 struct request_queue *q = sdev->request_queue; 2402 unsigned long flags; 2403 int err = 0; 2404 2405 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2406 if (err) { 2407 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); 2408 2409 if (err) 2410 return err; 2411 } 2412 2413 /* 2414 * The device has transitioned to SDEV_BLOCK. Stop the 2415 * block layer from calling the midlayer with this device's 2416 * request queue. 2417 */ 2418 spin_lock_irqsave(q->queue_lock, flags); 2419 blk_stop_queue(q); 2420 spin_unlock_irqrestore(q->queue_lock, flags); 2421 2422 return 0; 2423} 2424EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2425 2426/** 2427 * scsi_internal_device_unblock - resume a device after a block request 2428 * @sdev: device to resume 2429 * 2430 * Called by scsi lld's or the midlayer to restart the device queue 2431 * for the previously suspended scsi device. Called from interrupt or 2432 * normal process context. 2433 * 2434 * Returns zero if successful or error if not. 2435 * 2436 * Notes: 2437 * This routine transitions the device to the SDEV_RUNNING state 2438 * (which must be a legal transition) allowing the midlayer to 2439 * goose the queue for this device. This routine assumes the 2440 * host_lock is held upon entry. 2441 */ 2442int 2443scsi_internal_device_unblock(struct scsi_device *sdev) 2444{ 2445 struct request_queue *q = sdev->request_queue; 2446 int err; 2447 unsigned long flags; 2448 2449 /* 2450 * Try to transition the scsi device to SDEV_RUNNING 2451 * and goose the device queue if successful. 2452 */ 2453 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2454 if (err) { 2455 err = scsi_device_set_state(sdev, SDEV_CREATED); 2456 2457 if (err) 2458 return err; 2459 } 2460 2461 spin_lock_irqsave(q->queue_lock, flags); 2462 blk_start_queue(q); 2463 spin_unlock_irqrestore(q->queue_lock, flags); 2464 2465 return 0; 2466} 2467EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); 2468 2469static void 2470device_block(struct scsi_device *sdev, void *data) 2471{ 2472 scsi_internal_device_block(sdev); 2473} 2474 2475static int 2476target_block(struct device *dev, void *data) 2477{ 2478 if (scsi_is_target_device(dev)) 2479 starget_for_each_device(to_scsi_target(dev), NULL, 2480 device_block); 2481 return 0; 2482} 2483 2484void 2485scsi_target_block(struct device *dev) 2486{ 2487 if (scsi_is_target_device(dev)) 2488 starget_for_each_device(to_scsi_target(dev), NULL, 2489 device_block); 2490 else 2491 device_for_each_child(dev, NULL, target_block); 2492} 2493EXPORT_SYMBOL_GPL(scsi_target_block); 2494 2495static void 2496device_unblock(struct scsi_device *sdev, void *data) 2497{ 2498 scsi_internal_device_unblock(sdev); 2499} 2500 2501static int 2502target_unblock(struct device *dev, void *data) 2503{ 2504 if (scsi_is_target_device(dev)) 2505 starget_for_each_device(to_scsi_target(dev), NULL, 2506 device_unblock); 2507 return 0; 2508} 2509 2510void 2511scsi_target_unblock(struct device *dev) 2512{ 2513 if (scsi_is_target_device(dev)) 2514 starget_for_each_device(to_scsi_target(dev), NULL, 2515 device_unblock); 2516 else 2517 device_for_each_child(dev, NULL, target_unblock); 2518} 2519EXPORT_SYMBOL_GPL(scsi_target_unblock); 2520 2521/** 2522 * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt 2523 * @sgl: scatter-gather list 2524 * @sg_count: number of segments in sg 2525 * @offset: offset in bytes into sg, on return offset into the mapped area 2526 * @len: bytes to map, on return number of bytes mapped 2527 * 2528 * Returns virtual address of the start of the mapped page 2529 */ 2530void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, 2531 size_t *offset, size_t *len) 2532{ 2533 int i; 2534 size_t sg_len = 0, len_complete = 0; 2535 struct scatterlist *sg; 2536 struct page *page; 2537 2538 WARN_ON(!irqs_disabled()); 2539 2540 for_each_sg(sgl, sg, sg_count, i) { 2541 len_complete = sg_len; /* Complete sg-entries */ 2542 sg_len += sg->length; 2543 if (sg_len > *offset) 2544 break; 2545 } 2546 2547 if (unlikely(i == sg_count)) { 2548 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 2549 "elements %d\n", 2550 __func__, sg_len, *offset, sg_count); 2551 WARN_ON(1); 2552 return NULL; 2553 } 2554 2555 /* Offset starting from the beginning of first page in this sg-entry */ 2556 *offset = *offset - len_complete + sg->offset; 2557 2558 /* Assumption: contiguous pages can be accessed as "page + i" */ 2559 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); 2560 *offset &= ~PAGE_MASK; 2561 2562 /* Bytes in this sg-entry from *offset to the end of the page */ 2563 sg_len = PAGE_SIZE - *offset; 2564 if (*len > sg_len) 2565 *len = sg_len; 2566 2567 return kmap_atomic(page, KM_BIO_SRC_IRQ); 2568} 2569EXPORT_SYMBOL(scsi_kmap_atomic_sg); 2570 2571/** 2572 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 2573 * @virt: virtual address to be unmapped 2574 */ 2575void scsi_kunmap_atomic_sg(void *virt) 2576{ 2577 kunmap_atomic(virt, KM_BIO_SRC_IRQ); 2578} 2579EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 2580