fc_fcp.c revision b277d2aa9a4d969002c4157bf77b76b9ad9ca04a
/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL");

static int fc_fcp_debug;

#define FC_DEBUG_FCP(fmt...)			\
	do {					\
		if (fc_fcp_debug)		\
			FC_DBG(fmt);		\
	} while (0)

static struct kmem_cache *scsi_pkt_cachep;

/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of mem */

#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)

/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been returned to the scsi layer.
73 */ 74#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr) 75#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) 76#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual) 77#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) 78#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) 79 80struct fc_fcp_internal { 81 mempool_t *scsi_pkt_pool; 82 struct list_head scsi_pkt_queue; 83 u8 throttled; 84}; 85 86#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 87 88/* 89 * function prototypes 90 * FC scsi I/O related functions 91 */ 92static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *); 93static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); 94static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); 95static void fc_fcp_complete_locked(struct fc_fcp_pkt *); 96static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 97static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); 98static void fc_timeout_error(struct fc_fcp_pkt *); 99static void fc_fcp_timeout(unsigned long data); 100static void fc_fcp_rec(struct fc_fcp_pkt *); 101static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 102static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); 103static void fc_io_compl(struct fc_fcp_pkt *); 104 105static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32); 106static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *); 107static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); 108 109/* 110 * command status codes 111 */ 112#define FC_COMPLETE 0 113#define FC_CMD_ABORTED 1 114#define FC_CMD_RESET 2 115#define FC_CMD_PLOGO 3 116#define FC_SNS_RCV 4 117#define FC_TRANS_ERR 5 118#define FC_DATA_OVRRUN 6 119#define FC_DATA_UNDRUN 7 120#define FC_ERROR 8 121#define FC_HRD_ERROR 9 122#define FC_CMD_TIME_OUT 10 123 124/* 125 * Error recovery timeout values. 126 */ 127#define FC_SCSI_ER_TIMEOUT (10 * HZ) 128#define FC_SCSI_TM_TOV (10 * HZ) 129#define FC_SCSI_REC_TOV (2 * HZ) 130#define FC_HOST_RESET_TIMEOUT (30 * HZ) 131 132#define FC_MAX_ERROR_CNT 5 133#define FC_MAX_RECOV_RETRY 3 134 135#define FC_FCP_DFLT_QUEUE_DEPTH 32 136 137/** 138 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet 139 * @lp: fc lport struct 140 * @gfp: gfp flags for allocation 141 * 142 * This is used by upper layer scsi driver. 143 * Return Value : scsi_pkt structure or null on allocation failure. 144 * Context : call from process context. no locking required. 145 */ 146static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) 147{ 148 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 149 struct fc_fcp_pkt *fsp; 150 151 fsp = mempool_alloc(si->scsi_pkt_pool, gfp); 152 if (fsp) { 153 memset(fsp, 0, sizeof(*fsp)); 154 fsp->lp = lp; 155 atomic_set(&fsp->ref_cnt, 1); 156 init_timer(&fsp->timer); 157 INIT_LIST_HEAD(&fsp->list); 158 spin_lock_init(&fsp->scsi_pkt_lock); 159 } 160 return fsp; 161} 162 163/** 164 * fc_fcp_pkt_release() - release hold on scsi_pkt packet 165 * @fsp: fcp packet struct 166 * 167 * This is used by upper layer scsi driver. 168 * Context : call from process and interrupt context. 
 * no locking required
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}

static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}

/**
 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
 * @seq: exchange sequence
 * @fsp: fcp packet struct
 *
 * Release the hold on the scsi_pkt packet that was taken to keep the
 * scsi_pkt around until the EM layer exchange resource is freed.
 * Context : called from the EM layer.
 *	     no locking required
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
 * @fsp: fcp packet
 *
 * We should only return error if we return a command to scsi-ml before
 * getting a response. This can happen in cases where we send an abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packets refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}

static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}

static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}

static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}

/*
 * Retry command.
 * An abort isn't needed.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->io_status = 0;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/*
 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP
 * transfer for a read I/O indicated by the fc_fcp_pkt.
 * @fsp: ptr to the fc_fcp_pkt
 *
 * This is called in exch_seq_send() when we have a newly allocated
 * exchange with a valid exchange id to set up ddp.
274 * 275 * returns: none 276 */ 277void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) 278{ 279 struct fc_lport *lp; 280 281 if (!fsp) 282 return; 283 284 lp = fsp->lp; 285 if ((fsp->req_flags & FC_SRB_READ) && 286 (lp->lro_enabled) && (lp->tt.ddp_setup)) { 287 if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), 288 scsi_sg_count(fsp->cmd))) 289 fsp->xfer_ddp = xid; 290 } 291} 292EXPORT_SYMBOL(fc_fcp_ddp_setup); 293 294/* 295 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any 296 * DDP related resources for this I/O if it is initialized 297 * as a ddp transfer 298 * @fsp: ptr to the fc_fcp_pkt 299 * 300 * returns: none 301 */ 302static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) 303{ 304 struct fc_lport *lp; 305 306 if (!fsp) 307 return; 308 309 lp = fsp->lp; 310 if (fsp->xfer_ddp && lp->tt.ddp_done) { 311 fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); 312 fsp->xfer_ddp = 0; 313 } 314} 315 316 317/* 318 * Receive SCSI data from target. 319 * Called after receiving solicited data. 320 */ 321static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 322{ 323 struct scsi_cmnd *sc = fsp->cmd; 324 struct fc_lport *lp = fsp->lp; 325 struct fcoe_dev_stats *stats; 326 struct fc_frame_header *fh; 327 size_t start_offset; 328 size_t offset; 329 u32 crc; 330 u32 copy_len = 0; 331 size_t len; 332 void *buf; 333 struct scatterlist *sg; 334 size_t remaining; 335 336 fh = fc_frame_header_get(fp); 337 offset = ntohl(fh->fh_parm_offset); 338 start_offset = offset; 339 len = fr_len(fp) - sizeof(*fh); 340 buf = fc_frame_payload_get(fp, 0); 341 342 /* if this I/O is ddped, update xfer len */ 343 fc_fcp_ddp_done(fsp); 344 345 if (offset + len > fsp->data_len) { 346 /* this should never happen */ 347 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 348 fc_frame_crc_check(fp)) 349 goto crc_err; 350 FC_DEBUG_FCP("data received past end. len %zx offset %zx " 351 "data_len %x\n", len, offset, fsp->data_len); 352 fc_fcp_retry_cmd(fsp); 353 return; 354 } 355 if (offset != fsp->xfer_len) 356 fsp->state |= FC_SRB_DISCONTIG; 357 358 crc = 0; 359 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) 360 crc = crc32(~0, (u8 *) fh, sizeof(*fh)); 361 362 sg = scsi_sglist(sc); 363 remaining = len; 364 365 while (remaining > 0 && sg) { 366 size_t off; 367 void *page_addr; 368 size_t sg_bytes; 369 370 if (offset >= sg->length) { 371 offset -= sg->length; 372 sg = sg_next(sg); 373 continue; 374 } 375 sg_bytes = min(remaining, sg->length - offset); 376 377 /* 378 * The scatterlist item may be bigger than PAGE_SIZE, 379 * but we are limited to mapping PAGE_SIZE at a time. 380 */ 381 off = offset + sg->offset; 382 sg_bytes = min(sg_bytes, (size_t) 383 (PAGE_SIZE - (off & ~PAGE_MASK))); 384 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), 385 KM_SOFTIRQ0); 386 if (!page_addr) 387 break; /* XXX panic? 
*/ 388 389 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) 390 crc = crc32(crc, buf, sg_bytes); 391 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, 392 sg_bytes); 393 394 kunmap_atomic(page_addr, KM_SOFTIRQ0); 395 buf += sg_bytes; 396 offset += sg_bytes; 397 remaining -= sg_bytes; 398 copy_len += sg_bytes; 399 } 400 401 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 402 buf = fc_frame_payload_get(fp, 0); 403 if (len % 4) { 404 crc = crc32(crc, buf + len, 4 - (len % 4)); 405 len += 4 - (len % 4); 406 } 407 408 if (~crc != le32_to_cpu(fr_crc(fp))) { 409crc_err: 410 stats = lp->dev_stats[smp_processor_id()]; 411 stats->ErrorFrames++; 412 if (stats->InvalidCRCCount++ < 5) 413 FC_DBG("CRC error on data frame\n"); 414 /* 415 * Assume the frame is total garbage. 416 * We may have copied it over the good part 417 * of the buffer. 418 * If so, we need to retry the entire operation. 419 * Otherwise, ignore it. 420 */ 421 if (fsp->state & FC_SRB_DISCONTIG) 422 fc_fcp_retry_cmd(fsp); 423 return; 424 } 425 } 426 427 if (fsp->xfer_contig_end == start_offset) 428 fsp->xfer_contig_end += copy_len; 429 fsp->xfer_len += copy_len; 430 431 /* 432 * In the very rare event that this data arrived after the response 433 * and completes the transfer, call the completion handler. 434 */ 435 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) && 436 fsp->xfer_len == fsp->data_len - fsp->scsi_resid) 437 fc_fcp_complete_locked(fsp); 438} 439 440/** 441 * fc_fcp_send_data() - Send SCSI data to target. 442 * @fsp: ptr to fc_fcp_pkt 443 * @sp: ptr to this sequence 444 * @offset: starting offset for this data request 445 * @seq_blen: the burst length for this data request 446 * 447 * Called after receiving a Transfer Ready data descriptor. 448 * if LLD is capable of seq offload then send down seq_blen 449 * size of data in single frame, otherwise send multiple FC 450 * frames of max FC frame payload supported by target port. 451 * 452 * Returns : 0 for success. 453 */ 454static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, 455 size_t offset, size_t seq_blen) 456{ 457 struct fc_exch *ep; 458 struct scsi_cmnd *sc; 459 struct scatterlist *sg; 460 struct fc_frame *fp = NULL; 461 struct fc_lport *lp = fsp->lp; 462 size_t remaining; 463 size_t t_blen; 464 size_t tlen; 465 size_t sg_bytes; 466 size_t frame_offset, fh_parm_offset; 467 int error; 468 void *data = NULL; 469 void *page_addr; 470 int using_sg = lp->sg_supp; 471 u32 f_ctl; 472 473 WARN_ON(seq_blen <= 0); 474 if (unlikely(offset + seq_blen > fsp->data_len)) { 475 /* this should never happen */ 476 FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n", 477 seq_blen, offset); 478 fc_fcp_send_abort(fsp); 479 return 0; 480 } else if (offset != fsp->xfer_len) { 481 /* Out of Order Data Request - no problem, but unexpected. */ 482 FC_DEBUG_FCP("xfer-ready non-contiguous. " 483 "seq_blen %zx offset %zx\n", seq_blen, offset); 484 } 485 486 /* 487 * if LLD is capable of seq_offload then set transport 488 * burst length (t_blen) to seq_blen, otherwise set t_blen 489 * to max FC frame payload previously set in fsp->max_payload. 
490 */ 491 t_blen = fsp->max_payload; 492 if (lp->seq_offload) { 493 t_blen = min(seq_blen, (size_t)lp->lso_max); 494 FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 495 fsp, seq_blen, lp->lso_max, t_blen); 496 } 497 498 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); 499 if (t_blen > 512) 500 t_blen &= ~(512 - 1); /* round down to block size */ 501 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */ 502 sc = fsp->cmd; 503 504 remaining = seq_blen; 505 fh_parm_offset = frame_offset = offset; 506 tlen = 0; 507 seq = lp->tt.seq_start_next(seq); 508 f_ctl = FC_FC_REL_OFF; 509 WARN_ON(!seq); 510 511 /* 512 * If a get_page()/put_page() will fail, don't use sg lists 513 * in the fc_frame structure. 514 * 515 * The put_page() may be long after the I/O has completed 516 * in the case of FCoE, since the network driver does it 517 * via free_skb(). See the test in free_pages_check(). 518 * 519 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'. 520 */ 521 if (using_sg) { 522 for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) { 523 if (page_count(sg_page(sg)) == 0 || 524 (sg_page(sg)->flags & (1 << PG_lru | 525 1 << PG_private | 526 1 << PG_locked | 527 1 << PG_active | 528 1 << PG_slab | 529 1 << PG_swapcache | 530 1 << PG_writeback | 531 1 << PG_reserved | 532 1 << PG_buddy))) { 533 using_sg = 0; 534 break; 535 } 536 } 537 } 538 sg = scsi_sglist(sc); 539 540 while (remaining > 0 && sg) { 541 if (offset >= sg->length) { 542 offset -= sg->length; 543 sg = sg_next(sg); 544 continue; 545 } 546 if (!fp) { 547 tlen = min(t_blen, remaining); 548 549 /* 550 * TODO. Temporary workaround. fc_seq_send() can't 551 * handle odd lengths in non-linear skbs. 552 * This will be the final fragment only. 553 */ 554 if (tlen % 4) 555 using_sg = 0; 556 if (using_sg) { 557 fp = _fc_frame_alloc(lp, 0); 558 if (!fp) 559 return -ENOMEM; 560 } else { 561 fp = fc_frame_alloc(lp, tlen); 562 if (!fp) 563 return -ENOMEM; 564 565 data = (void *)(fr_hdr(fp)) + 566 sizeof(struct fc_frame_header); 567 } 568 fh_parm_offset = frame_offset; 569 fr_max_payload(fp) = fsp->max_payload; 570 } 571 sg_bytes = min(tlen, sg->length - offset); 572 if (using_sg) { 573 WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags > 574 FC_FRAME_SG_LEN); 575 get_page(sg_page(sg)); 576 skb_fill_page_desc(fp_skb(fp), 577 skb_shinfo(fp_skb(fp))->nr_frags, 578 sg_page(sg), sg->offset + offset, 579 sg_bytes); 580 fp_skb(fp)->data_len += sg_bytes; 581 fr_len(fp) += sg_bytes; 582 fp_skb(fp)->truesize += PAGE_SIZE; 583 } else { 584 size_t off = offset + sg->offset; 585 586 /* 587 * The scatterlist item may be bigger than PAGE_SIZE, 588 * but we must not cross pages inside the kmap. 589 */ 590 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - 591 (off & ~PAGE_MASK))); 592 page_addr = kmap_atomic(sg_page(sg) + 593 (off >> PAGE_SHIFT), 594 KM_SOFTIRQ0); 595 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 596 sg_bytes); 597 kunmap_atomic(page_addr, KM_SOFTIRQ0); 598 data += sg_bytes; 599 } 600 offset += sg_bytes; 601 frame_offset += sg_bytes; 602 tlen -= sg_bytes; 603 remaining -= sg_bytes; 604 605 if (tlen) 606 continue; 607 608 /* 609 * Send sequence with transfer sequence initiative in case 610 * this is last FCP frame of the sequence. 611 */ 612 if (remaining == 0) 613 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ; 614 615 ep = fc_seq_exch(seq); 616 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, 617 FC_TYPE_FCP, f_ctl, fh_parm_offset); 618 619 /* 620 * send fragment using for a sequence. 
621 */ 622 error = lp->tt.seq_send(lp, seq, fp); 623 if (error) { 624 WARN_ON(1); /* send error should be rare */ 625 fc_fcp_retry_cmd(fsp); 626 return 0; 627 } 628 fp = NULL; 629 } 630 fsp->xfer_len += seq_blen; /* premature count? */ 631 return 0; 632} 633 634static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 635{ 636 int ba_done = 1; 637 struct fc_ba_rjt *brp; 638 struct fc_frame_header *fh; 639 640 fh = fc_frame_header_get(fp); 641 switch (fh->fh_r_ctl) { 642 case FC_RCTL_BA_ACC: 643 break; 644 case FC_RCTL_BA_RJT: 645 brp = fc_frame_payload_get(fp, sizeof(*brp)); 646 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR) 647 break; 648 /* fall thru */ 649 default: 650 /* 651 * we will let the command timeout 652 * and scsi-ml recover in this case, 653 * therefore cleared the ba_done flag. 654 */ 655 ba_done = 0; 656 } 657 658 if (ba_done) { 659 fsp->state |= FC_SRB_ABORTED; 660 fsp->state &= ~FC_SRB_ABORT_PENDING; 661 662 if (fsp->wait_for_comp) 663 complete(&fsp->tm_done); 664 else 665 fc_fcp_complete_locked(fsp); 666 } 667} 668 669/** 670 * fc_fcp_reduce_can_queue() - drop can_queue 671 * @lp: lport to drop queueing for 672 * 673 * If we are getting memory allocation failures, then we may 674 * be trying to execute too many commands. We let the running 675 * commands complete or timeout, then try again with a reduced 676 * can_queue. Eventually we will hit the point where we run 677 * on all reserved structs. 678 */ 679static void fc_fcp_reduce_can_queue(struct fc_lport *lp) 680{ 681 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 682 unsigned long flags; 683 int can_queue; 684 685 spin_lock_irqsave(lp->host->host_lock, flags); 686 if (si->throttled) 687 goto done; 688 si->throttled = 1; 689 690 can_queue = lp->host->can_queue; 691 can_queue >>= 1; 692 if (!can_queue) 693 can_queue = 1; 694 lp->host->can_queue = can_queue; 695 shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n" 696 "Reducing can_queue to %d.\n", can_queue); 697done: 698 spin_unlock_irqrestore(lp->host->host_lock, flags); 699} 700 701/** 702 * fc_fcp_recv() - Reveive FCP frames 703 * @seq: The sequence the frame is on 704 * @fp: The FC frame 705 * @arg: The related FCP packet 706 * 707 * Return : None 708 * Context : called from Soft IRQ context 709 * can not called holding list lock 710 */ 711static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) 712{ 713 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 714 struct fc_lport *lp; 715 struct fc_frame_header *fh; 716 struct fcp_txrdy *dd; 717 u8 r_ctl; 718 int rc = 0; 719 720 if (IS_ERR(fp)) 721 goto errout; 722 723 fh = fc_frame_header_get(fp); 724 r_ctl = fh->fh_r_ctl; 725 lp = fsp->lp; 726 727 if (!(lp->state & LPORT_ST_READY)) 728 goto out; 729 if (fc_fcp_lock_pkt(fsp)) 730 goto out; 731 fsp->last_pkt_time = jiffies; 732 733 if (fh->fh_type == FC_TYPE_BLS) { 734 fc_fcp_abts_resp(fsp, fp); 735 goto unlock; 736 } 737 738 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) 739 goto unlock; 740 741 if (r_ctl == FC_RCTL_DD_DATA_DESC) { 742 /* 743 * received XFER RDY from the target 744 * need to send data to the target 745 */ 746 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 747 dd = fc_frame_payload_get(fp, sizeof(*dd)); 748 WARN_ON(!dd); 749 750 rc = fc_fcp_send_data(fsp, seq, 751 (size_t) ntohl(dd->ft_data_ro), 752 (size_t) ntohl(dd->ft_burst_len)); 753 if (!rc) 754 seq->rec_data = fsp->xfer_len; 755 else if (rc == -ENOMEM) 756 fsp->state |= FC_SRB_NOMEM; 757 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { 758 
/* 759 * received a DATA frame 760 * next we will copy the data to the system buffer 761 */ 762 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */ 763 fc_fcp_recv_data(fsp, fp); 764 seq->rec_data = fsp->xfer_contig_end; 765 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) { 766 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 767 768 fc_fcp_resp(fsp, fp); 769 } else { 770 FC_DBG("unexpected frame. r_ctl %x\n", r_ctl); 771 } 772unlock: 773 fc_fcp_unlock_pkt(fsp); 774out: 775 fc_frame_free(fp); 776errout: 777 if (IS_ERR(fp)) 778 fc_fcp_error(fsp, fp); 779 else if (rc == -ENOMEM) 780 fc_fcp_reduce_can_queue(lp); 781} 782 783static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 784{ 785 struct fc_frame_header *fh; 786 struct fcp_resp *fc_rp; 787 struct fcp_resp_ext *rp_ex; 788 struct fcp_resp_rsp_info *fc_rp_info; 789 u32 plen; 790 u32 expected_len; 791 u32 respl = 0; 792 u32 snsl = 0; 793 u8 flags = 0; 794 795 plen = fr_len(fp); 796 fh = (struct fc_frame_header *)fr_hdr(fp); 797 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp))) 798 goto len_err; 799 plen -= sizeof(*fh); 800 fc_rp = (struct fcp_resp *)(fh + 1); 801 fsp->cdb_status = fc_rp->fr_status; 802 flags = fc_rp->fr_flags; 803 fsp->scsi_comp_flags = flags; 804 expected_len = fsp->data_len; 805 806 /* if ddp, update xfer len */ 807 fc_fcp_ddp_done(fsp); 808 809 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) { 810 rp_ex = (void *)(fc_rp + 1); 811 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) { 812 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex)) 813 goto len_err; 814 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); 815 if (flags & FCP_RSP_LEN_VAL) { 816 respl = ntohl(rp_ex->fr_rsp_len); 817 if (respl != sizeof(*fc_rp_info)) 818 goto len_err; 819 if (fsp->wait_for_comp) { 820 /* Abuse cdb_status for rsp code */ 821 fsp->cdb_status = fc_rp_info->rsp_code; 822 complete(&fsp->tm_done); 823 /* 824 * tmfs will not have any scsi cmd so 825 * exit here 826 */ 827 return; 828 } else 829 goto err; 830 } 831 if (flags & FCP_SNS_LEN_VAL) { 832 snsl = ntohl(rp_ex->fr_sns_len); 833 if (snsl > SCSI_SENSE_BUFFERSIZE) 834 snsl = SCSI_SENSE_BUFFERSIZE; 835 memcpy(fsp->cmd->sense_buffer, 836 (char *)fc_rp_info + respl, snsl); 837 } 838 } 839 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) { 840 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid)) 841 goto len_err; 842 if (flags & FCP_RESID_UNDER) { 843 fsp->scsi_resid = ntohl(rp_ex->fr_resid); 844 /* 845 * The cmnd->underflow is the minimum number of 846 * bytes that must be transfered for this 847 * command. Provided a sense condition is not 848 * present, make sure the actual amount 849 * transferred is at least the underflow value 850 * or fail. 851 */ 852 if (!(flags & FCP_SNS_LEN_VAL) && 853 (fc_rp->fr_status == 0) && 854 (scsi_bufflen(fsp->cmd) - 855 fsp->scsi_resid) < fsp->cmd->underflow) 856 goto err; 857 expected_len -= fsp->scsi_resid; 858 } else { 859 fsp->status_code = FC_ERROR; 860 } 861 } 862 } 863 fsp->state |= FC_SRB_RCV_STATUS; 864 865 /* 866 * Check for missing or extra data frames. 867 */ 868 if (unlikely(fsp->xfer_len != expected_len)) { 869 if (fsp->xfer_len < expected_len) { 870 /* 871 * Some data may be queued locally, 872 * Wait a at least one jiffy to see if it is delivered. 873 * If this expires without data, we may do SRR. 874 */ 875 fc_fcp_timer_set(fsp, 2); 876 return; 877 } 878 fsp->status_code = FC_DATA_OVRRUN; 879 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. 
" 880 "data len %x\n", 881 fsp->rport->port_id, 882 fsp->xfer_len, expected_len, fsp->data_len); 883 } 884 fc_fcp_complete_locked(fsp); 885 return; 886 887len_err: 888 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n", 889 flags, fr_len(fp), respl, snsl); 890err: 891 fsp->status_code = FC_ERROR; 892 fc_fcp_complete_locked(fsp); 893} 894 895/** 896 * fc_fcp_complete_locked() - complete processing of a fcp packet 897 * @fsp: fcp packet 898 * 899 * This function may sleep if a timer is pending. The packet lock must be 900 * held, and the host lock must not be held. 901 */ 902static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) 903{ 904 struct fc_lport *lp = fsp->lp; 905 struct fc_seq *seq; 906 struct fc_exch *ep; 907 u32 f_ctl; 908 909 if (fsp->state & FC_SRB_ABORT_PENDING) 910 return; 911 912 if (fsp->state & FC_SRB_ABORTED) { 913 if (!fsp->status_code) 914 fsp->status_code = FC_CMD_ABORTED; 915 } else { 916 /* 917 * Test for transport underrun, independent of response 918 * underrun status. 919 */ 920 if (fsp->xfer_len < fsp->data_len && !fsp->io_status && 921 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || 922 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { 923 fsp->status_code = FC_DATA_UNDRUN; 924 fsp->io_status = 0; 925 } 926 } 927 928 seq = fsp->seq_ptr; 929 if (seq) { 930 fsp->seq_ptr = NULL; 931 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) { 932 struct fc_frame *conf_frame; 933 struct fc_seq *csp; 934 935 csp = lp->tt.seq_start_next(seq); 936 conf_frame = fc_frame_alloc(fsp->lp, 0); 937 if (conf_frame) { 938 f_ctl = FC_FC_SEQ_INIT; 939 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; 940 ep = fc_seq_exch(seq); 941 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, 942 ep->did, ep->sid, 943 FC_TYPE_FCP, f_ctl, 0); 944 lp->tt.seq_send(lp, csp, conf_frame); 945 } 946 } 947 lp->tt.exch_done(seq); 948 } 949 fc_io_compl(fsp); 950} 951 952static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) 953{ 954 struct fc_lport *lp = fsp->lp; 955 956 if (fsp->seq_ptr) { 957 lp->tt.exch_done(fsp->seq_ptr); 958 fsp->seq_ptr = NULL; 959 } 960 fsp->status_code = error; 961} 962 963/** 964 * fc_fcp_cleanup_each_cmd() - Cleanup active commads 965 * @lp: logical port 966 * @id: target id 967 * @lun: lun 968 * @error: fsp status code 969 * 970 * If lun or id is -1, they are ignored. 971 */ 972static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, 973 unsigned int lun, int error) 974{ 975 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 976 struct fc_fcp_pkt *fsp; 977 struct scsi_cmnd *sc_cmd; 978 unsigned long flags; 979 980 spin_lock_irqsave(lp->host->host_lock, flags); 981restart: 982 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 983 sc_cmd = fsp->cmd; 984 if (id != -1 && scmd_id(sc_cmd) != id) 985 continue; 986 987 if (lun != -1 && sc_cmd->device->lun != lun) 988 continue; 989 990 fc_fcp_pkt_hold(fsp); 991 spin_unlock_irqrestore(lp->host->host_lock, flags); 992 993 if (!fc_fcp_lock_pkt(fsp)) { 994 fc_fcp_cleanup_cmd(fsp, error); 995 fc_io_compl(fsp); 996 fc_fcp_unlock_pkt(fsp); 997 } 998 999 fc_fcp_pkt_release(fsp); 1000 spin_lock_irqsave(lp->host->host_lock, flags); 1001 /* 1002 * while we dropped the lock multiple pkts could 1003 * have been released, so we have to start over. 
1004 */ 1005 goto restart; 1006 } 1007 spin_unlock_irqrestore(lp->host->host_lock, flags); 1008} 1009 1010static void fc_fcp_abort_io(struct fc_lport *lp) 1011{ 1012 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); 1013} 1014 1015/** 1016 * fc_fcp_pkt_send() - send a fcp packet to the lower level. 1017 * @lp: fc lport 1018 * @fsp: fc packet. 1019 * 1020 * This is called by upper layer protocol. 1021 * Return : zero for success and -1 for failure 1022 * Context : called from queuecommand which can be called from process 1023 * or scsi soft irq. 1024 * Locks : called with the host lock and irqs disabled. 1025 */ 1026static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1027{ 1028 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 1029 int rc; 1030 1031 fsp->cmd->SCp.ptr = (char *)fsp; 1032 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1033 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK; 1034 1035 int_to_scsilun(fsp->cmd->device->lun, 1036 (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1037 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1038 list_add_tail(&fsp->list, &si->scsi_pkt_queue); 1039 1040 spin_unlock_irq(lp->host->host_lock); 1041 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); 1042 spin_lock_irq(lp->host->host_lock); 1043 if (rc) 1044 list_del(&fsp->list); 1045 1046 return rc; 1047} 1048 1049static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1050 void (*resp)(struct fc_seq *, 1051 struct fc_frame *fp, 1052 void *arg)) 1053{ 1054 struct fc_frame *fp; 1055 struct fc_seq *seq; 1056 struct fc_rport *rport; 1057 struct fc_rport_libfc_priv *rp; 1058 const size_t len = sizeof(fsp->cdb_cmd); 1059 int rc = 0; 1060 1061 if (fc_fcp_lock_pkt(fsp)) 1062 return 0; 1063 1064 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); 1065 if (!fp) { 1066 rc = -1; 1067 goto unlock; 1068 } 1069 1070 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len); 1071 fr_fsp(fp) = fsp; 1072 rport = fsp->rport; 1073 fsp->max_payload = rport->maxframe_size; 1074 rp = rport->dd_data; 1075 1076 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1077 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1078 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1079 1080 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); 1081 if (!seq) { 1082 fc_frame_free(fp); 1083 rc = -1; 1084 goto unlock; 1085 } 1086 fsp->last_pkt_time = jiffies; 1087 fsp->seq_ptr = seq; 1088 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1089 1090 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); 1091 fc_fcp_timer_set(fsp, 1092 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ? 1093 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT); 1094unlock: 1095 fc_fcp_unlock_pkt(fsp); 1096 return rc; 1097} 1098 1099/* 1100 * transport error handler 1101 */ 1102static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1103{ 1104 int error = PTR_ERR(fp); 1105 1106 if (fc_fcp_lock_pkt(fsp)) 1107 return; 1108 1109 switch (error) { 1110 case -FC_EX_CLOSED: 1111 fc_fcp_retry_cmd(fsp); 1112 goto unlock; 1113 default: 1114 FC_DBG("unknown error %ld\n", PTR_ERR(fp)); 1115 } 1116 /* 1117 * clear abort pending, because the lower layer 1118 * decided to force completion. 
1119 */ 1120 fsp->state &= ~FC_SRB_ABORT_PENDING; 1121 fsp->status_code = FC_CMD_PLOGO; 1122 fc_fcp_complete_locked(fsp); 1123unlock: 1124 fc_fcp_unlock_pkt(fsp); 1125} 1126 1127/* 1128 * Scsi abort handler- calls to send an abort 1129 * and then wait for abort completion 1130 */ 1131static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1132{ 1133 int rc = FAILED; 1134 1135 if (fc_fcp_send_abort(fsp)) 1136 return FAILED; 1137 1138 init_completion(&fsp->tm_done); 1139 fsp->wait_for_comp = 1; 1140 1141 spin_unlock_bh(&fsp->scsi_pkt_lock); 1142 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); 1143 spin_lock_bh(&fsp->scsi_pkt_lock); 1144 fsp->wait_for_comp = 0; 1145 1146 if (!rc) { 1147 FC_DBG("target abort cmd failed\n"); 1148 rc = FAILED; 1149 } else if (fsp->state & FC_SRB_ABORTED) { 1150 FC_DBG("target abort cmd passed\n"); 1151 rc = SUCCESS; 1152 fc_fcp_complete_locked(fsp); 1153 } 1154 1155 return rc; 1156} 1157 1158/* 1159 * Retry LUN reset after resource allocation failed. 1160 */ 1161static void fc_lun_reset_send(unsigned long data) 1162{ 1163 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1164 struct fc_lport *lp = fsp->lp; 1165 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { 1166 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1167 return; 1168 if (fc_fcp_lock_pkt(fsp)) 1169 return; 1170 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); 1171 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1172 fc_fcp_unlock_pkt(fsp); 1173 } 1174} 1175 1176/* 1177 * Scsi device reset handler- send a LUN RESET to the device 1178 * and wait for reset reply 1179 */ 1180static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1181 unsigned int id, unsigned int lun) 1182{ 1183 int rc; 1184 1185 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1186 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET; 1187 int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1188 1189 fsp->wait_for_comp = 1; 1190 init_completion(&fsp->tm_done); 1191 1192 fc_lun_reset_send((unsigned long)fsp); 1193 1194 /* 1195 * wait for completion of reset 1196 * after that make sure all commands are terminated 1197 */ 1198 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); 1199 1200 spin_lock_bh(&fsp->scsi_pkt_lock); 1201 fsp->state |= FC_SRB_COMPL; 1202 spin_unlock_bh(&fsp->scsi_pkt_lock); 1203 1204 del_timer_sync(&fsp->timer); 1205 1206 spin_lock_bh(&fsp->scsi_pkt_lock); 1207 if (fsp->seq_ptr) { 1208 lp->tt.exch_done(fsp->seq_ptr); 1209 fsp->seq_ptr = NULL; 1210 } 1211 fsp->wait_for_comp = 0; 1212 spin_unlock_bh(&fsp->scsi_pkt_lock); 1213 1214 if (!rc) { 1215 FC_DBG("lun reset failed\n"); 1216 return FAILED; 1217 } 1218 1219 /* cdb_status holds the tmf's rsp code */ 1220 if (fsp->cdb_status != FCP_TMF_CMPL) 1221 return FAILED; 1222 1223 FC_DBG("lun reset to lun %u completed\n", lun); 1224 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1225 return SUCCESS; 1226} 1227 1228/* 1229 * Task Managment response handler 1230 */ 1231static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1232{ 1233 struct fc_fcp_pkt *fsp = arg; 1234 struct fc_frame_header *fh; 1235 1236 if (IS_ERR(fp)) { 1237 /* 1238 * If there is an error just let it timeout or wait 1239 * for TMF to be aborted if it timedout. 1240 * 1241 * scsi-eh will escalate for when either happens. 1242 */ 1243 return; 1244 } 1245 1246 if (fc_fcp_lock_pkt(fsp)) 1247 return; 1248 1249 /* 1250 * raced with eh timeout handler. 
1251 */ 1252 if (!fsp->seq_ptr || !fsp->wait_for_comp) { 1253 spin_unlock_bh(&fsp->scsi_pkt_lock); 1254 return; 1255 } 1256 1257 fh = fc_frame_header_get(fp); 1258 if (fh->fh_type != FC_TYPE_BLS) 1259 fc_fcp_resp(fsp, fp); 1260 fsp->seq_ptr = NULL; 1261 fsp->lp->tt.exch_done(seq); 1262 fc_frame_free(fp); 1263 fc_fcp_unlock_pkt(fsp); 1264} 1265 1266static void fc_fcp_cleanup(struct fc_lport *lp) 1267{ 1268 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); 1269} 1270 1271/* 1272 * fc_fcp_timeout: called by OS timer function. 1273 * 1274 * The timer has been inactivated and must be reactivated if desired 1275 * using fc_fcp_timer_set(). 1276 * 1277 * Algorithm: 1278 * 1279 * If REC is supported, just issue it, and return. The REC exchange will 1280 * complete or time out, and recovery can continue at that point. 1281 * 1282 * Otherwise, if the response has been received without all the data, 1283 * it has been ER_TIMEOUT since the response was received. 1284 * 1285 * If the response has not been received, 1286 * we see if data was received recently. If it has been, we continue waiting, 1287 * otherwise, we abort the command. 1288 */ 1289static void fc_fcp_timeout(unsigned long data) 1290{ 1291 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1292 struct fc_rport *rport = fsp->rport; 1293 struct fc_rport_libfc_priv *rp = rport->dd_data; 1294 1295 if (fc_fcp_lock_pkt(fsp)) 1296 return; 1297 1298 if (fsp->cdb_cmd.fc_tm_flags) 1299 goto unlock; 1300 1301 fsp->state |= FC_SRB_FCP_PROCESSING_TMO; 1302 1303 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) 1304 fc_fcp_rec(fsp); 1305 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), 1306 jiffies)) 1307 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1308 else if (fsp->state & FC_SRB_RCV_STATUS) 1309 fc_fcp_complete_locked(fsp); 1310 else 1311 fc_timeout_error(fsp); 1312 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; 1313unlock: 1314 fc_fcp_unlock_pkt(fsp); 1315} 1316 1317/* 1318 * Send a REC ELS request 1319 */ 1320static void fc_fcp_rec(struct fc_fcp_pkt *fsp) 1321{ 1322 struct fc_lport *lp; 1323 struct fc_frame *fp; 1324 struct fc_rport *rport; 1325 struct fc_rport_libfc_priv *rp; 1326 1327 lp = fsp->lp; 1328 rport = fsp->rport; 1329 rp = rport->dd_data; 1330 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { 1331 fsp->status_code = FC_HRD_ERROR; 1332 fsp->io_status = 0; 1333 fc_fcp_complete_locked(fsp); 1334 return; 1335 } 1336 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); 1337 if (!fp) 1338 goto retry; 1339 1340 fr_seq(fp) = fsp->seq_ptr; 1341 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1342 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, 1343 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1344 if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp, 1345 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1346 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1347 return; 1348 } 1349 fc_frame_free(fp); 1350retry: 1351 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1352 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1353 else 1354 fc_timeout_error(fsp); 1355} 1356 1357/* 1358 * Receive handler for REC ELS frame 1359 * if it is a reject then let the scsi layer to handle 1360 * the timeout. if it is a LS_ACC then if the io was not completed 1361 * then set the timeout and return otherwise complete the exchange 1362 * and tell the scsi layer to restart the I/O. 
1363 */ 1364static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1365{ 1366 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 1367 struct fc_els_rec_acc *recp; 1368 struct fc_els_ls_rjt *rjt; 1369 u32 e_stat; 1370 u8 opcode; 1371 u32 offset; 1372 enum dma_data_direction data_dir; 1373 enum fc_rctl r_ctl; 1374 struct fc_rport_libfc_priv *rp; 1375 1376 if (IS_ERR(fp)) { 1377 fc_fcp_rec_error(fsp, fp); 1378 return; 1379 } 1380 1381 if (fc_fcp_lock_pkt(fsp)) 1382 goto out; 1383 1384 fsp->recov_retry = 0; 1385 opcode = fc_frame_payload_op(fp); 1386 if (opcode == ELS_LS_RJT) { 1387 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1388 switch (rjt->er_reason) { 1389 default: 1390 FC_DEBUG_FCP("device %x unexpected REC reject " 1391 "reason %d expl %d\n", 1392 fsp->rport->port_id, rjt->er_reason, 1393 rjt->er_explan); 1394 /* fall through */ 1395 case ELS_RJT_UNSUP: 1396 FC_DEBUG_FCP("device does not support REC\n"); 1397 rp = fsp->rport->dd_data; 1398 /* 1399 * if we do not spport RECs or got some bogus 1400 * reason then resetup timer so we check for 1401 * making progress. 1402 */ 1403 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; 1404 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1405 break; 1406 case ELS_RJT_LOGIC: 1407 case ELS_RJT_UNAB: 1408 /* 1409 * If no data transfer, the command frame got dropped 1410 * so we just retry. If data was transferred, we 1411 * lost the response but the target has no record, 1412 * so we abort and retry. 1413 */ 1414 if (rjt->er_explan == ELS_EXPL_OXID_RXID && 1415 fsp->xfer_len == 0) { 1416 fc_fcp_retry_cmd(fsp); 1417 break; 1418 } 1419 fc_timeout_error(fsp); 1420 break; 1421 } 1422 } else if (opcode == ELS_LS_ACC) { 1423 if (fsp->state & FC_SRB_ABORTED) 1424 goto unlock_out; 1425 1426 data_dir = fsp->cmd->sc_data_direction; 1427 recp = fc_frame_payload_get(fp, sizeof(*recp)); 1428 offset = ntohl(recp->reca_fc4value); 1429 e_stat = ntohl(recp->reca_e_stat); 1430 1431 if (e_stat & ESB_ST_COMPLETE) { 1432 1433 /* 1434 * The exchange is complete. 1435 * 1436 * For output, we must've lost the response. 1437 * For input, all data must've been sent. 1438 * We lost may have lost the response 1439 * (and a confirmation was requested) and maybe 1440 * some data. 1441 * 1442 * If all data received, send SRR 1443 * asking for response. If partial data received, 1444 * or gaps, SRR requests data at start of gap. 1445 * Recovery via SRR relies on in-order-delivery. 1446 */ 1447 if (data_dir == DMA_TO_DEVICE) { 1448 r_ctl = FC_RCTL_DD_CMD_STATUS; 1449 } else if (fsp->xfer_contig_end == offset) { 1450 r_ctl = FC_RCTL_DD_CMD_STATUS; 1451 } else { 1452 offset = fsp->xfer_contig_end; 1453 r_ctl = FC_RCTL_DD_SOL_DATA; 1454 } 1455 fc_fcp_srr(fsp, r_ctl, offset); 1456 } else if (e_stat & ESB_ST_SEQ_INIT) { 1457 1458 /* 1459 * The remote port has the initiative, so just 1460 * keep waiting for it to complete. 1461 */ 1462 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1463 } else { 1464 1465 /* 1466 * The exchange is incomplete, we have seq. initiative. 1467 * Lost response with requested confirmation, 1468 * lost confirmation, lost transfer ready or 1469 * lost write data. 1470 * 1471 * For output, if not all data was received, ask 1472 * for transfer ready to be repeated. 1473 * 1474 * If we received or sent all the data, send SRR to 1475 * request response. 1476 * 1477 * If we lost a response, we may have lost some read 1478 * data as well. 
			 */
			r_ctl = FC_RCTL_DD_SOL_DATA;
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				if (offset < fsp->data_len)
					r_ctl = FC_RCTL_DD_DATA_DESC;
			} else if (offset == fsp->xfer_contig_end) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end < offset) {
				offset = fsp->xfer_contig_end;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		}
	}
unlock_out:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
	fc_frame_free(fp);
}

/*
 * Handle error response or timeout for REC exchange.
 */
static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	switch (error) {
	case -FC_EX_CLOSED:
		fc_fcp_retry_cmd(fsp);
		break;

	default:
		FC_DBG("REC %p fid %x unexpected error %d\n",
		       fsp, fsp->rport->port_id, error);
		fsp->status_code = FC_CMD_PLOGO;
		/* fall through */

	case -FC_EX_TIMEOUT:
		/*
		 * Assume REC or LS_ACC was lost.
		 * The exchange manager will have aborted REC, so retry.
		 */
		FC_DBG("REC fid %x error %d retry %d/%d\n",
		       fsp->rport->port_id, error, fsp->recov_retry,
		       FC_MAX_RECOV_RETRY);
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
}

/*
 * Time out error routine:
 * aborts the I/O, closes the exchange and
 * sends completion notification to the scsi layer
 */
static void fc_timeout_error(struct fc_fcp_pkt *fsp)
{
	fsp->status_code = FC_CMD_TIME_OUT;
	fsp->cdb_status = 0;
	fsp->io_status = 0;
	/*
	 * if this fails then we let the scsi command timer fire and
	 * scsi-ml escalate.
	 */
	fc_fcp_send_abort(fsp);
}

/*
 * Sequence retransmission request.
 * This is called after receiving status but insufficient data, or
 * when expecting status but the request has timed out.
1561 */ 1562static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) 1563{ 1564 struct fc_lport *lp = fsp->lp; 1565 struct fc_rport *rport; 1566 struct fc_rport_libfc_priv *rp; 1567 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); 1568 struct fc_seq *seq; 1569 struct fcp_srr *srr; 1570 struct fc_frame *fp; 1571 u8 cdb_op; 1572 1573 rport = fsp->rport; 1574 rp = rport->dd_data; 1575 cdb_op = fsp->cdb_cmd.fc_cdb[0]; 1576 1577 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) 1578 goto retry; /* shouldn't happen */ 1579 fp = fc_frame_alloc(lp, sizeof(*srr)); 1580 if (!fp) 1581 goto retry; 1582 1583 srr = fc_frame_payload_get(fp, sizeof(*srr)); 1584 memset(srr, 0, sizeof(*srr)); 1585 srr->srr_op = ELS_SRR; 1586 srr->srr_ox_id = htons(ep->oxid); 1587 srr->srr_rx_id = htons(ep->rxid); 1588 srr->srr_r_ctl = r_ctl; 1589 srr->srr_rel_off = htonl(offset); 1590 1591 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1592 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1593 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1594 1595 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, 1596 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); 1597 if (!seq) { 1598 fc_frame_free(fp); 1599 goto retry; 1600 } 1601 fsp->recov_seq = seq; 1602 fsp->xfer_len = offset; 1603 fsp->xfer_contig_end = offset; 1604 fsp->state &= ~FC_SRB_RCV_STATUS; 1605 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */ 1606 return; 1607retry: 1608 fc_fcp_retry_cmd(fsp); 1609} 1610 1611/* 1612 * Handle response from SRR. 1613 */ 1614static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1615{ 1616 struct fc_fcp_pkt *fsp = arg; 1617 struct fc_frame_header *fh; 1618 1619 if (IS_ERR(fp)) { 1620 fc_fcp_srr_error(fsp, fp); 1621 return; 1622 } 1623 1624 if (fc_fcp_lock_pkt(fsp)) 1625 goto out; 1626 1627 fh = fc_frame_header_get(fp); 1628 /* 1629 * BUG? fc_fcp_srr_error calls exch_done which would release 1630 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT, 1631 * then fc_exch_timeout would be sending an abort. The exch_done 1632 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing 1633 * an abort response though. 1634 */ 1635 if (fh->fh_type == FC_TYPE_BLS) { 1636 fc_fcp_unlock_pkt(fsp); 1637 return; 1638 } 1639 1640 fsp->recov_seq = NULL; 1641 switch (fc_frame_payload_op(fp)) { 1642 case ELS_LS_ACC: 1643 fsp->recov_retry = 0; 1644 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1645 break; 1646 case ELS_LS_RJT: 1647 default: 1648 fc_timeout_error(fsp); 1649 break; 1650 } 1651 fc_fcp_unlock_pkt(fsp); 1652 fsp->lp->tt.exch_done(seq); 1653out: 1654 fc_frame_free(fp); 1655 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1656} 1657 1658static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1659{ 1660 if (fc_fcp_lock_pkt(fsp)) 1661 goto out; 1662 fsp->lp->tt.exch_done(fsp->recov_seq); 1663 fsp->recov_seq = NULL; 1664 switch (PTR_ERR(fp)) { 1665 case -FC_EX_TIMEOUT: 1666 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1667 fc_fcp_rec(fsp); 1668 else 1669 fc_timeout_error(fsp); 1670 break; 1671 case -FC_EX_CLOSED: /* e.g., link failure */ 1672 /* fall through */ 1673 default: 1674 fc_fcp_retry_cmd(fsp); 1675 break; 1676 } 1677 fc_fcp_unlock_pkt(fsp); 1678out: 1679 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1680} 1681 1682static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) 1683{ 1684 /* lock ? 
*/ 1685 return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; 1686} 1687 1688/** 1689 * fc_queuecommand - The queuecommand function of the scsi template 1690 * @cmd: struct scsi_cmnd to be executed 1691 * @done: Callback function to be called when cmd is completed 1692 * 1693 * this is the i/o strategy routine, called by the scsi layer 1694 * this routine is called with holding the host_lock. 1695 */ 1696int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) 1697{ 1698 struct fc_lport *lp; 1699 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1700 struct fc_fcp_pkt *fsp; 1701 struct fc_rport_libfc_priv *rp; 1702 int rval; 1703 int rc = 0; 1704 struct fcoe_dev_stats *stats; 1705 1706 lp = shost_priv(sc_cmd->device->host); 1707 1708 rval = fc_remote_port_chkready(rport); 1709 if (rval) { 1710 sc_cmd->result = rval; 1711 done(sc_cmd); 1712 goto out; 1713 } 1714 1715 if (!*(struct fc_remote_port **)rport->dd_data) { 1716 /* 1717 * rport is transitioning from blocked/deleted to 1718 * online 1719 */ 1720 sc_cmd->result = DID_IMM_RETRY << 16; 1721 done(sc_cmd); 1722 goto out; 1723 } 1724 1725 rp = rport->dd_data; 1726 1727 if (!fc_fcp_lport_queue_ready(lp)) { 1728 rc = SCSI_MLQUEUE_HOST_BUSY; 1729 goto out; 1730 } 1731 1732 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); 1733 if (fsp == NULL) { 1734 rc = SCSI_MLQUEUE_HOST_BUSY; 1735 goto out; 1736 } 1737 1738 /* 1739 * build the libfc request pkt 1740 */ 1741 fsp->cmd = sc_cmd; /* save the cmd */ 1742 fsp->lp = lp; /* save the softc ptr */ 1743 fsp->rport = rport; /* set the remote port ptr */ 1744 sc_cmd->scsi_done = done; 1745 1746 /* 1747 * set up the transfer length 1748 */ 1749 fsp->data_len = scsi_bufflen(sc_cmd); 1750 fsp->xfer_len = 0; 1751 1752 /* 1753 * setup the data direction 1754 */ 1755 stats = lp->dev_stats[smp_processor_id()]; 1756 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1757 fsp->req_flags = FC_SRB_READ; 1758 stats->InputRequests++; 1759 stats->InputMegabytes = fsp->data_len; 1760 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 1761 fsp->req_flags = FC_SRB_WRITE; 1762 stats->OutputRequests++; 1763 stats->OutputMegabytes = fsp->data_len; 1764 } else { 1765 fsp->req_flags = 0; 1766 stats->ControlRequests++; 1767 } 1768 1769 fsp->tgt_flags = rp->flags; 1770 1771 init_timer(&fsp->timer); 1772 fsp->timer.data = (unsigned long)fsp; 1773 1774 /* 1775 * send it to the lower layer 1776 * if we get -1 return then put the request in the pending 1777 * queue. 1778 */ 1779 rval = fc_fcp_pkt_send(lp, fsp); 1780 if (rval != 0) { 1781 fsp->state = FC_SRB_FREE; 1782 fc_fcp_pkt_release(fsp); 1783 rc = SCSI_MLQUEUE_HOST_BUSY; 1784 } 1785out: 1786 return rc; 1787} 1788EXPORT_SYMBOL(fc_queuecommand); 1789 1790/** 1791 * fc_io_compl() - Handle responses for completed commands 1792 * @fsp: scsi packet 1793 * 1794 * Translates a error to a Linux SCSI error. 1795 * 1796 * The fcp packet lock must be held when calling. 
1797 */ 1798static void fc_io_compl(struct fc_fcp_pkt *fsp) 1799{ 1800 struct fc_fcp_internal *si; 1801 struct scsi_cmnd *sc_cmd; 1802 struct fc_lport *lp; 1803 unsigned long flags; 1804 1805 /* release outstanding ddp context */ 1806 fc_fcp_ddp_done(fsp); 1807 1808 fsp->state |= FC_SRB_COMPL; 1809 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { 1810 spin_unlock_bh(&fsp->scsi_pkt_lock); 1811 del_timer_sync(&fsp->timer); 1812 spin_lock_bh(&fsp->scsi_pkt_lock); 1813 } 1814 1815 lp = fsp->lp; 1816 si = fc_get_scsi_internal(lp); 1817 spin_lock_irqsave(lp->host->host_lock, flags); 1818 if (!fsp->cmd) { 1819 spin_unlock_irqrestore(lp->host->host_lock, flags); 1820 return; 1821 } 1822 1823 /* 1824 * if a command timed out while we had to try and throttle IO 1825 * and it is now getting cleaned up, then we are about to 1826 * try again so clear the throttled flag incase we get more 1827 * time outs. 1828 */ 1829 if (si->throttled && fsp->state & FC_SRB_NOMEM) 1830 si->throttled = 0; 1831 1832 sc_cmd = fsp->cmd; 1833 fsp->cmd = NULL; 1834 1835 if (!sc_cmd->SCp.ptr) { 1836 spin_unlock_irqrestore(lp->host->host_lock, flags); 1837 return; 1838 } 1839 1840 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1841 switch (fsp->status_code) { 1842 case FC_COMPLETE: 1843 if (fsp->cdb_status == 0) { 1844 /* 1845 * good I/O status 1846 */ 1847 sc_cmd->result = DID_OK << 16; 1848 if (fsp->scsi_resid) 1849 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1850 } else if (fsp->cdb_status == QUEUE_FULL) { 1851 struct scsi_device *tmp_sdev; 1852 struct scsi_device *sdev = sc_cmd->device; 1853 1854 shost_for_each_device(tmp_sdev, sdev->host) { 1855 if (tmp_sdev->id != sdev->id) 1856 continue; 1857 1858 if (tmp_sdev->queue_depth > 1) { 1859 scsi_track_queue_full(tmp_sdev, 1860 tmp_sdev-> 1861 queue_depth - 1); 1862 } 1863 } 1864 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; 1865 } else { 1866 /* 1867 * transport level I/O was ok but scsi 1868 * has non zero status 1869 */ 1870 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; 1871 } 1872 break; 1873 case FC_ERROR: 1874 sc_cmd->result = DID_ERROR << 16; 1875 break; 1876 case FC_DATA_UNDRUN: 1877 if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) { 1878 /* 1879 * scsi status is good but transport level 1880 * underrun. 1881 */ 1882 sc_cmd->result = DID_OK << 16; 1883 } else { 1884 /* 1885 * scsi got underrun, this is an error 1886 */ 1887 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1888 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1889 } 1890 break; 1891 case FC_DATA_OVRRUN: 1892 /* 1893 * overrun is an error 1894 */ 1895 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1896 break; 1897 case FC_CMD_ABORTED: 1898 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; 1899 break; 1900 case FC_CMD_TIME_OUT: 1901 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1902 break; 1903 case FC_CMD_RESET: 1904 sc_cmd->result = (DID_RESET << 16); 1905 break; 1906 case FC_HRD_ERROR: 1907 sc_cmd->result = (DID_NO_CONNECT << 16); 1908 break; 1909 default: 1910 sc_cmd->result = (DID_ERROR << 16); 1911 break; 1912 } 1913 1914 list_del(&fsp->list); 1915 sc_cmd->SCp.ptr = NULL; 1916 sc_cmd->scsi_done(sc_cmd); 1917 spin_unlock_irqrestore(lp->host->host_lock, flags); 1918 1919 /* release ref from initial allocation in queue command */ 1920 fc_fcp_pkt_release(fsp); 1921} 1922 1923/** 1924 * fc_fcp_complete() - complete processing of a fcp packet 1925 * @fsp: fcp packet 1926 * 1927 * This function may sleep if a fsp timer is pending. 
1928 * The host lock must not be held by caller. 1929 */ 1930void fc_fcp_complete(struct fc_fcp_pkt *fsp) 1931{ 1932 if (fc_fcp_lock_pkt(fsp)) 1933 return; 1934 1935 fc_fcp_complete_locked(fsp); 1936 fc_fcp_unlock_pkt(fsp); 1937} 1938EXPORT_SYMBOL(fc_fcp_complete); 1939 1940/** 1941 * fc_eh_abort() - Abort a command 1942 * @sc_cmd: scsi command to abort 1943 * 1944 * From scsi host template. 1945 * send ABTS to the target device and wait for the response 1946 * sc_cmd is the pointer to the command to be aborted. 1947 */ 1948int fc_eh_abort(struct scsi_cmnd *sc_cmd) 1949{ 1950 struct fc_fcp_pkt *fsp; 1951 struct fc_lport *lp; 1952 int rc = FAILED; 1953 unsigned long flags; 1954 1955 lp = shost_priv(sc_cmd->device->host); 1956 if (lp->state != LPORT_ST_READY) 1957 return rc; 1958 else if (!lp->link_up) 1959 return rc; 1960 1961 spin_lock_irqsave(lp->host->host_lock, flags); 1962 fsp = CMD_SP(sc_cmd); 1963 if (!fsp) { 1964 /* command completed while scsi eh was setting up */ 1965 spin_unlock_irqrestore(lp->host->host_lock, flags); 1966 return SUCCESS; 1967 } 1968 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ 1969 fc_fcp_pkt_hold(fsp); 1970 spin_unlock_irqrestore(lp->host->host_lock, flags); 1971 1972 if (fc_fcp_lock_pkt(fsp)) { 1973 /* completed while we were waiting for timer to be deleted */ 1974 rc = SUCCESS; 1975 goto release_pkt; 1976 } 1977 1978 rc = fc_fcp_pkt_abort(lp, fsp); 1979 fc_fcp_unlock_pkt(fsp); 1980 1981release_pkt: 1982 fc_fcp_pkt_release(fsp); 1983 return rc; 1984} 1985EXPORT_SYMBOL(fc_eh_abort); 1986 1987/** 1988 * fc_eh_device_reset() Reset a single LUN 1989 * @sc_cmd: scsi command 1990 * 1991 * Set from scsi host template to send tm cmd to the target and wait for the 1992 * response. 1993 */ 1994int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 1995{ 1996 struct fc_lport *lp; 1997 struct fc_fcp_pkt *fsp; 1998 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1999 int rc = FAILED; 2000 struct fc_rport_libfc_priv *rp; 2001 int rval; 2002 2003 rval = fc_remote_port_chkready(rport); 2004 if (rval) 2005 goto out; 2006 2007 rp = rport->dd_data; 2008 lp = shost_priv(sc_cmd->device->host); 2009 2010 if (lp->state != LPORT_ST_READY) 2011 return rc; 2012 2013 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); 2014 if (fsp == NULL) { 2015 FC_DBG("could not allocate scsi_pkt\n"); 2016 sc_cmd->result = DID_NO_CONNECT << 16; 2017 goto out; 2018 } 2019 2020 /* 2021 * Build the libfc request pkt. Do not set the scsi cmnd, because 2022 * the sc passed in is not setup for execution like when sent 2023 * through the queuecommand callout. 2024 */ 2025 fsp->lp = lp; /* save the softc ptr */ 2026 fsp->rport = rport; /* set the remote port ptr */ 2027 2028 /* 2029 * flush outstanding commands 2030 */ 2031 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); 2032 fsp->state = FC_SRB_FREE; 2033 fc_fcp_pkt_release(fsp); 2034 2035out: 2036 return rc; 2037} 2038EXPORT_SYMBOL(fc_eh_device_reset); 2039 2040/** 2041 * fc_eh_host_reset() - The reset function will reset the ports on the host. 
 * @sc_cmd: scsi command
 */
int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
	struct Scsi_Host *shost = sc_cmd->device->host;
	struct fc_lport *lp = shost_priv(shost);
	unsigned long wait_tmo;

	lp->tt.lport_reset(lp);
	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
		msleep(1000);

	if (fc_fcp_lport_queue_ready(lp)) {
		shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
		return SUCCESS;
	} else {
		shost_printk(KERN_INFO, shost, "Host reset failed. "
			     "lport not ready.\n");
		return FAILED;
	}
}
EXPORT_SYMBOL(fc_eh_host_reset);

/**
 * fc_slave_alloc() - configure queue depth
 * @sdev: scsi device
 *
 * Configures queue depth based on host's cmd_per_lun. If not set
 * then we use the libfc default.
 */
int fc_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	int queue_depth;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (sdev->tagged_supported) {
		if (sdev->host->hostt->cmd_per_lun)
			queue_depth = sdev->host->hostt->cmd_per_lun;
		else
			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
		scsi_activate_tcq(sdev, queue_depth);
	}
	return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);

int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
EXPORT_SYMBOL(fc_change_queue_depth);

int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
EXPORT_SYMBOL(fc_change_queue_type);

void fc_fcp_destroy(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);

	if (!list_empty(&si->scsi_pkt_queue))
		printk(KERN_ERR "Leaked scsi packets.\n");

	mempool_destroy(si->scsi_pkt_pool);
	kfree(si);
	lp->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);

int fc_fcp_init(struct fc_lport *lp)
{
	int rc;
	struct fc_fcp_internal *si;

	if (!lp->tt.fcp_cmd_send)
		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;

	if (!lp->tt.fcp_cleanup)
		lp->tt.fcp_cleanup = fc_fcp_cleanup;

	if (!lp->tt.fcp_abort_io)
		lp->tt.fcp_abort_io = fc_fcp_abort_io;

	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
	if (!si)
		return -ENOMEM;
	lp->scsi_priv = si;
	INIT_LIST_HEAD(&si->scsi_pkt_queue);

	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
	if (!si->scsi_pkt_pool) {
		rc = -ENOMEM;
		goto free_internal;
	}
	return 0;

free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);

static int __init libfc_init(void)
{
	int rc;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (scsi_pkt_cachep == NULL) {
		FC_DBG("Unable to allocate SRB cache...module load failed!");
		return -ENOMEM;
	}

	rc = fc_setup_exch_mgr();
	if (rc)
		goto destroy_pkt_cache;

	rc = fc_setup_rport();
	if (rc)
		goto destroy_em;

	return rc;
destroy_em:
	fc_destroy_exch_mgr();
destroy_pkt_cache:
	kmem_cache_destroy(scsi_pkt_cachep);
	return rc;
}

static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
	fc_destroy_rport();
}

module_init(libfc_init);
module_exit(libfc_exit);
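
/*
 * Usage note (illustrative sketch only): the fc_* handlers exported from
 * this file are intended to be wired into a lower-level driver's
 * scsi_host_template. The template name and the numeric limits below are
 * example values, not anything mandated by libfc:
 *
 *	static struct scsi_host_template example_fc_sht = {
 *		.module			 = THIS_MODULE,
 *		.name			 = "example FCP driver",
 *		.queuecommand		 = fc_queuecommand,
 *		.eh_abort_handler	 = fc_eh_abort,
 *		.eh_device_reset_handler = fc_eh_device_reset,
 *		.eh_host_reset_handler	 = fc_eh_host_reset,
 *		.slave_alloc		 = fc_slave_alloc,
 *		.change_queue_depth	 = fc_change_queue_depth,
 *		.change_queue_type	 = fc_change_queue_type,
 *		.this_id		 = -1,
 *		.cmd_per_lun		 = 32,
 *		.can_queue		 = 1024,
 *		.sg_tablesize		 = SG_ALL,
 *	};
 *
 * The driver is also expected to call fc_fcp_init() on its fc_lport so
 * that lp->tt.fcp_cmd_send and the other FCP template operations are
 * filled in before any I/O is queued.
 */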