fc_fcp.c revision 7414705ea4aef9ce438e547f3138a680d2d1096c
1/* 2 * Copyright(c) 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved. 4 * Copyright(c) 2008 Mike Christie 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along with 16 * this program; if not, write to the Free Software Foundation, Inc., 17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Maintained at www.Open-FCoE.org 20 */ 21 22#include <linux/module.h> 23#include <linux/delay.h> 24#include <linux/kernel.h> 25#include <linux/types.h> 26#include <linux/spinlock.h> 27#include <linux/scatterlist.h> 28#include <linux/err.h> 29#include <linux/crc32.h> 30 31#include <scsi/scsi_tcq.h> 32#include <scsi/scsi.h> 33#include <scsi/scsi_host.h> 34#include <scsi/scsi_device.h> 35#include <scsi/scsi_cmnd.h> 36 37#include <scsi/fc/fc_fc2.h> 38 39#include <scsi/libfc.h> 40#include <scsi/fc_encode.h> 41 42MODULE_AUTHOR("Open-FCoE.org"); 43MODULE_DESCRIPTION("libfc"); 44MODULE_LICENSE("GPL v2"); 45 46unsigned int fc_debug_logging; 47module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); 48MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); 49 50static struct kmem_cache *scsi_pkt_cachep; 51 52/* SRB state definitions */ 53#define FC_SRB_FREE 0 /* cmd is free */ 54#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */ 55#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */ 56#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */ 57#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */ 58#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ 59#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ 60#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ 61#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */ 62 63#define FC_SRB_READ (1 << 1) 64#define FC_SRB_WRITE (1 << 0) 65 66/* 67 * The SCp.ptr should be tested and set under the host lock. NULL indicates 68 * that the command has been retruned to the scsi layer. 69 */ 70#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr) 71#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) 72#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual) 73#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) 74#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) 75 76struct fc_fcp_internal { 77 mempool_t *scsi_pkt_pool; 78 struct list_head scsi_pkt_queue; 79 u8 throttled; 80}; 81 82#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 83 84/* 85 * function prototypes 86 * FC scsi I/O related functions 87 */ 88static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *); 89static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); 90static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); 91static void fc_fcp_complete_locked(struct fc_fcp_pkt *); 92static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 93static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); 94static void fc_timeout_error(struct fc_fcp_pkt *); 95static void fc_fcp_timeout(unsigned long data); 96static void fc_fcp_rec(struct fc_fcp_pkt *); 97static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 98static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); 99static void fc_io_compl(struct fc_fcp_pkt *); 100 101static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32); 102static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *); 103static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); 104 105/* 106 * command status codes 107 */ 108#define FC_COMPLETE 0 109#define FC_CMD_ABORTED 1 110#define FC_CMD_RESET 2 111#define FC_CMD_PLOGO 3 112#define FC_SNS_RCV 4 113#define FC_TRANS_ERR 5 114#define FC_DATA_OVRRUN 6 115#define FC_DATA_UNDRUN 7 116#define FC_ERROR 8 117#define FC_HRD_ERROR 9 118#define FC_CMD_TIME_OUT 10 119 120/* 121 * Error recovery timeout values. 122 */ 123#define FC_SCSI_ER_TIMEOUT (10 * HZ) 124#define FC_SCSI_TM_TOV (10 * HZ) 125#define FC_SCSI_REC_TOV (2 * HZ) 126#define FC_HOST_RESET_TIMEOUT (30 * HZ) 127 128#define FC_MAX_ERROR_CNT 5 129#define FC_MAX_RECOV_RETRY 3 130 131#define FC_FCP_DFLT_QUEUE_DEPTH 32 132 133/** 134 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet 135 * @lp: fc lport struct 136 * @gfp: gfp flags for allocation 137 * 138 * This is used by upper layer scsi driver. 139 * Return Value : scsi_pkt structure or null on allocation failure. 140 * Context : call from process context. no locking required. 141 */ 142static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) 143{ 144 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 145 struct fc_fcp_pkt *fsp; 146 147 fsp = mempool_alloc(si->scsi_pkt_pool, gfp); 148 if (fsp) { 149 memset(fsp, 0, sizeof(*fsp)); 150 fsp->lp = lp; 151 atomic_set(&fsp->ref_cnt, 1); 152 init_timer(&fsp->timer); 153 INIT_LIST_HEAD(&fsp->list); 154 spin_lock_init(&fsp->scsi_pkt_lock); 155 } 156 return fsp; 157} 158 159/** 160 * fc_fcp_pkt_release() - release hold on scsi_pkt packet 161 * @fsp: fcp packet struct 162 * 163 * This is used by upper layer scsi driver. 164 * Context : call from process and interrupt context. 165 * no locking required 166 */ 167static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) 168{ 169 if (atomic_dec_and_test(&fsp->ref_cnt)) { 170 struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp); 171 172 mempool_free(fsp, si->scsi_pkt_pool); 173 } 174} 175 176static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) 177{ 178 atomic_inc(&fsp->ref_cnt); 179} 180 181/** 182 * fc_fcp_pkt_destory() - release hold on scsi_pkt packet 183 * @seq: exchange sequence 184 * @fsp: fcp packet struct 185 * 186 * Release hold on scsi_pkt packet set to keep scsi_pkt 187 * till EM layer exch resource is not freed. 188 * Context : called from from EM layer. 189 * no locking required 190 */ 191static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) 192{ 193 fc_fcp_pkt_release(fsp); 194} 195 196/** 197 * fc_fcp_lock_pkt() - lock a packet and get a ref to it. 198 * @fsp: fcp packet 199 * 200 * We should only return error if we return a command to scsi-ml before 201 * getting a response. This can happen in cases where we send a abort, but 202 * do not wait for the response and the abort and command can be passing 203 * each other on the wire/network-layer. 204 * 205 * Note: this function locks the packet and gets a reference to allow 206 * callers to call the completion function while the lock is held and 207 * not have to worry about the packets refcount. 208 * 209 * TODO: Maybe we should just have callers grab/release the lock and 210 * have a function that they call to verify the fsp and grab a ref if 211 * needed. 212 */ 213static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp) 214{ 215 spin_lock_bh(&fsp->scsi_pkt_lock); 216 if (fsp->state & FC_SRB_COMPL) { 217 spin_unlock_bh(&fsp->scsi_pkt_lock); 218 return -EPERM; 219 } 220 221 fc_fcp_pkt_hold(fsp); 222 return 0; 223} 224 225static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) 226{ 227 spin_unlock_bh(&fsp->scsi_pkt_lock); 228 fc_fcp_pkt_release(fsp); 229} 230 231static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) 232{ 233 if (!(fsp->state & FC_SRB_COMPL)) 234 mod_timer(&fsp->timer, jiffies + delay); 235} 236 237static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) 238{ 239 if (!fsp->seq_ptr) 240 return -EINVAL; 241 242 fsp->state |= FC_SRB_ABORT_PENDING; 243 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); 244} 245 246/* 247 * Retry command. 248 * An abort isn't needed. 249 */ 250static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) 251{ 252 if (fsp->seq_ptr) { 253 fsp->lp->tt.exch_done(fsp->seq_ptr); 254 fsp->seq_ptr = NULL; 255 } 256 257 fsp->state &= ~FC_SRB_ABORT_PENDING; 258 fsp->io_status = 0; 259 fsp->status_code = FC_ERROR; 260 fc_fcp_complete_locked(fsp); 261} 262 263/* 264 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP 265 * transfer for a read I/O indicated by the fc_fcp_pkt. 266 * @fsp: ptr to the fc_fcp_pkt 267 * 268 * This is called in exch_seq_send() when we have a newly allocated 269 * exchange with a valid exchange id to setup ddp. 270 * 271 * returns: none 272 */ 273void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) 274{ 275 struct fc_lport *lp; 276 277 if (!fsp) 278 return; 279 280 lp = fsp->lp; 281 if ((fsp->req_flags & FC_SRB_READ) && 282 (lp->lro_enabled) && (lp->tt.ddp_setup)) { 283 if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), 284 scsi_sg_count(fsp->cmd))) 285 fsp->xfer_ddp = xid; 286 } 287} 288EXPORT_SYMBOL(fc_fcp_ddp_setup); 289 290/* 291 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any 292 * DDP related resources for this I/O if it is initialized 293 * as a ddp transfer 294 * @fsp: ptr to the fc_fcp_pkt 295 * 296 * returns: none 297 */ 298static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) 299{ 300 struct fc_lport *lp; 301 302 if (!fsp) 303 return; 304 305 lp = fsp->lp; 306 if (fsp->xfer_ddp && lp->tt.ddp_done) { 307 fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); 308 fsp->xfer_ddp = 0; 309 } 310} 311 312 313/* 314 * Receive SCSI data from target. 315 * Called after receiving solicited data. 316 */ 317static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 318{ 319 struct scsi_cmnd *sc = fsp->cmd; 320 struct fc_lport *lp = fsp->lp; 321 struct fcoe_dev_stats *stats; 322 struct fc_frame_header *fh; 323 size_t start_offset; 324 size_t offset; 325 u32 crc; 326 u32 copy_len = 0; 327 size_t len; 328 void *buf; 329 struct scatterlist *sg; 330 size_t remaining; 331 332 fh = fc_frame_header_get(fp); 333 offset = ntohl(fh->fh_parm_offset); 334 start_offset = offset; 335 len = fr_len(fp) - sizeof(*fh); 336 buf = fc_frame_payload_get(fp, 0); 337 338 /* if this I/O is ddped, update xfer len */ 339 fc_fcp_ddp_done(fsp); 340 341 if (offset + len > fsp->data_len) { 342 /* this should never happen */ 343 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 344 fc_frame_crc_check(fp)) 345 goto crc_err; 346 FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " 347 "data_len %x\n", len, offset, fsp->data_len); 348 fc_fcp_retry_cmd(fsp); 349 return; 350 } 351 if (offset != fsp->xfer_len) 352 fsp->state |= FC_SRB_DISCONTIG; 353 354 crc = 0; 355 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) 356 crc = crc32(~0, (u8 *) fh, sizeof(*fh)); 357 358 sg = scsi_sglist(sc); 359 remaining = len; 360 361 while (remaining > 0 && sg) { 362 size_t off; 363 void *page_addr; 364 size_t sg_bytes; 365 366 if (offset >= sg->length) { 367 offset -= sg->length; 368 sg = sg_next(sg); 369 continue; 370 } 371 sg_bytes = min(remaining, sg->length - offset); 372 373 /* 374 * The scatterlist item may be bigger than PAGE_SIZE, 375 * but we are limited to mapping PAGE_SIZE at a time. 376 */ 377 off = offset + sg->offset; 378 sg_bytes = min(sg_bytes, (size_t) 379 (PAGE_SIZE - (off & ~PAGE_MASK))); 380 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), 381 KM_SOFTIRQ0); 382 if (!page_addr) 383 break; /* XXX panic? */ 384 385 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) 386 crc = crc32(crc, buf, sg_bytes); 387 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, 388 sg_bytes); 389 390 kunmap_atomic(page_addr, KM_SOFTIRQ0); 391 buf += sg_bytes; 392 offset += sg_bytes; 393 remaining -= sg_bytes; 394 copy_len += sg_bytes; 395 } 396 397 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 398 buf = fc_frame_payload_get(fp, 0); 399 if (len % 4) { 400 crc = crc32(crc, buf + len, 4 - (len % 4)); 401 len += 4 - (len % 4); 402 } 403 404 if (~crc != le32_to_cpu(fr_crc(fp))) { 405crc_err: 406 stats = fc_lport_get_stats(lp); 407 stats->ErrorFrames++; 408 /* FIXME - per cpu count, not total count! */ 409 if (stats->InvalidCRCCount++ < 5) 410 printk(KERN_WARNING "libfc: CRC error on data " 411 "frame for port (%6x)\n", 412 fc_host_port_id(lp->host)); 413 /* 414 * Assume the frame is total garbage. 415 * We may have copied it over the good part 416 * of the buffer. 417 * If so, we need to retry the entire operation. 418 * Otherwise, ignore it. 419 */ 420 if (fsp->state & FC_SRB_DISCONTIG) 421 fc_fcp_retry_cmd(fsp); 422 return; 423 } 424 } 425 426 if (fsp->xfer_contig_end == start_offset) 427 fsp->xfer_contig_end += copy_len; 428 fsp->xfer_len += copy_len; 429 430 /* 431 * In the very rare event that this data arrived after the response 432 * and completes the transfer, call the completion handler. 433 */ 434 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) && 435 fsp->xfer_len == fsp->data_len - fsp->scsi_resid) 436 fc_fcp_complete_locked(fsp); 437} 438 439/** 440 * fc_fcp_send_data() - Send SCSI data to target. 441 * @fsp: ptr to fc_fcp_pkt 442 * @sp: ptr to this sequence 443 * @offset: starting offset for this data request 444 * @seq_blen: the burst length for this data request 445 * 446 * Called after receiving a Transfer Ready data descriptor. 447 * if LLD is capable of seq offload then send down seq_blen 448 * size of data in single frame, otherwise send multiple FC 449 * frames of max FC frame payload supported by target port. 450 * 451 * Returns : 0 for success. 452 */ 453static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, 454 size_t offset, size_t seq_blen) 455{ 456 struct fc_exch *ep; 457 struct scsi_cmnd *sc; 458 struct scatterlist *sg; 459 struct fc_frame *fp = NULL; 460 struct fc_lport *lp = fsp->lp; 461 size_t remaining; 462 size_t t_blen; 463 size_t tlen; 464 size_t sg_bytes; 465 size_t frame_offset, fh_parm_offset; 466 int error; 467 void *data = NULL; 468 void *page_addr; 469 int using_sg = lp->sg_supp; 470 u32 f_ctl; 471 472 WARN_ON(seq_blen <= 0); 473 if (unlikely(offset + seq_blen > fsp->data_len)) { 474 /* this should never happen */ 475 FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx " 476 "offset %zx\n", seq_blen, offset); 477 fc_fcp_send_abort(fsp); 478 return 0; 479 } else if (offset != fsp->xfer_len) { 480 /* Out of Order Data Request - no problem, but unexpected. */ 481 FC_FCP_DBG(fsp, "xfer-ready non-contiguous. " 482 "seq_blen %zx offset %zx\n", seq_blen, offset); 483 } 484 485 /* 486 * if LLD is capable of seq_offload then set transport 487 * burst length (t_blen) to seq_blen, otherwise set t_blen 488 * to max FC frame payload previously set in fsp->max_payload. 489 */ 490 t_blen = fsp->max_payload; 491 if (lp->seq_offload) { 492 t_blen = min(seq_blen, (size_t)lp->lso_max); 493 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 494 fsp, seq_blen, lp->lso_max, t_blen); 495 } 496 497 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); 498 if (t_blen > 512) 499 t_blen &= ~(512 - 1); /* round down to block size */ 500 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */ 501 sc = fsp->cmd; 502 503 remaining = seq_blen; 504 fh_parm_offset = frame_offset = offset; 505 tlen = 0; 506 seq = lp->tt.seq_start_next(seq); 507 f_ctl = FC_FC_REL_OFF; 508 WARN_ON(!seq); 509 510 /* 511 * If a get_page()/put_page() will fail, don't use sg lists 512 * in the fc_frame structure. 513 * 514 * The put_page() may be long after the I/O has completed 515 * in the case of FCoE, since the network driver does it 516 * via free_skb(). See the test in free_pages_check(). 517 * 518 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'. 519 */ 520 if (using_sg) { 521 for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) { 522 if (page_count(sg_page(sg)) == 0 || 523 (sg_page(sg)->flags & (1 << PG_lru | 524 1 << PG_private | 525 1 << PG_locked | 526 1 << PG_active | 527 1 << PG_slab | 528 1 << PG_swapcache | 529 1 << PG_writeback | 530 1 << PG_reserved | 531 1 << PG_buddy))) { 532 using_sg = 0; 533 break; 534 } 535 } 536 } 537 sg = scsi_sglist(sc); 538 539 while (remaining > 0 && sg) { 540 if (offset >= sg->length) { 541 offset -= sg->length; 542 sg = sg_next(sg); 543 continue; 544 } 545 if (!fp) { 546 tlen = min(t_blen, remaining); 547 548 /* 549 * TODO. Temporary workaround. fc_seq_send() can't 550 * handle odd lengths in non-linear skbs. 551 * This will be the final fragment only. 552 */ 553 if (tlen % 4) 554 using_sg = 0; 555 if (using_sg) { 556 fp = _fc_frame_alloc(lp, 0); 557 if (!fp) 558 return -ENOMEM; 559 } else { 560 fp = fc_frame_alloc(lp, tlen); 561 if (!fp) 562 return -ENOMEM; 563 564 data = (void *)(fr_hdr(fp)) + 565 sizeof(struct fc_frame_header); 566 } 567 fh_parm_offset = frame_offset; 568 fr_max_payload(fp) = fsp->max_payload; 569 } 570 sg_bytes = min(tlen, sg->length - offset); 571 if (using_sg) { 572 WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags > 573 FC_FRAME_SG_LEN); 574 get_page(sg_page(sg)); 575 skb_fill_page_desc(fp_skb(fp), 576 skb_shinfo(fp_skb(fp))->nr_frags, 577 sg_page(sg), sg->offset + offset, 578 sg_bytes); 579 fp_skb(fp)->data_len += sg_bytes; 580 fr_len(fp) += sg_bytes; 581 fp_skb(fp)->truesize += PAGE_SIZE; 582 } else { 583 size_t off = offset + sg->offset; 584 585 /* 586 * The scatterlist item may be bigger than PAGE_SIZE, 587 * but we must not cross pages inside the kmap. 588 */ 589 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - 590 (off & ~PAGE_MASK))); 591 page_addr = kmap_atomic(sg_page(sg) + 592 (off >> PAGE_SHIFT), 593 KM_SOFTIRQ0); 594 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 595 sg_bytes); 596 kunmap_atomic(page_addr, KM_SOFTIRQ0); 597 data += sg_bytes; 598 } 599 offset += sg_bytes; 600 frame_offset += sg_bytes; 601 tlen -= sg_bytes; 602 remaining -= sg_bytes; 603 604 if (tlen) 605 continue; 606 607 /* 608 * Send sequence with transfer sequence initiative in case 609 * this is last FCP frame of the sequence. 610 */ 611 if (remaining == 0) 612 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ; 613 614 ep = fc_seq_exch(seq); 615 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, 616 FC_TYPE_FCP, f_ctl, fh_parm_offset); 617 618 /* 619 * send fragment using for a sequence. 620 */ 621 error = lp->tt.seq_send(lp, seq, fp); 622 if (error) { 623 WARN_ON(1); /* send error should be rare */ 624 fc_fcp_retry_cmd(fsp); 625 return 0; 626 } 627 fp = NULL; 628 } 629 fsp->xfer_len += seq_blen; /* premature count? */ 630 return 0; 631} 632 633static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 634{ 635 int ba_done = 1; 636 struct fc_ba_rjt *brp; 637 struct fc_frame_header *fh; 638 639 fh = fc_frame_header_get(fp); 640 switch (fh->fh_r_ctl) { 641 case FC_RCTL_BA_ACC: 642 break; 643 case FC_RCTL_BA_RJT: 644 brp = fc_frame_payload_get(fp, sizeof(*brp)); 645 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR) 646 break; 647 /* fall thru */ 648 default: 649 /* 650 * we will let the command timeout 651 * and scsi-ml recover in this case, 652 * therefore cleared the ba_done flag. 653 */ 654 ba_done = 0; 655 } 656 657 if (ba_done) { 658 fsp->state |= FC_SRB_ABORTED; 659 fsp->state &= ~FC_SRB_ABORT_PENDING; 660 661 if (fsp->wait_for_comp) 662 complete(&fsp->tm_done); 663 else 664 fc_fcp_complete_locked(fsp); 665 } 666} 667 668/** 669 * fc_fcp_reduce_can_queue() - drop can_queue 670 * @lp: lport to drop queueing for 671 * 672 * If we are getting memory allocation failures, then we may 673 * be trying to execute too many commands. We let the running 674 * commands complete or timeout, then try again with a reduced 675 * can_queue. Eventually we will hit the point where we run 676 * on all reserved structs. 677 */ 678static void fc_fcp_reduce_can_queue(struct fc_lport *lp) 679{ 680 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 681 unsigned long flags; 682 int can_queue; 683 684 spin_lock_irqsave(lp->host->host_lock, flags); 685 if (si->throttled) 686 goto done; 687 si->throttled = 1; 688 689 can_queue = lp->host->can_queue; 690 can_queue >>= 1; 691 if (!can_queue) 692 can_queue = 1; 693 lp->host->can_queue = can_queue; 694 shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" 695 "Reducing can_queue to %d.\n", can_queue); 696done: 697 spin_unlock_irqrestore(lp->host->host_lock, flags); 698} 699 700/** 701 * fc_fcp_recv() - Reveive FCP frames 702 * @seq: The sequence the frame is on 703 * @fp: The FC frame 704 * @arg: The related FCP packet 705 * 706 * Return : None 707 * Context : called from Soft IRQ context 708 * can not called holding list lock 709 */ 710static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) 711{ 712 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 713 struct fc_lport *lport = fsp->lp; 714 struct fc_frame_header *fh; 715 struct fcp_txrdy *dd; 716 u8 r_ctl; 717 int rc = 0; 718 719 if (IS_ERR(fp)) 720 goto errout; 721 722 fh = fc_frame_header_get(fp); 723 r_ctl = fh->fh_r_ctl; 724 725 if (!(lport->state & LPORT_ST_READY)) 726 goto out; 727 if (fc_fcp_lock_pkt(fsp)) 728 goto out; 729 fsp->last_pkt_time = jiffies; 730 731 if (fh->fh_type == FC_TYPE_BLS) { 732 fc_fcp_abts_resp(fsp, fp); 733 goto unlock; 734 } 735 736 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) 737 goto unlock; 738 739 if (r_ctl == FC_RCTL_DD_DATA_DESC) { 740 /* 741 * received XFER RDY from the target 742 * need to send data to the target 743 */ 744 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 745 dd = fc_frame_payload_get(fp, sizeof(*dd)); 746 WARN_ON(!dd); 747 748 rc = fc_fcp_send_data(fsp, seq, 749 (size_t) ntohl(dd->ft_data_ro), 750 (size_t) ntohl(dd->ft_burst_len)); 751 if (!rc) 752 seq->rec_data = fsp->xfer_len; 753 else if (rc == -ENOMEM) 754 fsp->state |= FC_SRB_NOMEM; 755 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { 756 /* 757 * received a DATA frame 758 * next we will copy the data to the system buffer 759 */ 760 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */ 761 fc_fcp_recv_data(fsp, fp); 762 seq->rec_data = fsp->xfer_contig_end; 763 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) { 764 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 765 766 fc_fcp_resp(fsp, fp); 767 } else { 768 FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl); 769 } 770unlock: 771 fc_fcp_unlock_pkt(fsp); 772out: 773 fc_frame_free(fp); 774errout: 775 if (IS_ERR(fp)) 776 fc_fcp_error(fsp, fp); 777 else if (rc == -ENOMEM) 778 fc_fcp_reduce_can_queue(lport); 779} 780 781static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 782{ 783 struct fc_frame_header *fh; 784 struct fcp_resp *fc_rp; 785 struct fcp_resp_ext *rp_ex; 786 struct fcp_resp_rsp_info *fc_rp_info; 787 u32 plen; 788 u32 expected_len; 789 u32 respl = 0; 790 u32 snsl = 0; 791 u8 flags = 0; 792 793 plen = fr_len(fp); 794 fh = (struct fc_frame_header *)fr_hdr(fp); 795 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp))) 796 goto len_err; 797 plen -= sizeof(*fh); 798 fc_rp = (struct fcp_resp *)(fh + 1); 799 fsp->cdb_status = fc_rp->fr_status; 800 flags = fc_rp->fr_flags; 801 fsp->scsi_comp_flags = flags; 802 expected_len = fsp->data_len; 803 804 /* if ddp, update xfer len */ 805 fc_fcp_ddp_done(fsp); 806 807 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) { 808 rp_ex = (void *)(fc_rp + 1); 809 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) { 810 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex)) 811 goto len_err; 812 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); 813 if (flags & FCP_RSP_LEN_VAL) { 814 respl = ntohl(rp_ex->fr_rsp_len); 815 if (respl != sizeof(*fc_rp_info)) 816 goto len_err; 817 if (fsp->wait_for_comp) { 818 /* Abuse cdb_status for rsp code */ 819 fsp->cdb_status = fc_rp_info->rsp_code; 820 complete(&fsp->tm_done); 821 /* 822 * tmfs will not have any scsi cmd so 823 * exit here 824 */ 825 return; 826 } else 827 goto err; 828 } 829 if (flags & FCP_SNS_LEN_VAL) { 830 snsl = ntohl(rp_ex->fr_sns_len); 831 if (snsl > SCSI_SENSE_BUFFERSIZE) 832 snsl = SCSI_SENSE_BUFFERSIZE; 833 memcpy(fsp->cmd->sense_buffer, 834 (char *)fc_rp_info + respl, snsl); 835 } 836 } 837 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) { 838 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid)) 839 goto len_err; 840 if (flags & FCP_RESID_UNDER) { 841 fsp->scsi_resid = ntohl(rp_ex->fr_resid); 842 /* 843 * The cmnd->underflow is the minimum number of 844 * bytes that must be transfered for this 845 * command. Provided a sense condition is not 846 * present, make sure the actual amount 847 * transferred is at least the underflow value 848 * or fail. 849 */ 850 if (!(flags & FCP_SNS_LEN_VAL) && 851 (fc_rp->fr_status == 0) && 852 (scsi_bufflen(fsp->cmd) - 853 fsp->scsi_resid) < fsp->cmd->underflow) 854 goto err; 855 expected_len -= fsp->scsi_resid; 856 } else { 857 fsp->status_code = FC_ERROR; 858 } 859 } 860 } 861 fsp->state |= FC_SRB_RCV_STATUS; 862 863 /* 864 * Check for missing or extra data frames. 865 */ 866 if (unlikely(fsp->xfer_len != expected_len)) { 867 if (fsp->xfer_len < expected_len) { 868 /* 869 * Some data may be queued locally, 870 * Wait a at least one jiffy to see if it is delivered. 871 * If this expires without data, we may do SRR. 872 */ 873 fc_fcp_timer_set(fsp, 2); 874 return; 875 } 876 fsp->status_code = FC_DATA_OVRRUN; 877 FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, " 878 "len %x, data len %x\n", 879 fsp->rport->port_id, 880 fsp->xfer_len, expected_len, fsp->data_len); 881 } 882 fc_fcp_complete_locked(fsp); 883 return; 884 885len_err: 886 FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u " 887 "snsl %u\n", flags, fr_len(fp), respl, snsl); 888err: 889 fsp->status_code = FC_ERROR; 890 fc_fcp_complete_locked(fsp); 891} 892 893/** 894 * fc_fcp_complete_locked() - complete processing of a fcp packet 895 * @fsp: fcp packet 896 * 897 * This function may sleep if a timer is pending. The packet lock must be 898 * held, and the host lock must not be held. 899 */ 900static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) 901{ 902 struct fc_lport *lp = fsp->lp; 903 struct fc_seq *seq; 904 struct fc_exch *ep; 905 u32 f_ctl; 906 907 if (fsp->state & FC_SRB_ABORT_PENDING) 908 return; 909 910 if (fsp->state & FC_SRB_ABORTED) { 911 if (!fsp->status_code) 912 fsp->status_code = FC_CMD_ABORTED; 913 } else { 914 /* 915 * Test for transport underrun, independent of response 916 * underrun status. 917 */ 918 if (fsp->xfer_len < fsp->data_len && !fsp->io_status && 919 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || 920 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { 921 fsp->status_code = FC_DATA_UNDRUN; 922 fsp->io_status = 0; 923 } 924 } 925 926 seq = fsp->seq_ptr; 927 if (seq) { 928 fsp->seq_ptr = NULL; 929 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) { 930 struct fc_frame *conf_frame; 931 struct fc_seq *csp; 932 933 csp = lp->tt.seq_start_next(seq); 934 conf_frame = fc_frame_alloc(fsp->lp, 0); 935 if (conf_frame) { 936 f_ctl = FC_FC_SEQ_INIT; 937 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; 938 ep = fc_seq_exch(seq); 939 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, 940 ep->did, ep->sid, 941 FC_TYPE_FCP, f_ctl, 0); 942 lp->tt.seq_send(lp, csp, conf_frame); 943 } 944 } 945 lp->tt.exch_done(seq); 946 } 947 fc_io_compl(fsp); 948} 949 950static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) 951{ 952 struct fc_lport *lp = fsp->lp; 953 954 if (fsp->seq_ptr) { 955 lp->tt.exch_done(fsp->seq_ptr); 956 fsp->seq_ptr = NULL; 957 } 958 fsp->status_code = error; 959} 960 961/** 962 * fc_fcp_cleanup_each_cmd() - Cleanup active commads 963 * @lp: logical port 964 * @id: target id 965 * @lun: lun 966 * @error: fsp status code 967 * 968 * If lun or id is -1, they are ignored. 969 */ 970static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, 971 unsigned int lun, int error) 972{ 973 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 974 struct fc_fcp_pkt *fsp; 975 struct scsi_cmnd *sc_cmd; 976 unsigned long flags; 977 978 spin_lock_irqsave(lp->host->host_lock, flags); 979restart: 980 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 981 sc_cmd = fsp->cmd; 982 if (id != -1 && scmd_id(sc_cmd) != id) 983 continue; 984 985 if (lun != -1 && sc_cmd->device->lun != lun) 986 continue; 987 988 fc_fcp_pkt_hold(fsp); 989 spin_unlock_irqrestore(lp->host->host_lock, flags); 990 991 if (!fc_fcp_lock_pkt(fsp)) { 992 fc_fcp_cleanup_cmd(fsp, error); 993 fc_io_compl(fsp); 994 fc_fcp_unlock_pkt(fsp); 995 } 996 997 fc_fcp_pkt_release(fsp); 998 spin_lock_irqsave(lp->host->host_lock, flags); 999 /* 1000 * while we dropped the lock multiple pkts could 1001 * have been released, so we have to start over. 1002 */ 1003 goto restart; 1004 } 1005 spin_unlock_irqrestore(lp->host->host_lock, flags); 1006} 1007 1008static void fc_fcp_abort_io(struct fc_lport *lp) 1009{ 1010 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); 1011} 1012 1013/** 1014 * fc_fcp_pkt_send() - send a fcp packet to the lower level. 1015 * @lp: fc lport 1016 * @fsp: fc packet. 1017 * 1018 * This is called by upper layer protocol. 1019 * Return : zero for success and -1 for failure 1020 * Context : called from queuecommand which can be called from process 1021 * or scsi soft irq. 1022 * Locks : called with the host lock and irqs disabled. 1023 */ 1024static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1025{ 1026 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 1027 int rc; 1028 1029 fsp->cmd->SCp.ptr = (char *)fsp; 1030 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1031 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK; 1032 1033 int_to_scsilun(fsp->cmd->device->lun, 1034 (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1035 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1036 list_add_tail(&fsp->list, &si->scsi_pkt_queue); 1037 1038 spin_unlock_irq(lp->host->host_lock); 1039 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); 1040 spin_lock_irq(lp->host->host_lock); 1041 if (rc) 1042 list_del(&fsp->list); 1043 1044 return rc; 1045} 1046 1047static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1048 void (*resp)(struct fc_seq *, 1049 struct fc_frame *fp, 1050 void *arg)) 1051{ 1052 struct fc_frame *fp; 1053 struct fc_seq *seq; 1054 struct fc_rport *rport; 1055 struct fc_rport_libfc_priv *rp; 1056 const size_t len = sizeof(fsp->cdb_cmd); 1057 int rc = 0; 1058 1059 if (fc_fcp_lock_pkt(fsp)) 1060 return 0; 1061 1062 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); 1063 if (!fp) { 1064 rc = -1; 1065 goto unlock; 1066 } 1067 1068 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len); 1069 fr_fsp(fp) = fsp; 1070 rport = fsp->rport; 1071 fsp->max_payload = rport->maxframe_size; 1072 rp = rport->dd_data; 1073 1074 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1075 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1076 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1077 1078 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); 1079 if (!seq) { 1080 fc_frame_free(fp); 1081 rc = -1; 1082 goto unlock; 1083 } 1084 fsp->last_pkt_time = jiffies; 1085 fsp->seq_ptr = seq; 1086 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1087 1088 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); 1089 fc_fcp_timer_set(fsp, 1090 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ? 1091 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT); 1092unlock: 1093 fc_fcp_unlock_pkt(fsp); 1094 return rc; 1095} 1096 1097/* 1098 * transport error handler 1099 */ 1100static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1101{ 1102 int error = PTR_ERR(fp); 1103 1104 if (fc_fcp_lock_pkt(fsp)) 1105 return; 1106 1107 if (error == -FC_EX_CLOSED) { 1108 fc_fcp_retry_cmd(fsp); 1109 goto unlock; 1110 } 1111 1112 /* 1113 * clear abort pending, because the lower layer 1114 * decided to force completion. 1115 */ 1116 fsp->state &= ~FC_SRB_ABORT_PENDING; 1117 fsp->status_code = FC_CMD_PLOGO; 1118 fc_fcp_complete_locked(fsp); 1119unlock: 1120 fc_fcp_unlock_pkt(fsp); 1121} 1122 1123/* 1124 * Scsi abort handler- calls to send an abort 1125 * and then wait for abort completion 1126 */ 1127static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 1128{ 1129 int rc = FAILED; 1130 1131 if (fc_fcp_send_abort(fsp)) 1132 return FAILED; 1133 1134 init_completion(&fsp->tm_done); 1135 fsp->wait_for_comp = 1; 1136 1137 spin_unlock_bh(&fsp->scsi_pkt_lock); 1138 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); 1139 spin_lock_bh(&fsp->scsi_pkt_lock); 1140 fsp->wait_for_comp = 0; 1141 1142 if (!rc) { 1143 FC_FCP_DBG(fsp, "target abort cmd failed\n"); 1144 rc = FAILED; 1145 } else if (fsp->state & FC_SRB_ABORTED) { 1146 FC_FCP_DBG(fsp, "target abort cmd passed\n"); 1147 rc = SUCCESS; 1148 fc_fcp_complete_locked(fsp); 1149 } 1150 1151 return rc; 1152} 1153 1154/* 1155 * Retry LUN reset after resource allocation failed. 1156 */ 1157static void fc_lun_reset_send(unsigned long data) 1158{ 1159 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1160 struct fc_lport *lp = fsp->lp; 1161 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { 1162 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1163 return; 1164 if (fc_fcp_lock_pkt(fsp)) 1165 return; 1166 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); 1167 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1168 fc_fcp_unlock_pkt(fsp); 1169 } 1170} 1171 1172/* 1173 * Scsi device reset handler- send a LUN RESET to the device 1174 * and wait for reset reply 1175 */ 1176static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1177 unsigned int id, unsigned int lun) 1178{ 1179 int rc; 1180 1181 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1182 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET; 1183 int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1184 1185 fsp->wait_for_comp = 1; 1186 init_completion(&fsp->tm_done); 1187 1188 fc_lun_reset_send((unsigned long)fsp); 1189 1190 /* 1191 * wait for completion of reset 1192 * after that make sure all commands are terminated 1193 */ 1194 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); 1195 1196 spin_lock_bh(&fsp->scsi_pkt_lock); 1197 fsp->state |= FC_SRB_COMPL; 1198 spin_unlock_bh(&fsp->scsi_pkt_lock); 1199 1200 del_timer_sync(&fsp->timer); 1201 1202 spin_lock_bh(&fsp->scsi_pkt_lock); 1203 if (fsp->seq_ptr) { 1204 lp->tt.exch_done(fsp->seq_ptr); 1205 fsp->seq_ptr = NULL; 1206 } 1207 fsp->wait_for_comp = 0; 1208 spin_unlock_bh(&fsp->scsi_pkt_lock); 1209 1210 if (!rc) { 1211 FC_SCSI_DBG(lp, "lun reset failed\n"); 1212 return FAILED; 1213 } 1214 1215 /* cdb_status holds the tmf's rsp code */ 1216 if (fsp->cdb_status != FCP_TMF_CMPL) 1217 return FAILED; 1218 1219 FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); 1220 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1221 return SUCCESS; 1222} 1223 1224/* 1225 * Task Managment response handler 1226 */ 1227static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1228{ 1229 struct fc_fcp_pkt *fsp = arg; 1230 struct fc_frame_header *fh; 1231 1232 if (IS_ERR(fp)) { 1233 /* 1234 * If there is an error just let it timeout or wait 1235 * for TMF to be aborted if it timedout. 1236 * 1237 * scsi-eh will escalate for when either happens. 1238 */ 1239 return; 1240 } 1241 1242 if (fc_fcp_lock_pkt(fsp)) 1243 return; 1244 1245 /* 1246 * raced with eh timeout handler. 1247 */ 1248 if (!fsp->seq_ptr || !fsp->wait_for_comp) { 1249 spin_unlock_bh(&fsp->scsi_pkt_lock); 1250 return; 1251 } 1252 1253 fh = fc_frame_header_get(fp); 1254 if (fh->fh_type != FC_TYPE_BLS) 1255 fc_fcp_resp(fsp, fp); 1256 fsp->seq_ptr = NULL; 1257 fsp->lp->tt.exch_done(seq); 1258 fc_frame_free(fp); 1259 fc_fcp_unlock_pkt(fsp); 1260} 1261 1262static void fc_fcp_cleanup(struct fc_lport *lp) 1263{ 1264 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); 1265} 1266 1267/* 1268 * fc_fcp_timeout: called by OS timer function. 1269 * 1270 * The timer has been inactivated and must be reactivated if desired 1271 * using fc_fcp_timer_set(). 1272 * 1273 * Algorithm: 1274 * 1275 * If REC is supported, just issue it, and return. The REC exchange will 1276 * complete or time out, and recovery can continue at that point. 1277 * 1278 * Otherwise, if the response has been received without all the data, 1279 * it has been ER_TIMEOUT since the response was received. 1280 * 1281 * If the response has not been received, 1282 * we see if data was received recently. If it has been, we continue waiting, 1283 * otherwise, we abort the command. 1284 */ 1285static void fc_fcp_timeout(unsigned long data) 1286{ 1287 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1288 struct fc_rport *rport = fsp->rport; 1289 struct fc_rport_libfc_priv *rp = rport->dd_data; 1290 1291 if (fc_fcp_lock_pkt(fsp)) 1292 return; 1293 1294 if (fsp->cdb_cmd.fc_tm_flags) 1295 goto unlock; 1296 1297 fsp->state |= FC_SRB_FCP_PROCESSING_TMO; 1298 1299 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) 1300 fc_fcp_rec(fsp); 1301 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), 1302 jiffies)) 1303 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1304 else if (fsp->state & FC_SRB_RCV_STATUS) 1305 fc_fcp_complete_locked(fsp); 1306 else 1307 fc_timeout_error(fsp); 1308 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; 1309unlock: 1310 fc_fcp_unlock_pkt(fsp); 1311} 1312 1313/* 1314 * Send a REC ELS request 1315 */ 1316static void fc_fcp_rec(struct fc_fcp_pkt *fsp) 1317{ 1318 struct fc_lport *lp; 1319 struct fc_frame *fp; 1320 struct fc_rport *rport; 1321 struct fc_rport_libfc_priv *rp; 1322 1323 lp = fsp->lp; 1324 rport = fsp->rport; 1325 rp = rport->dd_data; 1326 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { 1327 fsp->status_code = FC_HRD_ERROR; 1328 fsp->io_status = 0; 1329 fc_fcp_complete_locked(fsp); 1330 return; 1331 } 1332 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); 1333 if (!fp) 1334 goto retry; 1335 1336 fr_seq(fp) = fsp->seq_ptr; 1337 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1338 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, 1339 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1340 if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp, 1341 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1342 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1343 return; 1344 } 1345 fc_frame_free(fp); 1346retry: 1347 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1348 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1349 else 1350 fc_timeout_error(fsp); 1351} 1352 1353/* 1354 * Receive handler for REC ELS frame 1355 * if it is a reject then let the scsi layer to handle 1356 * the timeout. if it is a LS_ACC then if the io was not completed 1357 * then set the timeout and return otherwise complete the exchange 1358 * and tell the scsi layer to restart the I/O. 1359 */ 1360static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1361{ 1362 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 1363 struct fc_els_rec_acc *recp; 1364 struct fc_els_ls_rjt *rjt; 1365 u32 e_stat; 1366 u8 opcode; 1367 u32 offset; 1368 enum dma_data_direction data_dir; 1369 enum fc_rctl r_ctl; 1370 struct fc_rport_libfc_priv *rp; 1371 1372 if (IS_ERR(fp)) { 1373 fc_fcp_rec_error(fsp, fp); 1374 return; 1375 } 1376 1377 if (fc_fcp_lock_pkt(fsp)) 1378 goto out; 1379 1380 fsp->recov_retry = 0; 1381 opcode = fc_frame_payload_op(fp); 1382 if (opcode == ELS_LS_RJT) { 1383 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1384 switch (rjt->er_reason) { 1385 default: 1386 FC_FCP_DBG(fsp, "device %x unexpected REC reject " 1387 "reason %d expl %d\n", 1388 fsp->rport->port_id, rjt->er_reason, 1389 rjt->er_explan); 1390 /* fall through */ 1391 case ELS_RJT_UNSUP: 1392 FC_FCP_DBG(fsp, "device does not support REC\n"); 1393 rp = fsp->rport->dd_data; 1394 /* 1395 * if we do not spport RECs or got some bogus 1396 * reason then resetup timer so we check for 1397 * making progress. 1398 */ 1399 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; 1400 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1401 break; 1402 case ELS_RJT_LOGIC: 1403 case ELS_RJT_UNAB: 1404 /* 1405 * If no data transfer, the command frame got dropped 1406 * so we just retry. If data was transferred, we 1407 * lost the response but the target has no record, 1408 * so we abort and retry. 1409 */ 1410 if (rjt->er_explan == ELS_EXPL_OXID_RXID && 1411 fsp->xfer_len == 0) { 1412 fc_fcp_retry_cmd(fsp); 1413 break; 1414 } 1415 fc_timeout_error(fsp); 1416 break; 1417 } 1418 } else if (opcode == ELS_LS_ACC) { 1419 if (fsp->state & FC_SRB_ABORTED) 1420 goto unlock_out; 1421 1422 data_dir = fsp->cmd->sc_data_direction; 1423 recp = fc_frame_payload_get(fp, sizeof(*recp)); 1424 offset = ntohl(recp->reca_fc4value); 1425 e_stat = ntohl(recp->reca_e_stat); 1426 1427 if (e_stat & ESB_ST_COMPLETE) { 1428 1429 /* 1430 * The exchange is complete. 1431 * 1432 * For output, we must've lost the response. 1433 * For input, all data must've been sent. 1434 * We lost may have lost the response 1435 * (and a confirmation was requested) and maybe 1436 * some data. 1437 * 1438 * If all data received, send SRR 1439 * asking for response. If partial data received, 1440 * or gaps, SRR requests data at start of gap. 1441 * Recovery via SRR relies on in-order-delivery. 1442 */ 1443 if (data_dir == DMA_TO_DEVICE) { 1444 r_ctl = FC_RCTL_DD_CMD_STATUS; 1445 } else if (fsp->xfer_contig_end == offset) { 1446 r_ctl = FC_RCTL_DD_CMD_STATUS; 1447 } else { 1448 offset = fsp->xfer_contig_end; 1449 r_ctl = FC_RCTL_DD_SOL_DATA; 1450 } 1451 fc_fcp_srr(fsp, r_ctl, offset); 1452 } else if (e_stat & ESB_ST_SEQ_INIT) { 1453 1454 /* 1455 * The remote port has the initiative, so just 1456 * keep waiting for it to complete. 1457 */ 1458 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1459 } else { 1460 1461 /* 1462 * The exchange is incomplete, we have seq. initiative. 1463 * Lost response with requested confirmation, 1464 * lost confirmation, lost transfer ready or 1465 * lost write data. 1466 * 1467 * For output, if not all data was received, ask 1468 * for transfer ready to be repeated. 1469 * 1470 * If we received or sent all the data, send SRR to 1471 * request response. 1472 * 1473 * If we lost a response, we may have lost some read 1474 * data as well. 1475 */ 1476 r_ctl = FC_RCTL_DD_SOL_DATA; 1477 if (data_dir == DMA_TO_DEVICE) { 1478 r_ctl = FC_RCTL_DD_CMD_STATUS; 1479 if (offset < fsp->data_len) 1480 r_ctl = FC_RCTL_DD_DATA_DESC; 1481 } else if (offset == fsp->xfer_contig_end) { 1482 r_ctl = FC_RCTL_DD_CMD_STATUS; 1483 } else if (fsp->xfer_contig_end < offset) { 1484 offset = fsp->xfer_contig_end; 1485 } 1486 fc_fcp_srr(fsp, r_ctl, offset); 1487 } 1488 } 1489unlock_out: 1490 fc_fcp_unlock_pkt(fsp); 1491out: 1492 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1493 fc_frame_free(fp); 1494} 1495 1496/* 1497 * Handle error response or timeout for REC exchange. 1498 */ 1499static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1500{ 1501 int error = PTR_ERR(fp); 1502 1503 if (fc_fcp_lock_pkt(fsp)) 1504 goto out; 1505 1506 switch (error) { 1507 case -FC_EX_CLOSED: 1508 fc_fcp_retry_cmd(fsp); 1509 break; 1510 1511 default: 1512 FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n", 1513 fsp, fsp->rport->port_id, error); 1514 fsp->status_code = FC_CMD_PLOGO; 1515 /* fall through */ 1516 1517 case -FC_EX_TIMEOUT: 1518 /* 1519 * Assume REC or LS_ACC was lost. 1520 * The exchange manager will have aborted REC, so retry. 1521 */ 1522 FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n", 1523 fsp->rport->port_id, error, fsp->recov_retry, 1524 FC_MAX_RECOV_RETRY); 1525 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1526 fc_fcp_rec(fsp); 1527 else 1528 fc_timeout_error(fsp); 1529 break; 1530 } 1531 fc_fcp_unlock_pkt(fsp); 1532out: 1533 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1534} 1535 1536/* 1537 * Time out error routine: 1538 * abort's the I/O close the exchange and 1539 * send completion notification to scsi layer 1540 */ 1541static void fc_timeout_error(struct fc_fcp_pkt *fsp) 1542{ 1543 fsp->status_code = FC_CMD_TIME_OUT; 1544 fsp->cdb_status = 0; 1545 fsp->io_status = 0; 1546 /* 1547 * if this fails then we let the scsi command timer fire and 1548 * scsi-ml escalate. 1549 */ 1550 fc_fcp_send_abort(fsp); 1551} 1552 1553/* 1554 * Sequence retransmission request. 1555 * This is called after receiving status but insufficient data, or 1556 * when expecting status but the request has timed out. 1557 */ 1558static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) 1559{ 1560 struct fc_lport *lp = fsp->lp; 1561 struct fc_rport *rport; 1562 struct fc_rport_libfc_priv *rp; 1563 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); 1564 struct fc_seq *seq; 1565 struct fcp_srr *srr; 1566 struct fc_frame *fp; 1567 u8 cdb_op; 1568 1569 rport = fsp->rport; 1570 rp = rport->dd_data; 1571 cdb_op = fsp->cdb_cmd.fc_cdb[0]; 1572 1573 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) 1574 goto retry; /* shouldn't happen */ 1575 fp = fc_frame_alloc(lp, sizeof(*srr)); 1576 if (!fp) 1577 goto retry; 1578 1579 srr = fc_frame_payload_get(fp, sizeof(*srr)); 1580 memset(srr, 0, sizeof(*srr)); 1581 srr->srr_op = ELS_SRR; 1582 srr->srr_ox_id = htons(ep->oxid); 1583 srr->srr_rx_id = htons(ep->rxid); 1584 srr->srr_r_ctl = r_ctl; 1585 srr->srr_rel_off = htonl(offset); 1586 1587 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1588 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1589 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1590 1591 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, 1592 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); 1593 if (!seq) { 1594 fc_frame_free(fp); 1595 goto retry; 1596 } 1597 fsp->recov_seq = seq; 1598 fsp->xfer_len = offset; 1599 fsp->xfer_contig_end = offset; 1600 fsp->state &= ~FC_SRB_RCV_STATUS; 1601 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */ 1602 return; 1603retry: 1604 fc_fcp_retry_cmd(fsp); 1605} 1606 1607/* 1608 * Handle response from SRR. 1609 */ 1610static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1611{ 1612 struct fc_fcp_pkt *fsp = arg; 1613 struct fc_frame_header *fh; 1614 1615 if (IS_ERR(fp)) { 1616 fc_fcp_srr_error(fsp, fp); 1617 return; 1618 } 1619 1620 if (fc_fcp_lock_pkt(fsp)) 1621 goto out; 1622 1623 fh = fc_frame_header_get(fp); 1624 /* 1625 * BUG? fc_fcp_srr_error calls exch_done which would release 1626 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT, 1627 * then fc_exch_timeout would be sending an abort. The exch_done 1628 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing 1629 * an abort response though. 1630 */ 1631 if (fh->fh_type == FC_TYPE_BLS) { 1632 fc_fcp_unlock_pkt(fsp); 1633 return; 1634 } 1635 1636 fsp->recov_seq = NULL; 1637 switch (fc_frame_payload_op(fp)) { 1638 case ELS_LS_ACC: 1639 fsp->recov_retry = 0; 1640 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1641 break; 1642 case ELS_LS_RJT: 1643 default: 1644 fc_timeout_error(fsp); 1645 break; 1646 } 1647 fc_fcp_unlock_pkt(fsp); 1648 fsp->lp->tt.exch_done(seq); 1649out: 1650 fc_frame_free(fp); 1651 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1652} 1653 1654static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1655{ 1656 if (fc_fcp_lock_pkt(fsp)) 1657 goto out; 1658 fsp->lp->tt.exch_done(fsp->recov_seq); 1659 fsp->recov_seq = NULL; 1660 switch (PTR_ERR(fp)) { 1661 case -FC_EX_TIMEOUT: 1662 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1663 fc_fcp_rec(fsp); 1664 else 1665 fc_timeout_error(fsp); 1666 break; 1667 case -FC_EX_CLOSED: /* e.g., link failure */ 1668 /* fall through */ 1669 default: 1670 fc_fcp_retry_cmd(fsp); 1671 break; 1672 } 1673 fc_fcp_unlock_pkt(fsp); 1674out: 1675 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1676} 1677 1678static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) 1679{ 1680 /* lock ? */ 1681 return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; 1682} 1683 1684/** 1685 * fc_queuecommand - The queuecommand function of the scsi template 1686 * @cmd: struct scsi_cmnd to be executed 1687 * @done: Callback function to be called when cmd is completed 1688 * 1689 * this is the i/o strategy routine, called by the scsi layer 1690 * this routine is called with holding the host_lock. 1691 */ 1692int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) 1693{ 1694 struct fc_lport *lp; 1695 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1696 struct fc_fcp_pkt *fsp; 1697 struct fc_rport_libfc_priv *rp; 1698 int rval; 1699 int rc = 0; 1700 struct fcoe_dev_stats *stats; 1701 1702 lp = shost_priv(sc_cmd->device->host); 1703 1704 rval = fc_remote_port_chkready(rport); 1705 if (rval) { 1706 sc_cmd->result = rval; 1707 done(sc_cmd); 1708 goto out; 1709 } 1710 1711 if (!*(struct fc_remote_port **)rport->dd_data) { 1712 /* 1713 * rport is transitioning from blocked/deleted to 1714 * online 1715 */ 1716 sc_cmd->result = DID_IMM_RETRY << 16; 1717 done(sc_cmd); 1718 goto out; 1719 } 1720 1721 rp = rport->dd_data; 1722 1723 if (!fc_fcp_lport_queue_ready(lp)) { 1724 rc = SCSI_MLQUEUE_HOST_BUSY; 1725 goto out; 1726 } 1727 1728 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); 1729 if (fsp == NULL) { 1730 rc = SCSI_MLQUEUE_HOST_BUSY; 1731 goto out; 1732 } 1733 1734 /* 1735 * build the libfc request pkt 1736 */ 1737 fsp->cmd = sc_cmd; /* save the cmd */ 1738 fsp->lp = lp; /* save the softc ptr */ 1739 fsp->rport = rport; /* set the remote port ptr */ 1740 sc_cmd->scsi_done = done; 1741 1742 /* 1743 * set up the transfer length 1744 */ 1745 fsp->data_len = scsi_bufflen(sc_cmd); 1746 fsp->xfer_len = 0; 1747 1748 /* 1749 * setup the data direction 1750 */ 1751 stats = fc_lport_get_stats(lp); 1752 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1753 fsp->req_flags = FC_SRB_READ; 1754 stats->InputRequests++; 1755 stats->InputMegabytes = fsp->data_len; 1756 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 1757 fsp->req_flags = FC_SRB_WRITE; 1758 stats->OutputRequests++; 1759 stats->OutputMegabytes = fsp->data_len; 1760 } else { 1761 fsp->req_flags = 0; 1762 stats->ControlRequests++; 1763 } 1764 1765 fsp->tgt_flags = rp->flags; 1766 1767 init_timer(&fsp->timer); 1768 fsp->timer.data = (unsigned long)fsp; 1769 1770 /* 1771 * send it to the lower layer 1772 * if we get -1 return then put the request in the pending 1773 * queue. 1774 */ 1775 rval = fc_fcp_pkt_send(lp, fsp); 1776 if (rval != 0) { 1777 fsp->state = FC_SRB_FREE; 1778 fc_fcp_pkt_release(fsp); 1779 rc = SCSI_MLQUEUE_HOST_BUSY; 1780 } 1781out: 1782 return rc; 1783} 1784EXPORT_SYMBOL(fc_queuecommand); 1785 1786/** 1787 * fc_io_compl() - Handle responses for completed commands 1788 * @fsp: scsi packet 1789 * 1790 * Translates a error to a Linux SCSI error. 1791 * 1792 * The fcp packet lock must be held when calling. 1793 */ 1794static void fc_io_compl(struct fc_fcp_pkt *fsp) 1795{ 1796 struct fc_fcp_internal *si; 1797 struct scsi_cmnd *sc_cmd; 1798 struct fc_lport *lp; 1799 unsigned long flags; 1800 1801 /* release outstanding ddp context */ 1802 fc_fcp_ddp_done(fsp); 1803 1804 fsp->state |= FC_SRB_COMPL; 1805 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { 1806 spin_unlock_bh(&fsp->scsi_pkt_lock); 1807 del_timer_sync(&fsp->timer); 1808 spin_lock_bh(&fsp->scsi_pkt_lock); 1809 } 1810 1811 lp = fsp->lp; 1812 si = fc_get_scsi_internal(lp); 1813 spin_lock_irqsave(lp->host->host_lock, flags); 1814 if (!fsp->cmd) { 1815 spin_unlock_irqrestore(lp->host->host_lock, flags); 1816 return; 1817 } 1818 1819 /* 1820 * if a command timed out while we had to try and throttle IO 1821 * and it is now getting cleaned up, then we are about to 1822 * try again so clear the throttled flag incase we get more 1823 * time outs. 1824 */ 1825 if (si->throttled && fsp->state & FC_SRB_NOMEM) 1826 si->throttled = 0; 1827 1828 sc_cmd = fsp->cmd; 1829 fsp->cmd = NULL; 1830 1831 if (!sc_cmd->SCp.ptr) { 1832 spin_unlock_irqrestore(lp->host->host_lock, flags); 1833 return; 1834 } 1835 1836 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1837 switch (fsp->status_code) { 1838 case FC_COMPLETE: 1839 if (fsp->cdb_status == 0) { 1840 /* 1841 * good I/O status 1842 */ 1843 sc_cmd->result = DID_OK << 16; 1844 if (fsp->scsi_resid) 1845 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1846 } else if (fsp->cdb_status == QUEUE_FULL) { 1847 struct scsi_device *tmp_sdev; 1848 struct scsi_device *sdev = sc_cmd->device; 1849 1850 shost_for_each_device(tmp_sdev, sdev->host) { 1851 if (tmp_sdev->id != sdev->id) 1852 continue; 1853 1854 if (tmp_sdev->queue_depth > 1) { 1855 scsi_track_queue_full(tmp_sdev, 1856 tmp_sdev-> 1857 queue_depth - 1); 1858 } 1859 } 1860 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; 1861 } else { 1862 /* 1863 * transport level I/O was ok but scsi 1864 * has non zero status 1865 */ 1866 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; 1867 } 1868 break; 1869 case FC_ERROR: 1870 sc_cmd->result = DID_ERROR << 16; 1871 break; 1872 case FC_DATA_UNDRUN: 1873 if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) { 1874 /* 1875 * scsi status is good but transport level 1876 * underrun. 1877 */ 1878 sc_cmd->result = DID_OK << 16; 1879 } else { 1880 /* 1881 * scsi got underrun, this is an error 1882 */ 1883 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1884 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1885 } 1886 break; 1887 case FC_DATA_OVRRUN: 1888 /* 1889 * overrun is an error 1890 */ 1891 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1892 break; 1893 case FC_CMD_ABORTED: 1894 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; 1895 break; 1896 case FC_CMD_TIME_OUT: 1897 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1898 break; 1899 case FC_CMD_RESET: 1900 sc_cmd->result = (DID_RESET << 16); 1901 break; 1902 case FC_HRD_ERROR: 1903 sc_cmd->result = (DID_NO_CONNECT << 16); 1904 break; 1905 default: 1906 sc_cmd->result = (DID_ERROR << 16); 1907 break; 1908 } 1909 1910 list_del(&fsp->list); 1911 sc_cmd->SCp.ptr = NULL; 1912 sc_cmd->scsi_done(sc_cmd); 1913 spin_unlock_irqrestore(lp->host->host_lock, flags); 1914 1915 /* release ref from initial allocation in queue command */ 1916 fc_fcp_pkt_release(fsp); 1917} 1918 1919/** 1920 * fc_fcp_complete() - complete processing of a fcp packet 1921 * @fsp: fcp packet 1922 * 1923 * This function may sleep if a fsp timer is pending. 1924 * The host lock must not be held by caller. 1925 */ 1926void fc_fcp_complete(struct fc_fcp_pkt *fsp) 1927{ 1928 if (fc_fcp_lock_pkt(fsp)) 1929 return; 1930 1931 fc_fcp_complete_locked(fsp); 1932 fc_fcp_unlock_pkt(fsp); 1933} 1934EXPORT_SYMBOL(fc_fcp_complete); 1935 1936/** 1937 * fc_eh_abort() - Abort a command 1938 * @sc_cmd: scsi command to abort 1939 * 1940 * From scsi host template. 1941 * send ABTS to the target device and wait for the response 1942 * sc_cmd is the pointer to the command to be aborted. 1943 */ 1944int fc_eh_abort(struct scsi_cmnd *sc_cmd) 1945{ 1946 struct fc_fcp_pkt *fsp; 1947 struct fc_lport *lp; 1948 int rc = FAILED; 1949 unsigned long flags; 1950 1951 lp = shost_priv(sc_cmd->device->host); 1952 if (lp->state != LPORT_ST_READY) 1953 return rc; 1954 else if (!lp->link_up) 1955 return rc; 1956 1957 spin_lock_irqsave(lp->host->host_lock, flags); 1958 fsp = CMD_SP(sc_cmd); 1959 if (!fsp) { 1960 /* command completed while scsi eh was setting up */ 1961 spin_unlock_irqrestore(lp->host->host_lock, flags); 1962 return SUCCESS; 1963 } 1964 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ 1965 fc_fcp_pkt_hold(fsp); 1966 spin_unlock_irqrestore(lp->host->host_lock, flags); 1967 1968 if (fc_fcp_lock_pkt(fsp)) { 1969 /* completed while we were waiting for timer to be deleted */ 1970 rc = SUCCESS; 1971 goto release_pkt; 1972 } 1973 1974 rc = fc_fcp_pkt_abort(lp, fsp); 1975 fc_fcp_unlock_pkt(fsp); 1976 1977release_pkt: 1978 fc_fcp_pkt_release(fsp); 1979 return rc; 1980} 1981EXPORT_SYMBOL(fc_eh_abort); 1982 1983/** 1984 * fc_eh_device_reset() Reset a single LUN 1985 * @sc_cmd: scsi command 1986 * 1987 * Set from scsi host template to send tm cmd to the target and wait for the 1988 * response. 1989 */ 1990int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 1991{ 1992 struct fc_lport *lp; 1993 struct fc_fcp_pkt *fsp; 1994 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1995 int rc = FAILED; 1996 struct fc_rport_libfc_priv *rp; 1997 int rval; 1998 1999 rval = fc_remote_port_chkready(rport); 2000 if (rval) 2001 goto out; 2002 2003 rp = rport->dd_data; 2004 lp = shost_priv(sc_cmd->device->host); 2005 2006 if (lp->state != LPORT_ST_READY) 2007 return rc; 2008 2009 FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); 2010 2011 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); 2012 if (fsp == NULL) { 2013 printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); 2014 sc_cmd->result = DID_NO_CONNECT << 16; 2015 goto out; 2016 } 2017 2018 /* 2019 * Build the libfc request pkt. Do not set the scsi cmnd, because 2020 * the sc passed in is not setup for execution like when sent 2021 * through the queuecommand callout. 2022 */ 2023 fsp->lp = lp; /* save the softc ptr */ 2024 fsp->rport = rport; /* set the remote port ptr */ 2025 2026 /* 2027 * flush outstanding commands 2028 */ 2029 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); 2030 fsp->state = FC_SRB_FREE; 2031 fc_fcp_pkt_release(fsp); 2032 2033out: 2034 return rc; 2035} 2036EXPORT_SYMBOL(fc_eh_device_reset); 2037 2038/** 2039 * fc_eh_host_reset() - The reset function will reset the ports on the host. 2040 * @sc_cmd: scsi command 2041 */ 2042int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) 2043{ 2044 struct Scsi_Host *shost = sc_cmd->device->host; 2045 struct fc_lport *lp = shost_priv(shost); 2046 unsigned long wait_tmo; 2047 2048 FC_SCSI_DBG(lp, "Resetting host\n"); 2049 2050 lp->tt.lport_reset(lp); 2051 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2052 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) 2053 msleep(1000); 2054 2055 if (fc_fcp_lport_queue_ready(lp)) { 2056 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " 2057 "on port (%6x)\n", fc_host_port_id(lp->host)); 2058 return SUCCESS; 2059 } else { 2060 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " 2061 "port (%6x) is not ready.\n", 2062 fc_host_port_id(lp->host)); 2063 return FAILED; 2064 } 2065} 2066EXPORT_SYMBOL(fc_eh_host_reset); 2067 2068/** 2069 * fc_slave_alloc() - configure queue depth 2070 * @sdev: scsi device 2071 * 2072 * Configures queue depth based on host's cmd_per_len. If not set 2073 * then we use the libfc default. 2074 */ 2075int fc_slave_alloc(struct scsi_device *sdev) 2076{ 2077 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 2078 int queue_depth; 2079 2080 if (!rport || fc_remote_port_chkready(rport)) 2081 return -ENXIO; 2082 2083 if (sdev->tagged_supported) { 2084 if (sdev->host->hostt->cmd_per_lun) 2085 queue_depth = sdev->host->hostt->cmd_per_lun; 2086 else 2087 queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; 2088 scsi_activate_tcq(sdev, queue_depth); 2089 } 2090 return 0; 2091} 2092EXPORT_SYMBOL(fc_slave_alloc); 2093 2094int fc_change_queue_depth(struct scsi_device *sdev, int qdepth) 2095{ 2096 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2097 return sdev->queue_depth; 2098} 2099EXPORT_SYMBOL(fc_change_queue_depth); 2100 2101int fc_change_queue_type(struct scsi_device *sdev, int tag_type) 2102{ 2103 if (sdev->tagged_supported) { 2104 scsi_set_tag_type(sdev, tag_type); 2105 if (tag_type) 2106 scsi_activate_tcq(sdev, sdev->queue_depth); 2107 else 2108 scsi_deactivate_tcq(sdev, sdev->queue_depth); 2109 } else 2110 tag_type = 0; 2111 2112 return tag_type; 2113} 2114EXPORT_SYMBOL(fc_change_queue_type); 2115 2116void fc_fcp_destroy(struct fc_lport *lp) 2117{ 2118 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 2119 2120 if (!list_empty(&si->scsi_pkt_queue)) 2121 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " 2122 "port (%6x)\n", fc_host_port_id(lp->host)); 2123 2124 mempool_destroy(si->scsi_pkt_pool); 2125 kfree(si); 2126 lp->scsi_priv = NULL; 2127} 2128EXPORT_SYMBOL(fc_fcp_destroy); 2129 2130int fc_fcp_init(struct fc_lport *lp) 2131{ 2132 int rc; 2133 struct fc_fcp_internal *si; 2134 2135 if (!lp->tt.fcp_cmd_send) 2136 lp->tt.fcp_cmd_send = fc_fcp_cmd_send; 2137 2138 if (!lp->tt.fcp_cleanup) 2139 lp->tt.fcp_cleanup = fc_fcp_cleanup; 2140 2141 if (!lp->tt.fcp_abort_io) 2142 lp->tt.fcp_abort_io = fc_fcp_abort_io; 2143 2144 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); 2145 if (!si) 2146 return -ENOMEM; 2147 lp->scsi_priv = si; 2148 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2149 2150 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2151 if (!si->scsi_pkt_pool) { 2152 rc = -ENOMEM; 2153 goto free_internal; 2154 } 2155 return 0; 2156 2157free_internal: 2158 kfree(si); 2159 return rc; 2160} 2161EXPORT_SYMBOL(fc_fcp_init); 2162 2163static int __init libfc_init(void) 2164{ 2165 int rc; 2166 2167 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", 2168 sizeof(struct fc_fcp_pkt), 2169 0, SLAB_HWCACHE_ALIGN, NULL); 2170 if (scsi_pkt_cachep == NULL) { 2171 printk(KERN_ERR "libfc: Unable to allocate SRB cache, " 2172 "module load failed!"); 2173 return -ENOMEM; 2174 } 2175 2176 rc = fc_setup_exch_mgr(); 2177 if (rc) 2178 goto destroy_pkt_cache; 2179 2180 rc = fc_setup_rport(); 2181 if (rc) 2182 goto destroy_em; 2183 2184 return rc; 2185destroy_em: 2186 fc_destroy_exch_mgr(); 2187destroy_pkt_cache: 2188 kmem_cache_destroy(scsi_pkt_cachep); 2189 return rc; 2190} 2191 2192static void __exit libfc_exit(void) 2193{ 2194 kmem_cache_destroy(scsi_pkt_cachep); 2195 fc_destroy_exch_mgr(); 2196 fc_destroy_rport(); 2197} 2198 2199module_init(libfc_init); 2200module_exit(libfc_exit); 2201