fc_fcp.c revision 255f6386b816b2bc0c251af0ee4985ad5a8461b7
/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL v2");

unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");

static struct kmem_cache *scsi_pkt_cachep;

/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of mem */

#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)

/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been returned to the scsi layer.
 */
#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)

struct fc_fcp_internal {
	mempool_t	*scsi_pkt_pool;
	struct list_head scsi_pkt_queue;
	u8		throttled;
};

#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)

/*
 * function prototypes
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
static void fc_timeout_error(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long data);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);

/*
 * command status codes
 */
#define FC_COMPLETE		0
#define FC_CMD_ABORTED		1
#define FC_CMD_RESET		2
#define FC_CMD_PLOGO		3
#define FC_SNS_RCV		4
#define FC_TRANS_ERR		5
#define FC_DATA_OVRRUN		6
#define FC_DATA_UNDRUN		7
#define FC_ERROR		8
#define FC_HRD_ERROR		9
#define FC_CMD_TIME_OUT		10

/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
#define FC_SCSI_TM_TOV		(10 * HZ)
#define FC_SCSI_REC_TOV		(2 * HZ)
#define FC_HOST_RESET_TIMEOUT	(30 * HZ)

#define FC_MAX_ERROR_CNT	5
#define FC_MAX_RECOV_RETRY	3

#define FC_FCP_DFLT_QUEUE_DEPTH 32

/**
 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
 * @lp:	fc lport struct
 * @gfp: gfp flags for allocation
 *
 * This is used by upper layer scsi driver.
 * Return Value : scsi_pkt structure or null on allocation failure.
 * Context	: call from process context. no locking required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;

	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lp = lp;
		atomic_set(&fsp->ref_cnt, 1);
		init_timer(&fsp->timer);
		INIT_LIST_HEAD(&fsp->list);
		spin_lock_init(&fsp->scsi_pkt_lock);
	}
	return fsp;
}

/**
 * fc_fcp_pkt_release() - release hold on scsi_pkt packet
 * @fsp: fcp packet struct
 *
 * This is used by upper layer scsi driver.
 * Context	: call from process and interrupt context.
 *		  no locking required
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}

static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}

/**
 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
 * @seq: exchange sequence
 * @fsp: fcp packet struct
 *
 * Release the hold on the scsi_pkt packet that was taken to keep it
 * until the EM layer exch resource is freed.
 * Context	: called from EM layer.
 * no locking required
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
 * @fsp: fcp packet
 *
 * We should only return error if we return a command to scsi-ml before
 * getting a response. This can happen in cases where we send an abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packet's refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}

static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}

static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}

static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}

/*
 * Retry command.
 * An abort isn't needed.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->io_status = 0;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/*
 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP
 * transfer for a read I/O indicated by the fc_fcp_pkt.
 * @fsp: ptr to the fc_fcp_pkt
 *
 * This is called in exch_seq_send() when we have a newly allocated
 * exchange with a valid exchange id to setup ddp.
 *
 * returns: none
 */
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
	struct fc_lport *lp;

	if (!fsp)
		return;

	lp = fsp->lp;
	if ((fsp->req_flags & FC_SRB_READ) &&
	    (lp->lro_enabled) && (lp->tt.ddp_setup)) {
		if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd),
				     scsi_sg_count(fsp->cmd)))
			fsp->xfer_ddp = xid;
	}
}

/*
 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any
 * DDP related resources for this I/O if it is initialized
 * as a ddp transfer
 * @fsp: ptr to the fc_fcp_pkt
 *
 * returns: none
 */
static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp;

	if (!fsp)
		return;

	if (fsp->xfer_ddp == FC_XID_UNKNOWN)
		return;

	lp = fsp->lp;
	if (lp->tt.ddp_done) {
		fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
		fsp->xfer_ddp = FC_XID_UNKNOWN;
	}
}


/*
 * Receive SCSI data from target.
 * Called after receiving solicited data.
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct scsi_cmnd *sc = fsp->cmd;
	struct fc_lport *lp = fsp->lp;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	size_t start_offset;
	size_t offset;
	u32 crc;
	u32 copy_len = 0;
	size_t len;
	void *buf;
	struct scatterlist *sg;
	size_t remaining;

	fh = fc_frame_header_get(fp);
	offset = ntohl(fh->fh_parm_offset);
	start_offset = offset;
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	/* if this I/O is ddped, update xfer len */
	fc_fcp_ddp_done(fsp);

	if (offset + len > fsp->data_len) {
		/* this should never happen */
		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
		    fc_frame_crc_check(fp))
			goto crc_err;
		FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
			   "data_len %x\n", len, offset, fsp->data_len);
		fc_fcp_retry_cmd(fsp);
		return;
	}
	if (offset != fsp->xfer_len)
		fsp->state |= FC_SRB_DISCONTIG;

	crc = 0;
	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
		crc = crc32(~0, (u8 *) fh, sizeof(*fh));

	sg = scsi_sglist(sc);
	remaining = len;

	while (remaining > 0 && sg) {
		size_t off;
		void *page_addr;
		size_t sg_bytes;

		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		sg_bytes = min(remaining, sg->length - offset);

		/*
		 * The scatterlist item may be bigger than PAGE_SIZE,
		 * but we are limited to mapping PAGE_SIZE at a time.
		 */
		off = offset + sg->offset;
		sg_bytes = min(sg_bytes, (size_t)
			       (PAGE_SIZE - (off & ~PAGE_MASK)));
		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
					KM_SOFTIRQ0);
		if (!page_addr)
			break;		/* XXX panic? */

		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
			crc = crc32(crc, buf, sg_bytes);
		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
		       sg_bytes);

		kunmap_atomic(page_addr, KM_SOFTIRQ0);
		buf += sg_bytes;
		offset += sg_bytes;
		remaining -= sg_bytes;
		copy_len += sg_bytes;
	}

	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
		buf = fc_frame_payload_get(fp, 0);
		if (len % 4) {
			crc = crc32(crc, buf + len, 4 - (len % 4));
			len += 4 - (len % 4);
		}

		if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
			stats = fc_lport_get_stats(lp);
			stats->ErrorFrames++;
			/* FIXME - per cpu count, not total count!
*/ 411 if (stats->InvalidCRCCount++ < 5) 412 printk(KERN_WARNING "libfc: CRC error on data " 413 "frame for port (%6x)\n", 414 fc_host_port_id(lp->host)); 415 /* 416 * Assume the frame is total garbage. 417 * We may have copied it over the good part 418 * of the buffer. 419 * If so, we need to retry the entire operation. 420 * Otherwise, ignore it. 421 */ 422 if (fsp->state & FC_SRB_DISCONTIG) 423 fc_fcp_retry_cmd(fsp); 424 return; 425 } 426 } 427 428 if (fsp->xfer_contig_end == start_offset) 429 fsp->xfer_contig_end += copy_len; 430 fsp->xfer_len += copy_len; 431 432 /* 433 * In the very rare event that this data arrived after the response 434 * and completes the transfer, call the completion handler. 435 */ 436 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) && 437 fsp->xfer_len == fsp->data_len - fsp->scsi_resid) 438 fc_fcp_complete_locked(fsp); 439} 440 441/** 442 * fc_fcp_send_data() - Send SCSI data to target. 443 * @fsp: ptr to fc_fcp_pkt 444 * @sp: ptr to this sequence 445 * @offset: starting offset for this data request 446 * @seq_blen: the burst length for this data request 447 * 448 * Called after receiving a Transfer Ready data descriptor. 449 * if LLD is capable of seq offload then send down seq_blen 450 * size of data in single frame, otherwise send multiple FC 451 * frames of max FC frame payload supported by target port. 452 * 453 * Returns : 0 for success. 454 */ 455static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, 456 size_t offset, size_t seq_blen) 457{ 458 struct fc_exch *ep; 459 struct scsi_cmnd *sc; 460 struct scatterlist *sg; 461 struct fc_frame *fp = NULL; 462 struct fc_lport *lp = fsp->lp; 463 size_t remaining; 464 size_t t_blen; 465 size_t tlen; 466 size_t sg_bytes; 467 size_t frame_offset, fh_parm_offset; 468 int error; 469 void *data = NULL; 470 void *page_addr; 471 int using_sg = lp->sg_supp; 472 u32 f_ctl; 473 474 WARN_ON(seq_blen <= 0); 475 if (unlikely(offset + seq_blen > fsp->data_len)) { 476 /* this should never happen */ 477 FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx " 478 "offset %zx\n", seq_blen, offset); 479 fc_fcp_send_abort(fsp); 480 return 0; 481 } else if (offset != fsp->xfer_len) { 482 /* Out of Order Data Request - no problem, but unexpected. */ 483 FC_FCP_DBG(fsp, "xfer-ready non-contiguous. " 484 "seq_blen %zx offset %zx\n", seq_blen, offset); 485 } 486 487 /* 488 * if LLD is capable of seq_offload then set transport 489 * burst length (t_blen) to seq_blen, otherwise set t_blen 490 * to max FC frame payload previously set in fsp->max_payload. 491 */ 492 t_blen = fsp->max_payload; 493 if (lp->seq_offload) { 494 t_blen = min(seq_blen, (size_t)lp->lso_max); 495 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 496 fsp, seq_blen, lp->lso_max, t_blen); 497 } 498 499 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); 500 if (t_blen > 512) 501 t_blen &= ~(512 - 1); /* round down to block size */ 502 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */ 503 sc = fsp->cmd; 504 505 remaining = seq_blen; 506 fh_parm_offset = frame_offset = offset; 507 tlen = 0; 508 seq = lp->tt.seq_start_next(seq); 509 f_ctl = FC_FC_REL_OFF; 510 WARN_ON(!seq); 511 512 sg = scsi_sglist(sc); 513 514 while (remaining > 0 && sg) { 515 if (offset >= sg->length) { 516 offset -= sg->length; 517 sg = sg_next(sg); 518 continue; 519 } 520 if (!fp) { 521 tlen = min(t_blen, remaining); 522 523 /* 524 * TODO. Temporary workaround. fc_seq_send() can't 525 * handle odd lengths in non-linear skbs. 526 * This will be the final fragment only. 
527 */ 528 if (tlen % 4) 529 using_sg = 0; 530 if (using_sg) { 531 fp = _fc_frame_alloc(lp, 0); 532 if (!fp) 533 return -ENOMEM; 534 } else { 535 fp = fc_frame_alloc(lp, tlen); 536 if (!fp) 537 return -ENOMEM; 538 539 data = (void *)(fr_hdr(fp)) + 540 sizeof(struct fc_frame_header); 541 } 542 fh_parm_offset = frame_offset; 543 fr_max_payload(fp) = fsp->max_payload; 544 } 545 sg_bytes = min(tlen, sg->length - offset); 546 if (using_sg) { 547 get_page(sg_page(sg)); 548 skb_fill_page_desc(fp_skb(fp), 549 skb_shinfo(fp_skb(fp))->nr_frags, 550 sg_page(sg), sg->offset + offset, 551 sg_bytes); 552 fp_skb(fp)->data_len += sg_bytes; 553 fr_len(fp) += sg_bytes; 554 fp_skb(fp)->truesize += PAGE_SIZE; 555 } else { 556 size_t off = offset + sg->offset; 557 558 /* 559 * The scatterlist item may be bigger than PAGE_SIZE, 560 * but we must not cross pages inside the kmap. 561 */ 562 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - 563 (off & ~PAGE_MASK))); 564 page_addr = kmap_atomic(sg_page(sg) + 565 (off >> PAGE_SHIFT), 566 KM_SOFTIRQ0); 567 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 568 sg_bytes); 569 kunmap_atomic(page_addr, KM_SOFTIRQ0); 570 data += sg_bytes; 571 } 572 offset += sg_bytes; 573 frame_offset += sg_bytes; 574 tlen -= sg_bytes; 575 remaining -= sg_bytes; 576 577 if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && 578 (tlen)) 579 continue; 580 581 /* 582 * Send sequence with transfer sequence initiative in case 583 * this is last FCP frame of the sequence. 584 */ 585 if (remaining == 0) 586 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ; 587 588 ep = fc_seq_exch(seq); 589 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, 590 FC_TYPE_FCP, f_ctl, fh_parm_offset); 591 592 /* 593 * send fragment using for a sequence. 594 */ 595 error = lp->tt.seq_send(lp, seq, fp); 596 if (error) { 597 WARN_ON(1); /* send error should be rare */ 598 fc_fcp_retry_cmd(fsp); 599 return 0; 600 } 601 fp = NULL; 602 } 603 fsp->xfer_len += seq_blen; /* premature count? */ 604 return 0; 605} 606 607static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 608{ 609 int ba_done = 1; 610 struct fc_ba_rjt *brp; 611 struct fc_frame_header *fh; 612 613 fh = fc_frame_header_get(fp); 614 switch (fh->fh_r_ctl) { 615 case FC_RCTL_BA_ACC: 616 break; 617 case FC_RCTL_BA_RJT: 618 brp = fc_frame_payload_get(fp, sizeof(*brp)); 619 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR) 620 break; 621 /* fall thru */ 622 default: 623 /* 624 * we will let the command timeout 625 * and scsi-ml recover in this case, 626 * therefore cleared the ba_done flag. 627 */ 628 ba_done = 0; 629 } 630 631 if (ba_done) { 632 fsp->state |= FC_SRB_ABORTED; 633 fsp->state &= ~FC_SRB_ABORT_PENDING; 634 635 if (fsp->wait_for_comp) 636 complete(&fsp->tm_done); 637 else 638 fc_fcp_complete_locked(fsp); 639 } 640} 641 642/** 643 * fc_fcp_reduce_can_queue() - drop can_queue 644 * @lp: lport to drop queueing for 645 * 646 * If we are getting memory allocation failures, then we may 647 * be trying to execute too many commands. We let the running 648 * commands complete or timeout, then try again with a reduced 649 * can_queue. Eventually we will hit the point where we run 650 * on all reserved structs. 
651 */ 652static void fc_fcp_reduce_can_queue(struct fc_lport *lp) 653{ 654 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 655 unsigned long flags; 656 int can_queue; 657 658 spin_lock_irqsave(lp->host->host_lock, flags); 659 if (si->throttled) 660 goto done; 661 si->throttled = 1; 662 663 can_queue = lp->host->can_queue; 664 can_queue >>= 1; 665 if (!can_queue) 666 can_queue = 1; 667 lp->host->can_queue = can_queue; 668 shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" 669 "Reducing can_queue to %d.\n", can_queue); 670done: 671 spin_unlock_irqrestore(lp->host->host_lock, flags); 672} 673 674/** 675 * fc_fcp_recv() - Reveive FCP frames 676 * @seq: The sequence the frame is on 677 * @fp: The FC frame 678 * @arg: The related FCP packet 679 * 680 * Return : None 681 * Context : called from Soft IRQ context 682 * can not called holding list lock 683 */ 684static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) 685{ 686 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 687 struct fc_lport *lport = fsp->lp; 688 struct fc_frame_header *fh; 689 struct fcp_txrdy *dd; 690 u8 r_ctl; 691 int rc = 0; 692 693 if (IS_ERR(fp)) 694 goto errout; 695 696 fh = fc_frame_header_get(fp); 697 r_ctl = fh->fh_r_ctl; 698 699 if (!(lport->state & LPORT_ST_READY)) 700 goto out; 701 if (fc_fcp_lock_pkt(fsp)) 702 goto out; 703 fsp->last_pkt_time = jiffies; 704 705 if (fh->fh_type == FC_TYPE_BLS) { 706 fc_fcp_abts_resp(fsp, fp); 707 goto unlock; 708 } 709 710 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) 711 goto unlock; 712 713 if (r_ctl == FC_RCTL_DD_DATA_DESC) { 714 /* 715 * received XFER RDY from the target 716 * need to send data to the target 717 */ 718 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 719 dd = fc_frame_payload_get(fp, sizeof(*dd)); 720 WARN_ON(!dd); 721 722 rc = fc_fcp_send_data(fsp, seq, 723 (size_t) ntohl(dd->ft_data_ro), 724 (size_t) ntohl(dd->ft_burst_len)); 725 if (!rc) 726 seq->rec_data = fsp->xfer_len; 727 else if (rc == -ENOMEM) 728 fsp->state |= FC_SRB_NOMEM; 729 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { 730 /* 731 * received a DATA frame 732 * next we will copy the data to the system buffer 733 */ 734 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */ 735 fc_fcp_recv_data(fsp, fp); 736 seq->rec_data = fsp->xfer_contig_end; 737 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) { 738 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 739 740 fc_fcp_resp(fsp, fp); 741 } else { 742 FC_FCP_DBG(fsp, "unexpected frame. 
r_ctl %x\n", r_ctl); 743 } 744unlock: 745 fc_fcp_unlock_pkt(fsp); 746out: 747 fc_frame_free(fp); 748errout: 749 if (IS_ERR(fp)) 750 fc_fcp_error(fsp, fp); 751 else if (rc == -ENOMEM) 752 fc_fcp_reduce_can_queue(lport); 753} 754 755static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 756{ 757 struct fc_frame_header *fh; 758 struct fcp_resp *fc_rp; 759 struct fcp_resp_ext *rp_ex; 760 struct fcp_resp_rsp_info *fc_rp_info; 761 u32 plen; 762 u32 expected_len; 763 u32 respl = 0; 764 u32 snsl = 0; 765 u8 flags = 0; 766 767 plen = fr_len(fp); 768 fh = (struct fc_frame_header *)fr_hdr(fp); 769 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp))) 770 goto len_err; 771 plen -= sizeof(*fh); 772 fc_rp = (struct fcp_resp *)(fh + 1); 773 fsp->cdb_status = fc_rp->fr_status; 774 flags = fc_rp->fr_flags; 775 fsp->scsi_comp_flags = flags; 776 expected_len = fsp->data_len; 777 778 /* if ddp, update xfer len */ 779 fc_fcp_ddp_done(fsp); 780 781 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) { 782 rp_ex = (void *)(fc_rp + 1); 783 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) { 784 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex)) 785 goto len_err; 786 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); 787 if (flags & FCP_RSP_LEN_VAL) { 788 respl = ntohl(rp_ex->fr_rsp_len); 789 if (respl != sizeof(*fc_rp_info)) 790 goto len_err; 791 if (fsp->wait_for_comp) { 792 /* Abuse cdb_status for rsp code */ 793 fsp->cdb_status = fc_rp_info->rsp_code; 794 complete(&fsp->tm_done); 795 /* 796 * tmfs will not have any scsi cmd so 797 * exit here 798 */ 799 return; 800 } else 801 goto err; 802 } 803 if (flags & FCP_SNS_LEN_VAL) { 804 snsl = ntohl(rp_ex->fr_sns_len); 805 if (snsl > SCSI_SENSE_BUFFERSIZE) 806 snsl = SCSI_SENSE_BUFFERSIZE; 807 memcpy(fsp->cmd->sense_buffer, 808 (char *)fc_rp_info + respl, snsl); 809 } 810 } 811 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) { 812 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid)) 813 goto len_err; 814 if (flags & FCP_RESID_UNDER) { 815 fsp->scsi_resid = ntohl(rp_ex->fr_resid); 816 /* 817 * The cmnd->underflow is the minimum number of 818 * bytes that must be transfered for this 819 * command. Provided a sense condition is not 820 * present, make sure the actual amount 821 * transferred is at least the underflow value 822 * or fail. 823 */ 824 if (!(flags & FCP_SNS_LEN_VAL) && 825 (fc_rp->fr_status == 0) && 826 (scsi_bufflen(fsp->cmd) - 827 fsp->scsi_resid) < fsp->cmd->underflow) 828 goto err; 829 expected_len -= fsp->scsi_resid; 830 } else { 831 fsp->status_code = FC_ERROR; 832 } 833 } 834 } 835 fsp->state |= FC_SRB_RCV_STATUS; 836 837 /* 838 * Check for missing or extra data frames. 839 */ 840 if (unlikely(fsp->xfer_len != expected_len)) { 841 if (fsp->xfer_len < expected_len) { 842 /* 843 * Some data may be queued locally, 844 * Wait a at least one jiffy to see if it is delivered. 845 * If this expires without data, we may do SRR. 846 */ 847 fc_fcp_timer_set(fsp, 2); 848 return; 849 } 850 fsp->status_code = FC_DATA_OVRRUN; 851 FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, " 852 "len %x, data len %x\n", 853 fsp->rport->port_id, 854 fsp->xfer_len, expected_len, fsp->data_len); 855 } 856 fc_fcp_complete_locked(fsp); 857 return; 858 859len_err: 860 FC_FCP_DBG(fsp, "short FCP response. 
flags 0x%x len %u respl %u " 861 "snsl %u\n", flags, fr_len(fp), respl, snsl); 862err: 863 fsp->status_code = FC_ERROR; 864 fc_fcp_complete_locked(fsp); 865} 866 867/** 868 * fc_fcp_complete_locked() - complete processing of a fcp packet 869 * @fsp: fcp packet 870 * 871 * This function may sleep if a timer is pending. The packet lock must be 872 * held, and the host lock must not be held. 873 */ 874static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) 875{ 876 struct fc_lport *lp = fsp->lp; 877 struct fc_seq *seq; 878 struct fc_exch *ep; 879 u32 f_ctl; 880 881 if (fsp->state & FC_SRB_ABORT_PENDING) 882 return; 883 884 if (fsp->state & FC_SRB_ABORTED) { 885 if (!fsp->status_code) 886 fsp->status_code = FC_CMD_ABORTED; 887 } else { 888 /* 889 * Test for transport underrun, independent of response 890 * underrun status. 891 */ 892 if (fsp->xfer_len < fsp->data_len && !fsp->io_status && 893 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || 894 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { 895 fsp->status_code = FC_DATA_UNDRUN; 896 fsp->io_status = 0; 897 } 898 } 899 900 seq = fsp->seq_ptr; 901 if (seq) { 902 fsp->seq_ptr = NULL; 903 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) { 904 struct fc_frame *conf_frame; 905 struct fc_seq *csp; 906 907 csp = lp->tt.seq_start_next(seq); 908 conf_frame = fc_frame_alloc(fsp->lp, 0); 909 if (conf_frame) { 910 f_ctl = FC_FC_SEQ_INIT; 911 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; 912 ep = fc_seq_exch(seq); 913 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, 914 ep->did, ep->sid, 915 FC_TYPE_FCP, f_ctl, 0); 916 lp->tt.seq_send(lp, csp, conf_frame); 917 } 918 } 919 lp->tt.exch_done(seq); 920 } 921 fc_io_compl(fsp); 922} 923 924static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) 925{ 926 struct fc_lport *lp = fsp->lp; 927 928 if (fsp->seq_ptr) { 929 lp->tt.exch_done(fsp->seq_ptr); 930 fsp->seq_ptr = NULL; 931 } 932 fsp->status_code = error; 933} 934 935/** 936 * fc_fcp_cleanup_each_cmd() - Cleanup active commads 937 * @lp: logical port 938 * @id: target id 939 * @lun: lun 940 * @error: fsp status code 941 * 942 * If lun or id is -1, they are ignored. 943 */ 944static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, 945 unsigned int lun, int error) 946{ 947 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 948 struct fc_fcp_pkt *fsp; 949 struct scsi_cmnd *sc_cmd; 950 unsigned long flags; 951 952 spin_lock_irqsave(lp->host->host_lock, flags); 953restart: 954 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 955 sc_cmd = fsp->cmd; 956 if (id != -1 && scmd_id(sc_cmd) != id) 957 continue; 958 959 if (lun != -1 && sc_cmd->device->lun != lun) 960 continue; 961 962 fc_fcp_pkt_hold(fsp); 963 spin_unlock_irqrestore(lp->host->host_lock, flags); 964 965 if (!fc_fcp_lock_pkt(fsp)) { 966 fc_fcp_cleanup_cmd(fsp, error); 967 fc_io_compl(fsp); 968 fc_fcp_unlock_pkt(fsp); 969 } 970 971 fc_fcp_pkt_release(fsp); 972 spin_lock_irqsave(lp->host->host_lock, flags); 973 /* 974 * while we dropped the lock multiple pkts could 975 * have been released, so we have to start over. 976 */ 977 goto restart; 978 } 979 spin_unlock_irqrestore(lp->host->host_lock, flags); 980} 981 982static void fc_fcp_abort_io(struct fc_lport *lp) 983{ 984 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); 985} 986 987/** 988 * fc_fcp_pkt_send() - send a fcp packet to the lower level. 989 * @lp: fc lport 990 * @fsp: fc packet. 991 * 992 * This is called by upper layer protocol. 
993 * Return : zero for success and -1 for failure 994 * Context : called from queuecommand which can be called from process 995 * or scsi soft irq. 996 * Locks : called with the host lock and irqs disabled. 997 */ 998static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) 999{ 1000 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 1001 int rc; 1002 1003 fsp->cmd->SCp.ptr = (char *)fsp; 1004 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1005 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK; 1006 1007 int_to_scsilun(fsp->cmd->device->lun, 1008 (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1009 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1010 list_add_tail(&fsp->list, &si->scsi_pkt_queue); 1011 1012 spin_unlock_irq(lp->host->host_lock); 1013 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); 1014 spin_lock_irq(lp->host->host_lock); 1015 if (rc) 1016 list_del(&fsp->list); 1017 1018 return rc; 1019} 1020 1021static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1022 void (*resp)(struct fc_seq *, 1023 struct fc_frame *fp, 1024 void *arg)) 1025{ 1026 struct fc_frame *fp; 1027 struct fc_seq *seq; 1028 struct fc_rport *rport; 1029 struct fc_rport_libfc_priv *rp; 1030 const size_t len = sizeof(fsp->cdb_cmd); 1031 int rc = 0; 1032 1033 if (fc_fcp_lock_pkt(fsp)) 1034 return 0; 1035 1036 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); 1037 if (!fp) { 1038 rc = -1; 1039 goto unlock; 1040 } 1041 1042 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len); 1043 fr_fsp(fp) = fsp; 1044 rport = fsp->rport; 1045 fsp->max_payload = rport->maxframe_size; 1046 rp = rport->dd_data; 1047 1048 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1049 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1050 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1051 1052 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); 1053 if (!seq) { 1054 rc = -1; 1055 goto unlock; 1056 } 1057 fsp->last_pkt_time = jiffies; 1058 fsp->seq_ptr = seq; 1059 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1060 1061 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); 1062 fc_fcp_timer_set(fsp, 1063 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ? 1064 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT); 1065unlock: 1066 fc_fcp_unlock_pkt(fsp); 1067 return rc; 1068} 1069 1070/* 1071 * transport error handler 1072 */ 1073static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1074{ 1075 int error = PTR_ERR(fp); 1076 1077 if (fc_fcp_lock_pkt(fsp)) 1078 return; 1079 1080 if (error == -FC_EX_CLOSED) { 1081 fc_fcp_retry_cmd(fsp); 1082 goto unlock; 1083 } 1084 1085 /* 1086 * clear abort pending, because the lower layer 1087 * decided to force completion. 
1088 */ 1089 fsp->state &= ~FC_SRB_ABORT_PENDING; 1090 fsp->status_code = FC_CMD_PLOGO; 1091 fc_fcp_complete_locked(fsp); 1092unlock: 1093 fc_fcp_unlock_pkt(fsp); 1094} 1095 1096/* 1097 * Scsi abort handler- calls to send an abort 1098 * and then wait for abort completion 1099 */ 1100static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) 1101{ 1102 int rc = FAILED; 1103 1104 if (fc_fcp_send_abort(fsp)) 1105 return FAILED; 1106 1107 init_completion(&fsp->tm_done); 1108 fsp->wait_for_comp = 1; 1109 1110 spin_unlock_bh(&fsp->scsi_pkt_lock); 1111 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); 1112 spin_lock_bh(&fsp->scsi_pkt_lock); 1113 fsp->wait_for_comp = 0; 1114 1115 if (!rc) { 1116 FC_FCP_DBG(fsp, "target abort cmd failed\n"); 1117 rc = FAILED; 1118 } else if (fsp->state & FC_SRB_ABORTED) { 1119 FC_FCP_DBG(fsp, "target abort cmd passed\n"); 1120 rc = SUCCESS; 1121 fc_fcp_complete_locked(fsp); 1122 } 1123 1124 return rc; 1125} 1126 1127/* 1128 * Retry LUN reset after resource allocation failed. 1129 */ 1130static void fc_lun_reset_send(unsigned long data) 1131{ 1132 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1133 struct fc_lport *lp = fsp->lp; 1134 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { 1135 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1136 return; 1137 if (fc_fcp_lock_pkt(fsp)) 1138 return; 1139 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); 1140 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1141 fc_fcp_unlock_pkt(fsp); 1142 } 1143} 1144 1145/* 1146 * Scsi device reset handler- send a LUN RESET to the device 1147 * and wait for reset reply 1148 */ 1149static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, 1150 unsigned int id, unsigned int lun) 1151{ 1152 int rc; 1153 1154 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1155 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET; 1156 int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1157 1158 fsp->wait_for_comp = 1; 1159 init_completion(&fsp->tm_done); 1160 1161 fc_lun_reset_send((unsigned long)fsp); 1162 1163 /* 1164 * wait for completion of reset 1165 * after that make sure all commands are terminated 1166 */ 1167 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); 1168 1169 spin_lock_bh(&fsp->scsi_pkt_lock); 1170 fsp->state |= FC_SRB_COMPL; 1171 spin_unlock_bh(&fsp->scsi_pkt_lock); 1172 1173 del_timer_sync(&fsp->timer); 1174 1175 spin_lock_bh(&fsp->scsi_pkt_lock); 1176 if (fsp->seq_ptr) { 1177 lp->tt.exch_done(fsp->seq_ptr); 1178 fsp->seq_ptr = NULL; 1179 } 1180 fsp->wait_for_comp = 0; 1181 spin_unlock_bh(&fsp->scsi_pkt_lock); 1182 1183 if (!rc) { 1184 FC_SCSI_DBG(lp, "lun reset failed\n"); 1185 return FAILED; 1186 } 1187 1188 /* cdb_status holds the tmf's rsp code */ 1189 if (fsp->cdb_status != FCP_TMF_CMPL) 1190 return FAILED; 1191 1192 FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); 1193 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1194 return SUCCESS; 1195} 1196 1197/* 1198 * Task Managment response handler 1199 */ 1200static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1201{ 1202 struct fc_fcp_pkt *fsp = arg; 1203 struct fc_frame_header *fh; 1204 1205 if (IS_ERR(fp)) { 1206 /* 1207 * If there is an error just let it timeout or wait 1208 * for TMF to be aborted if it timedout. 1209 * 1210 * scsi-eh will escalate for when either happens. 1211 */ 1212 return; 1213 } 1214 1215 if (fc_fcp_lock_pkt(fsp)) 1216 return; 1217 1218 /* 1219 * raced with eh timeout handler. 
1220 */ 1221 if (!fsp->seq_ptr || !fsp->wait_for_comp) { 1222 spin_unlock_bh(&fsp->scsi_pkt_lock); 1223 return; 1224 } 1225 1226 fh = fc_frame_header_get(fp); 1227 if (fh->fh_type != FC_TYPE_BLS) 1228 fc_fcp_resp(fsp, fp); 1229 fsp->seq_ptr = NULL; 1230 fsp->lp->tt.exch_done(seq); 1231 fc_frame_free(fp); 1232 fc_fcp_unlock_pkt(fsp); 1233} 1234 1235static void fc_fcp_cleanup(struct fc_lport *lp) 1236{ 1237 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); 1238} 1239 1240/* 1241 * fc_fcp_timeout: called by OS timer function. 1242 * 1243 * The timer has been inactivated and must be reactivated if desired 1244 * using fc_fcp_timer_set(). 1245 * 1246 * Algorithm: 1247 * 1248 * If REC is supported, just issue it, and return. The REC exchange will 1249 * complete or time out, and recovery can continue at that point. 1250 * 1251 * Otherwise, if the response has been received without all the data, 1252 * it has been ER_TIMEOUT since the response was received. 1253 * 1254 * If the response has not been received, 1255 * we see if data was received recently. If it has been, we continue waiting, 1256 * otherwise, we abort the command. 1257 */ 1258static void fc_fcp_timeout(unsigned long data) 1259{ 1260 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1261 struct fc_rport *rport = fsp->rport; 1262 struct fc_rport_libfc_priv *rp = rport->dd_data; 1263 1264 if (fc_fcp_lock_pkt(fsp)) 1265 return; 1266 1267 if (fsp->cdb_cmd.fc_tm_flags) 1268 goto unlock; 1269 1270 fsp->state |= FC_SRB_FCP_PROCESSING_TMO; 1271 1272 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) 1273 fc_fcp_rec(fsp); 1274 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), 1275 jiffies)) 1276 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1277 else if (fsp->state & FC_SRB_RCV_STATUS) 1278 fc_fcp_complete_locked(fsp); 1279 else 1280 fc_timeout_error(fsp); 1281 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; 1282unlock: 1283 fc_fcp_unlock_pkt(fsp); 1284} 1285 1286/* 1287 * Send a REC ELS request 1288 */ 1289static void fc_fcp_rec(struct fc_fcp_pkt *fsp) 1290{ 1291 struct fc_lport *lp; 1292 struct fc_frame *fp; 1293 struct fc_rport *rport; 1294 struct fc_rport_libfc_priv *rp; 1295 1296 lp = fsp->lp; 1297 rport = fsp->rport; 1298 rp = rport->dd_data; 1299 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { 1300 fsp->status_code = FC_HRD_ERROR; 1301 fsp->io_status = 0; 1302 fc_fcp_complete_locked(fsp); 1303 return; 1304 } 1305 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); 1306 if (!fp) 1307 goto retry; 1308 1309 fr_seq(fp) = fsp->seq_ptr; 1310 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1311 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, 1312 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1313 if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, 1314 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1315 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1316 return; 1317 } 1318retry: 1319 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1320 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1321 else 1322 fc_timeout_error(fsp); 1323} 1324 1325/* 1326 * Receive handler for REC ELS frame 1327 * if it is a reject then let the scsi layer to handle 1328 * the timeout. if it is a LS_ACC then if the io was not completed 1329 * then set the timeout and return otherwise complete the exchange 1330 * and tell the scsi layer to restart the I/O. 
1331 */ 1332static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1333{ 1334 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 1335 struct fc_els_rec_acc *recp; 1336 struct fc_els_ls_rjt *rjt; 1337 u32 e_stat; 1338 u8 opcode; 1339 u32 offset; 1340 enum dma_data_direction data_dir; 1341 enum fc_rctl r_ctl; 1342 struct fc_rport_libfc_priv *rp; 1343 1344 if (IS_ERR(fp)) { 1345 fc_fcp_rec_error(fsp, fp); 1346 return; 1347 } 1348 1349 if (fc_fcp_lock_pkt(fsp)) 1350 goto out; 1351 1352 fsp->recov_retry = 0; 1353 opcode = fc_frame_payload_op(fp); 1354 if (opcode == ELS_LS_RJT) { 1355 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1356 switch (rjt->er_reason) { 1357 default: 1358 FC_FCP_DBG(fsp, "device %x unexpected REC reject " 1359 "reason %d expl %d\n", 1360 fsp->rport->port_id, rjt->er_reason, 1361 rjt->er_explan); 1362 /* fall through */ 1363 case ELS_RJT_UNSUP: 1364 FC_FCP_DBG(fsp, "device does not support REC\n"); 1365 rp = fsp->rport->dd_data; 1366 /* 1367 * if we do not spport RECs or got some bogus 1368 * reason then resetup timer so we check for 1369 * making progress. 1370 */ 1371 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; 1372 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); 1373 break; 1374 case ELS_RJT_LOGIC: 1375 case ELS_RJT_UNAB: 1376 /* 1377 * If no data transfer, the command frame got dropped 1378 * so we just retry. If data was transferred, we 1379 * lost the response but the target has no record, 1380 * so we abort and retry. 1381 */ 1382 if (rjt->er_explan == ELS_EXPL_OXID_RXID && 1383 fsp->xfer_len == 0) { 1384 fc_fcp_retry_cmd(fsp); 1385 break; 1386 } 1387 fc_timeout_error(fsp); 1388 break; 1389 } 1390 } else if (opcode == ELS_LS_ACC) { 1391 if (fsp->state & FC_SRB_ABORTED) 1392 goto unlock_out; 1393 1394 data_dir = fsp->cmd->sc_data_direction; 1395 recp = fc_frame_payload_get(fp, sizeof(*recp)); 1396 offset = ntohl(recp->reca_fc4value); 1397 e_stat = ntohl(recp->reca_e_stat); 1398 1399 if (e_stat & ESB_ST_COMPLETE) { 1400 1401 /* 1402 * The exchange is complete. 1403 * 1404 * For output, we must've lost the response. 1405 * For input, all data must've been sent. 1406 * We lost may have lost the response 1407 * (and a confirmation was requested) and maybe 1408 * some data. 1409 * 1410 * If all data received, send SRR 1411 * asking for response. If partial data received, 1412 * or gaps, SRR requests data at start of gap. 1413 * Recovery via SRR relies on in-order-delivery. 1414 */ 1415 if (data_dir == DMA_TO_DEVICE) { 1416 r_ctl = FC_RCTL_DD_CMD_STATUS; 1417 } else if (fsp->xfer_contig_end == offset) { 1418 r_ctl = FC_RCTL_DD_CMD_STATUS; 1419 } else { 1420 offset = fsp->xfer_contig_end; 1421 r_ctl = FC_RCTL_DD_SOL_DATA; 1422 } 1423 fc_fcp_srr(fsp, r_ctl, offset); 1424 } else if (e_stat & ESB_ST_SEQ_INIT) { 1425 1426 /* 1427 * The remote port has the initiative, so just 1428 * keep waiting for it to complete. 1429 */ 1430 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1431 } else { 1432 1433 /* 1434 * The exchange is incomplete, we have seq. initiative. 1435 * Lost response with requested confirmation, 1436 * lost confirmation, lost transfer ready or 1437 * lost write data. 1438 * 1439 * For output, if not all data was received, ask 1440 * for transfer ready to be repeated. 1441 * 1442 * If we received or sent all the data, send SRR to 1443 * request response. 1444 * 1445 * If we lost a response, we may have lost some read 1446 * data as well. 
1447 */ 1448 r_ctl = FC_RCTL_DD_SOL_DATA; 1449 if (data_dir == DMA_TO_DEVICE) { 1450 r_ctl = FC_RCTL_DD_CMD_STATUS; 1451 if (offset < fsp->data_len) 1452 r_ctl = FC_RCTL_DD_DATA_DESC; 1453 } else if (offset == fsp->xfer_contig_end) { 1454 r_ctl = FC_RCTL_DD_CMD_STATUS; 1455 } else if (fsp->xfer_contig_end < offset) { 1456 offset = fsp->xfer_contig_end; 1457 } 1458 fc_fcp_srr(fsp, r_ctl, offset); 1459 } 1460 } 1461unlock_out: 1462 fc_fcp_unlock_pkt(fsp); 1463out: 1464 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1465 fc_frame_free(fp); 1466} 1467 1468/* 1469 * Handle error response or timeout for REC exchange. 1470 */ 1471static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1472{ 1473 int error = PTR_ERR(fp); 1474 1475 if (fc_fcp_lock_pkt(fsp)) 1476 goto out; 1477 1478 switch (error) { 1479 case -FC_EX_CLOSED: 1480 fc_fcp_retry_cmd(fsp); 1481 break; 1482 1483 default: 1484 FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n", 1485 fsp, fsp->rport->port_id, error); 1486 fsp->status_code = FC_CMD_PLOGO; 1487 /* fall through */ 1488 1489 case -FC_EX_TIMEOUT: 1490 /* 1491 * Assume REC or LS_ACC was lost. 1492 * The exchange manager will have aborted REC, so retry. 1493 */ 1494 FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n", 1495 fsp->rport->port_id, error, fsp->recov_retry, 1496 FC_MAX_RECOV_RETRY); 1497 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1498 fc_fcp_rec(fsp); 1499 else 1500 fc_timeout_error(fsp); 1501 break; 1502 } 1503 fc_fcp_unlock_pkt(fsp); 1504out: 1505 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1506} 1507 1508/* 1509 * Time out error routine: 1510 * abort's the I/O close the exchange and 1511 * send completion notification to scsi layer 1512 */ 1513static void fc_timeout_error(struct fc_fcp_pkt *fsp) 1514{ 1515 fsp->status_code = FC_CMD_TIME_OUT; 1516 fsp->cdb_status = 0; 1517 fsp->io_status = 0; 1518 /* 1519 * if this fails then we let the scsi command timer fire and 1520 * scsi-ml escalate. 1521 */ 1522 fc_fcp_send_abort(fsp); 1523} 1524 1525/* 1526 * Sequence retransmission request. 1527 * This is called after receiving status but insufficient data, or 1528 * when expecting status but the request has timed out. 
1529 */ 1530static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) 1531{ 1532 struct fc_lport *lp = fsp->lp; 1533 struct fc_rport *rport; 1534 struct fc_rport_libfc_priv *rp; 1535 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); 1536 struct fc_seq *seq; 1537 struct fcp_srr *srr; 1538 struct fc_frame *fp; 1539 u8 cdb_op; 1540 1541 rport = fsp->rport; 1542 rp = rport->dd_data; 1543 cdb_op = fsp->cdb_cmd.fc_cdb[0]; 1544 1545 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) 1546 goto retry; /* shouldn't happen */ 1547 fp = fc_frame_alloc(lp, sizeof(*srr)); 1548 if (!fp) 1549 goto retry; 1550 1551 srr = fc_frame_payload_get(fp, sizeof(*srr)); 1552 memset(srr, 0, sizeof(*srr)); 1553 srr->srr_op = ELS_SRR; 1554 srr->srr_ox_id = htons(ep->oxid); 1555 srr->srr_rx_id = htons(ep->rxid); 1556 srr->srr_r_ctl = r_ctl; 1557 srr->srr_rel_off = htonl(offset); 1558 1559 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1560 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, 1561 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1562 1563 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, 1564 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); 1565 if (!seq) 1566 goto retry; 1567 1568 fsp->recov_seq = seq; 1569 fsp->xfer_len = offset; 1570 fsp->xfer_contig_end = offset; 1571 fsp->state &= ~FC_SRB_RCV_STATUS; 1572 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */ 1573 return; 1574retry: 1575 fc_fcp_retry_cmd(fsp); 1576} 1577 1578/* 1579 * Handle response from SRR. 1580 */ 1581static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1582{ 1583 struct fc_fcp_pkt *fsp = arg; 1584 struct fc_frame_header *fh; 1585 1586 if (IS_ERR(fp)) { 1587 fc_fcp_srr_error(fsp, fp); 1588 return; 1589 } 1590 1591 if (fc_fcp_lock_pkt(fsp)) 1592 goto out; 1593 1594 fh = fc_frame_header_get(fp); 1595 /* 1596 * BUG? fc_fcp_srr_error calls exch_done which would release 1597 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT, 1598 * then fc_exch_timeout would be sending an abort. The exch_done 1599 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing 1600 * an abort response though. 1601 */ 1602 if (fh->fh_type == FC_TYPE_BLS) { 1603 fc_fcp_unlock_pkt(fsp); 1604 return; 1605 } 1606 1607 fsp->recov_seq = NULL; 1608 switch (fc_frame_payload_op(fp)) { 1609 case ELS_LS_ACC: 1610 fsp->recov_retry = 0; 1611 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1612 break; 1613 case ELS_LS_RJT: 1614 default: 1615 fc_timeout_error(fsp); 1616 break; 1617 } 1618 fc_fcp_unlock_pkt(fsp); 1619 fsp->lp->tt.exch_done(seq); 1620out: 1621 fc_frame_free(fp); 1622 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1623} 1624 1625static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1626{ 1627 if (fc_fcp_lock_pkt(fsp)) 1628 goto out; 1629 fsp->lp->tt.exch_done(fsp->recov_seq); 1630 fsp->recov_seq = NULL; 1631 switch (PTR_ERR(fp)) { 1632 case -FC_EX_TIMEOUT: 1633 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1634 fc_fcp_rec(fsp); 1635 else 1636 fc_timeout_error(fsp); 1637 break; 1638 case -FC_EX_CLOSED: /* e.g., link failure */ 1639 /* fall through */ 1640 default: 1641 fc_fcp_retry_cmd(fsp); 1642 break; 1643 } 1644 fc_fcp_unlock_pkt(fsp); 1645out: 1646 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1647} 1648 1649static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) 1650{ 1651 /* lock ? 
*/ 1652 return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; 1653} 1654 1655/** 1656 * fc_queuecommand - The queuecommand function of the scsi template 1657 * @cmd: struct scsi_cmnd to be executed 1658 * @done: Callback function to be called when cmd is completed 1659 * 1660 * this is the i/o strategy routine, called by the scsi layer 1661 * this routine is called with holding the host_lock. 1662 */ 1663int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) 1664{ 1665 struct fc_lport *lp; 1666 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1667 struct fc_fcp_pkt *fsp; 1668 struct fc_rport_libfc_priv *rp; 1669 int rval; 1670 int rc = 0; 1671 struct fcoe_dev_stats *stats; 1672 1673 lp = shost_priv(sc_cmd->device->host); 1674 1675 rval = fc_remote_port_chkready(rport); 1676 if (rval) { 1677 sc_cmd->result = rval; 1678 done(sc_cmd); 1679 goto out; 1680 } 1681 1682 if (!*(struct fc_remote_port **)rport->dd_data) { 1683 /* 1684 * rport is transitioning from blocked/deleted to 1685 * online 1686 */ 1687 sc_cmd->result = DID_IMM_RETRY << 16; 1688 done(sc_cmd); 1689 goto out; 1690 } 1691 1692 rp = rport->dd_data; 1693 1694 if (!fc_fcp_lport_queue_ready(lp)) { 1695 rc = SCSI_MLQUEUE_HOST_BUSY; 1696 goto out; 1697 } 1698 1699 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); 1700 if (fsp == NULL) { 1701 rc = SCSI_MLQUEUE_HOST_BUSY; 1702 goto out; 1703 } 1704 1705 /* 1706 * build the libfc request pkt 1707 */ 1708 fsp->cmd = sc_cmd; /* save the cmd */ 1709 fsp->lp = lp; /* save the softc ptr */ 1710 fsp->rport = rport; /* set the remote port ptr */ 1711 fsp->xfer_ddp = FC_XID_UNKNOWN; 1712 sc_cmd->scsi_done = done; 1713 1714 /* 1715 * set up the transfer length 1716 */ 1717 fsp->data_len = scsi_bufflen(sc_cmd); 1718 fsp->xfer_len = 0; 1719 1720 /* 1721 * setup the data direction 1722 */ 1723 stats = fc_lport_get_stats(lp); 1724 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1725 fsp->req_flags = FC_SRB_READ; 1726 stats->InputRequests++; 1727 stats->InputMegabytes = fsp->data_len; 1728 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 1729 fsp->req_flags = FC_SRB_WRITE; 1730 stats->OutputRequests++; 1731 stats->OutputMegabytes = fsp->data_len; 1732 } else { 1733 fsp->req_flags = 0; 1734 stats->ControlRequests++; 1735 } 1736 1737 fsp->tgt_flags = rp->flags; 1738 1739 init_timer(&fsp->timer); 1740 fsp->timer.data = (unsigned long)fsp; 1741 1742 /* 1743 * send it to the lower layer 1744 * if we get -1 return then put the request in the pending 1745 * queue. 1746 */ 1747 rval = fc_fcp_pkt_send(lp, fsp); 1748 if (rval != 0) { 1749 fsp->state = FC_SRB_FREE; 1750 fc_fcp_pkt_release(fsp); 1751 rc = SCSI_MLQUEUE_HOST_BUSY; 1752 } 1753out: 1754 return rc; 1755} 1756EXPORT_SYMBOL(fc_queuecommand); 1757 1758/** 1759 * fc_io_compl() - Handle responses for completed commands 1760 * @fsp: scsi packet 1761 * 1762 * Translates a error to a Linux SCSI error. 1763 * 1764 * The fcp packet lock must be held when calling. 
1765 */ 1766static void fc_io_compl(struct fc_fcp_pkt *fsp) 1767{ 1768 struct fc_fcp_internal *si; 1769 struct scsi_cmnd *sc_cmd; 1770 struct fc_lport *lp; 1771 unsigned long flags; 1772 1773 /* release outstanding ddp context */ 1774 fc_fcp_ddp_done(fsp); 1775 1776 fsp->state |= FC_SRB_COMPL; 1777 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { 1778 spin_unlock_bh(&fsp->scsi_pkt_lock); 1779 del_timer_sync(&fsp->timer); 1780 spin_lock_bh(&fsp->scsi_pkt_lock); 1781 } 1782 1783 lp = fsp->lp; 1784 si = fc_get_scsi_internal(lp); 1785 spin_lock_irqsave(lp->host->host_lock, flags); 1786 if (!fsp->cmd) { 1787 spin_unlock_irqrestore(lp->host->host_lock, flags); 1788 return; 1789 } 1790 1791 /* 1792 * if a command timed out while we had to try and throttle IO 1793 * and it is now getting cleaned up, then we are about to 1794 * try again so clear the throttled flag incase we get more 1795 * time outs. 1796 */ 1797 if (si->throttled && fsp->state & FC_SRB_NOMEM) 1798 si->throttled = 0; 1799 1800 sc_cmd = fsp->cmd; 1801 fsp->cmd = NULL; 1802 1803 if (!sc_cmd->SCp.ptr) { 1804 spin_unlock_irqrestore(lp->host->host_lock, flags); 1805 return; 1806 } 1807 1808 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1809 switch (fsp->status_code) { 1810 case FC_COMPLETE: 1811 if (fsp->cdb_status == 0) { 1812 /* 1813 * good I/O status 1814 */ 1815 sc_cmd->result = DID_OK << 16; 1816 if (fsp->scsi_resid) 1817 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1818 } else { 1819 /* 1820 * transport level I/O was ok but scsi 1821 * has non zero status 1822 */ 1823 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; 1824 } 1825 break; 1826 case FC_ERROR: 1827 sc_cmd->result = DID_ERROR << 16; 1828 break; 1829 case FC_DATA_UNDRUN: 1830 if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) { 1831 /* 1832 * scsi status is good but transport level 1833 * underrun. 1834 */ 1835 sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ? 1836 DID_OK : DID_ERROR) << 16; 1837 } else { 1838 /* 1839 * scsi got underrun, this is an error 1840 */ 1841 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1842 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1843 } 1844 break; 1845 case FC_DATA_OVRRUN: 1846 /* 1847 * overrun is an error 1848 */ 1849 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1850 break; 1851 case FC_CMD_ABORTED: 1852 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; 1853 break; 1854 case FC_CMD_TIME_OUT: 1855 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1856 break; 1857 case FC_CMD_RESET: 1858 sc_cmd->result = (DID_RESET << 16); 1859 break; 1860 case FC_HRD_ERROR: 1861 sc_cmd->result = (DID_NO_CONNECT << 16); 1862 break; 1863 default: 1864 sc_cmd->result = (DID_ERROR << 16); 1865 break; 1866 } 1867 1868 list_del(&fsp->list); 1869 sc_cmd->SCp.ptr = NULL; 1870 sc_cmd->scsi_done(sc_cmd); 1871 spin_unlock_irqrestore(lp->host->host_lock, flags); 1872 1873 /* release ref from initial allocation in queue command */ 1874 fc_fcp_pkt_release(fsp); 1875} 1876 1877/** 1878 * fc_eh_abort() - Abort a command 1879 * @sc_cmd: scsi command to abort 1880 * 1881 * From scsi host template. 1882 * send ABTS to the target device and wait for the response 1883 * sc_cmd is the pointer to the command to be aborted. 
1884 */ 1885int fc_eh_abort(struct scsi_cmnd *sc_cmd) 1886{ 1887 struct fc_fcp_pkt *fsp; 1888 struct fc_lport *lp; 1889 int rc = FAILED; 1890 unsigned long flags; 1891 1892 lp = shost_priv(sc_cmd->device->host); 1893 if (lp->state != LPORT_ST_READY) 1894 return rc; 1895 else if (!lp->link_up) 1896 return rc; 1897 1898 spin_lock_irqsave(lp->host->host_lock, flags); 1899 fsp = CMD_SP(sc_cmd); 1900 if (!fsp) { 1901 /* command completed while scsi eh was setting up */ 1902 spin_unlock_irqrestore(lp->host->host_lock, flags); 1903 return SUCCESS; 1904 } 1905 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ 1906 fc_fcp_pkt_hold(fsp); 1907 spin_unlock_irqrestore(lp->host->host_lock, flags); 1908 1909 if (fc_fcp_lock_pkt(fsp)) { 1910 /* completed while we were waiting for timer to be deleted */ 1911 rc = SUCCESS; 1912 goto release_pkt; 1913 } 1914 1915 rc = fc_fcp_pkt_abort(fsp); 1916 fc_fcp_unlock_pkt(fsp); 1917 1918release_pkt: 1919 fc_fcp_pkt_release(fsp); 1920 return rc; 1921} 1922EXPORT_SYMBOL(fc_eh_abort); 1923 1924/** 1925 * fc_eh_device_reset() Reset a single LUN 1926 * @sc_cmd: scsi command 1927 * 1928 * Set from scsi host template to send tm cmd to the target and wait for the 1929 * response. 1930 */ 1931int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 1932{ 1933 struct fc_lport *lp; 1934 struct fc_fcp_pkt *fsp; 1935 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1936 int rc = FAILED; 1937 struct fc_rport_libfc_priv *rp; 1938 int rval; 1939 1940 rval = fc_remote_port_chkready(rport); 1941 if (rval) 1942 goto out; 1943 1944 rp = rport->dd_data; 1945 lp = shost_priv(sc_cmd->device->host); 1946 1947 if (lp->state != LPORT_ST_READY) 1948 return rc; 1949 1950 FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); 1951 1952 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); 1953 if (fsp == NULL) { 1954 printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); 1955 sc_cmd->result = DID_NO_CONNECT << 16; 1956 goto out; 1957 } 1958 1959 /* 1960 * Build the libfc request pkt. Do not set the scsi cmnd, because 1961 * the sc passed in is not setup for execution like when sent 1962 * through the queuecommand callout. 1963 */ 1964 fsp->lp = lp; /* save the softc ptr */ 1965 fsp->rport = rport; /* set the remote port ptr */ 1966 1967 /* 1968 * flush outstanding commands 1969 */ 1970 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); 1971 fsp->state = FC_SRB_FREE; 1972 fc_fcp_pkt_release(fsp); 1973 1974out: 1975 return rc; 1976} 1977EXPORT_SYMBOL(fc_eh_device_reset); 1978 1979/** 1980 * fc_eh_host_reset() - The reset function will reset the ports on the host. 
1981 * @sc_cmd: scsi command 1982 */ 1983int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) 1984{ 1985 struct Scsi_Host *shost = sc_cmd->device->host; 1986 struct fc_lport *lp = shost_priv(shost); 1987 unsigned long wait_tmo; 1988 1989 FC_SCSI_DBG(lp, "Resetting host\n"); 1990 1991 lp->tt.lport_reset(lp); 1992 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 1993 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) 1994 msleep(1000); 1995 1996 if (fc_fcp_lport_queue_ready(lp)) { 1997 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " 1998 "on port (%6x)\n", fc_host_port_id(lp->host)); 1999 return SUCCESS; 2000 } else { 2001 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " 2002 "port (%6x) is not ready.\n", 2003 fc_host_port_id(lp->host)); 2004 return FAILED; 2005 } 2006} 2007EXPORT_SYMBOL(fc_eh_host_reset); 2008 2009/** 2010 * fc_slave_alloc() - configure queue depth 2011 * @sdev: scsi device 2012 * 2013 * Configures queue depth based on host's cmd_per_len. If not set 2014 * then we use the libfc default. 2015 */ 2016int fc_slave_alloc(struct scsi_device *sdev) 2017{ 2018 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 2019 2020 if (!rport || fc_remote_port_chkready(rport)) 2021 return -ENXIO; 2022 2023 if (sdev->tagged_supported) 2024 scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH); 2025 else 2026 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), 2027 FC_FCP_DFLT_QUEUE_DEPTH); 2028 2029 return 0; 2030} 2031EXPORT_SYMBOL(fc_slave_alloc); 2032 2033int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 2034{ 2035 switch (reason) { 2036 case SCSI_QDEPTH_DEFAULT: 2037 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2038 break; 2039 case SCSI_QDEPTH_QFULL: 2040 scsi_track_queue_full(sdev, qdepth); 2041 break; 2042 case SCSI_QDEPTH_RAMP_UP: 2043 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2044 break; 2045 default: 2046 return -EOPNOTSUPP; 2047 } 2048 return sdev->queue_depth; 2049} 2050EXPORT_SYMBOL(fc_change_queue_depth); 2051 2052int fc_change_queue_type(struct scsi_device *sdev, int tag_type) 2053{ 2054 if (sdev->tagged_supported) { 2055 scsi_set_tag_type(sdev, tag_type); 2056 if (tag_type) 2057 scsi_activate_tcq(sdev, sdev->queue_depth); 2058 else 2059 scsi_deactivate_tcq(sdev, sdev->queue_depth); 2060 } else 2061 tag_type = 0; 2062 2063 return tag_type; 2064} 2065EXPORT_SYMBOL(fc_change_queue_type); 2066 2067void fc_fcp_destroy(struct fc_lport *lp) 2068{ 2069 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 2070 2071 if (!list_empty(&si->scsi_pkt_queue)) 2072 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " 2073 "port (%6x)\n", fc_host_port_id(lp->host)); 2074 2075 mempool_destroy(si->scsi_pkt_pool); 2076 kfree(si); 2077 lp->scsi_priv = NULL; 2078} 2079EXPORT_SYMBOL(fc_fcp_destroy); 2080 2081int fc_fcp_init(struct fc_lport *lp) 2082{ 2083 int rc; 2084 struct fc_fcp_internal *si; 2085 2086 if (!lp->tt.fcp_cmd_send) 2087 lp->tt.fcp_cmd_send = fc_fcp_cmd_send; 2088 2089 if (!lp->tt.fcp_cleanup) 2090 lp->tt.fcp_cleanup = fc_fcp_cleanup; 2091 2092 if (!lp->tt.fcp_abort_io) 2093 lp->tt.fcp_abort_io = fc_fcp_abort_io; 2094 2095 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); 2096 if (!si) 2097 return -ENOMEM; 2098 lp->scsi_priv = si; 2099 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2100 2101 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2102 if (!si->scsi_pkt_pool) { 2103 rc = -ENOMEM; 2104 goto free_internal; 2105 } 2106 return 0; 2107 
free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);

static int __init libfc_init(void)
{
	int rc;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (scsi_pkt_cachep == NULL) {
		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
		       "module load failed!");
		return -ENOMEM;
	}

	rc = fc_setup_exch_mgr();
	if (rc)
		goto destroy_pkt_cache;

	rc = fc_setup_rport();
	if (rc)
		goto destroy_em;

	return rc;
destroy_em:
	fc_destroy_exch_mgr();
destroy_pkt_cache:
	kmem_cache_destroy(scsi_pkt_cachep);
	return rc;
}

static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
	fc_destroy_rport();
}

module_init(libfc_init);
module_exit(libfc_exit);
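
/*
 * Illustrative usage sketch (not part of fc_fcp.c): the exported FCP entry
 * points above are intended to be wired into a lower-level driver's
 * scsi_host_template, with fc_fcp_init() called while the driver configures
 * its fc_lport, in the way an FCoE-style LLD consumes libfc.  The names
 * example_fc_sht and example_lport_config are hypothetical, the numeric
 * tunables are placeholders, and the usual <scsi/scsi_host.h> and
 * <scsi/libfc.h> includes are assumed.
 */
static struct scsi_host_template example_fc_sht = {
	.module			= THIS_MODULE,
	.name			= "example libfc FCP host",
	.queuecommand		= fc_queuecommand,
	.eh_abort_handler	= fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler	= fc_eh_host_reset,
	.slave_alloc		= fc_slave_alloc,
	.change_queue_depth	= fc_change_queue_depth,
	.change_queue_type	= fc_change_queue_type,
	.this_id		= -1,
	.cmd_per_lun		= 3,
	.can_queue		= 1024,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= SG_ALL,
};

/*
 * Hypothetical lport setup path: fc_fcp_init() fills in the lport's
 * tt.fcp_* defaults and creates the per-lport scsi_pkt mempool before the
 * SCSI host starts queueing commands through the template above.
 */
static int example_lport_config(struct fc_lport *lport)
{
	return fc_fcp_init(lport);
}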