fc_fcp.c revision 34f42a070fc98f5dc07e9fa2338b7b8d1dc347eb
/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL");

static int fc_fcp_debug;

#define FC_DEBUG_FCP(fmt...)			\
	do {					\
		if (fc_fcp_debug)		\
			FC_DBG(fmt);		\
	} while (0)

static struct kmem_cache *scsi_pkt_cachep;

/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data received */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of memory */

#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)
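/*
 * Note (added for clarity): FC_SRB_READ and FC_SRB_WRITE deliberately reuse
 * the low bit values of the state flags above, but they live in the separate
 * fsp->req_flags field (set from the SCSI data direction in
 * fc_queuecommand()), never in fsp->state, so the two sets do not mix.
 */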
/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been returned to the scsi layer.
 */
#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)

struct fc_fcp_internal {
	mempool_t	*scsi_pkt_pool;
	struct list_head scsi_pkt_queue;
	u8		throttled;
};

#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)

/*
 * function prototypes
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
static void fc_timeout_error(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long data);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);

/*
 * command status codes
 */
#define FC_COMPLETE		0
#define FC_CMD_ABORTED		1
#define FC_CMD_RESET		2
#define FC_CMD_PLOGO		3
#define FC_SNS_RCV		4
#define FC_TRANS_ERR		5
#define FC_DATA_OVRRUN		6
#define FC_DATA_UNDRUN		7
#define FC_ERROR		8
#define FC_HRD_ERROR		9
#define FC_CMD_TIME_OUT		10

/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
#define FC_SCSI_TM_TOV		(10 * HZ)
#define FC_SCSI_REC_TOV		(2 * HZ)
#define FC_HOST_RESET_TIMEOUT	(30 * HZ)

#define FC_MAX_ERROR_CNT	5
#define FC_MAX_RECOV_RETRY	3

#define FC_FCP_DFLT_QUEUE_DEPTH 32

/**
 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
 * @lp:	fc lport struct
 * @gfp: gfp flags for allocation
 *
 * This is used by upper layer scsi driver.
 * Return Value : scsi_pkt structure or null on allocation failure.
 * Context	: call from process context. no locking required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;

	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lp = lp;
		atomic_set(&fsp->ref_cnt, 1);
		init_timer(&fsp->timer);
		INIT_LIST_HEAD(&fsp->list);
		spin_lock_init(&fsp->scsi_pkt_lock);
	}
	return fsp;
}
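/*
 * Note (added for clarity): si->scsi_pkt_pool is the mempool created in
 * fc_fcp_init() below with a two-element reserve, so the GFP_ATOMIC
 * allocation made from fc_queuecommand() can still fail under pressure;
 * that path returns SCSI_MLQUEUE_HOST_BUSY, and frame-allocation failures
 * are handled by fc_fcp_reduce_can_queue().
 */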
/**
 * fc_fcp_pkt_release() - release hold on scsi_pkt packet
 * @fsp: fcp packet struct
 *
 * This is used by upper layer scsi driver.
 * Context	: call from process and interrupt context.
 *		  no locking required
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}

static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}

/**
 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
 * @seq: exchange sequence
 * @fsp: fcp packet struct
 *
 * Release the hold on the scsi_pkt packet that was taken to keep the
 * scsi_pkt alive until the EM layer exch resource is freed.
 * Context	: called from the EM layer.
 *		  no locking required
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
 * @fsp: fcp packet
 *
 * We should only return error if we return a command to scsi-ml before
 * getting a response. This can happen in cases where we send an abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packet's refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}

static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}
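/*
 * Illustrative sketch (not from the original source) of the canonical
 * pattern built on the two helpers above, as used throughout this file:
 *
 *	if (fc_fcp_lock_pkt(fsp))
 *		return;		// already completed; no reference was taken
 *	// ... update fsp->state or call fc_fcp_complete_locked(fsp) ...
 *	fc_fcp_unlock_pkt(fsp);	// drops both the lock and the reference
 */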
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}

static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}

/*
 * Retry command.
 * An abort isn't needed.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->io_status = SUGGEST_RETRY << 24;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/*
 * Receive SCSI data from target.
 * Called after receiving solicited data.
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct scsi_cmnd *sc = fsp->cmd;
	struct fc_lport *lp = fsp->lp;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	size_t start_offset;
	size_t offset;
	u32 crc;
	u32 copy_len = 0;
	size_t len;
	void *buf;
	struct scatterlist *sg;
	size_t remaining;

	fh = fc_frame_header_get(fp);
	offset = ntohl(fh->fh_parm_offset);
	start_offset = offset;
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (offset + len > fsp->data_len) {
		/* this should never happen */
		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
		    fc_frame_crc_check(fp))
			goto crc_err;
		FC_DEBUG_FCP("data received past end. len %zx offset %zx "
			     "data_len %x\n", len, offset, fsp->data_len);
		fc_fcp_retry_cmd(fsp);
		return;
	}
	if (offset != fsp->xfer_len)
		fsp->state |= FC_SRB_DISCONTIG;

	crc = 0;
	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
		crc = crc32(~0, (u8 *) fh, sizeof(*fh));

	sg = scsi_sglist(sc);
	remaining = len;

	while (remaining > 0 && sg) {
		size_t off;
		void *page_addr;
		size_t sg_bytes;

		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		sg_bytes = min(remaining, sg->length - offset);

		/*
		 * The scatterlist item may be bigger than PAGE_SIZE,
		 * but we are limited to mapping PAGE_SIZE at a time.
		 */
		off = offset + sg->offset;
		sg_bytes = min(sg_bytes, (size_t)
			       (PAGE_SIZE - (off & ~PAGE_MASK)));
		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
					KM_SOFTIRQ0);
		if (!page_addr)
			break;		/* XXX panic? */

		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
			crc = crc32(crc, buf, sg_bytes);
		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
		       sg_bytes);

		kunmap_atomic(page_addr, KM_SOFTIRQ0);
		buf += sg_bytes;
		offset += sg_bytes;
		remaining -= sg_bytes;
		copy_len += sg_bytes;
	}

	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
		buf = fc_frame_payload_get(fp, 0);
		if (len % 4) {
			crc = crc32(crc, buf + len, 4 - (len % 4));
			len += 4 - (len % 4);
		}

		if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
			stats = lp->dev_stats[smp_processor_id()];
			stats->ErrorFrames++;
			if (stats->InvalidCRCCount++ < 5)
				FC_DBG("CRC error on data frame\n");
			/*
			 * Assume the frame is total garbage.
			 * We may have copied it over the good part
			 * of the buffer.
			 * If so, we need to retry the entire operation.
			 * Otherwise, ignore it.
			 */
			if (fsp->state & FC_SRB_DISCONTIG)
				fc_fcp_retry_cmd(fsp);
			return;
		}
	}

	if (fsp->xfer_contig_end == start_offset)
		fsp->xfer_contig_end += copy_len;
	fsp->xfer_len += copy_len;

	/*
	 * In the very rare event that this data arrived after the response
	 * and completes the transfer, call the completion handler.
	 */
	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
		fc_fcp_complete_locked(fsp);
}
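/*
 * Note (added for clarity): fsp->xfer_len counts every byte copied above,
 * while fsp->xfer_contig_end only advances while data arrives in order.
 * The REC/SRR recovery handlers below use xfer_contig_end as the
 * retransmission offset, so a gap is re-requested from its start.
 */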
/**
 * fc_fcp_send_data() - Send SCSI data to target.
 * @fsp: ptr to fc_fcp_pkt
 * @seq: ptr to this sequence
 * @offset: starting offset for this data request
 * @seq_blen: the burst length for this data request
 *
 * Called after receiving a Transfer Ready data descriptor.
 * If the LLD is capable of sequence offload then send down seq_blen
 * bytes of data in a single frame, otherwise send multiple FC
 * frames of the max FC frame payload supported by the target port.
 *
 * Returns : 0 for success.
 */
static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
			    size_t offset, size_t seq_blen)
{
	struct fc_exch *ep;
	struct scsi_cmnd *sc;
	struct scatterlist *sg;
	struct fc_frame *fp = NULL;
	struct fc_lport *lp = fsp->lp;
	size_t remaining;
	size_t t_blen;
	size_t tlen;
	size_t sg_bytes;
	size_t frame_offset, fh_parm_offset;
	int error;
	void *data = NULL;
	void *page_addr;
	int using_sg = lp->sg_supp;
	u32 f_ctl;

	WARN_ON(seq_blen <= 0);
	if (unlikely(offset + seq_blen > fsp->data_len)) {
		/* this should never happen */
		FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
			     seq_blen, offset);
		fc_fcp_send_abort(fsp);
		return 0;
	} else if (offset != fsp->xfer_len) {
		/* Out of Order Data Request - no problem, but unexpected. */
		FC_DEBUG_FCP("xfer-ready non-contiguous. "
			     "seq_blen %zx offset %zx\n", seq_blen, offset);
	}

	/*
	 * if LLD is capable of seq_offload then set transport
	 * burst length (t_blen) to seq_blen, otherwise set t_blen
	 * to max FC frame payload previously set in fsp->max_payload.
	 */
	t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
	if (t_blen > 512)
		t_blen &= ~(512 - 1);	/* round down to block size */
	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);	/* won't go below 256 */
	sc = fsp->cmd;

	remaining = seq_blen;
	fh_parm_offset = frame_offset = offset;
	tlen = 0;
	seq = lp->tt.seq_start_next(seq);
	f_ctl = FC_FC_REL_OFF;
	WARN_ON(!seq);

	/*
	 * If a get_page()/put_page() will fail, don't use sg lists
	 * in the fc_frame structure.
	 *
	 * The put_page() may be long after the I/O has completed
	 * in the case of FCoE, since the network driver does it
	 * via free_skb().  See the test in free_pages_check().
	 *
	 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
	 */
	if (using_sg) {
		for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
			if (page_count(sg_page(sg)) == 0 ||
			    (sg_page(sg)->flags & (1 << PG_lru |
						   1 << PG_private |
						   1 << PG_locked |
						   1 << PG_active |
						   1 << PG_slab |
						   1 << PG_swapcache |
						   1 << PG_writeback |
						   1 << PG_reserved |
						   1 << PG_buddy))) {
				using_sg = 0;
				break;
			}
		}
	}
	sg = scsi_sglist(sc);
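	/*
	 * Note (added description of the loop below): walk the scatterlist,
	 * carving the burst into frames of at most t_blen bytes.  Each frame
	 * is either built from sg pages directly (using_sg) or assembled by
	 * kmap_atomic() + memcpy into a linear frame, then sent on the
	 * sequence with a relative-offset header.
	 */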
	while (remaining > 0 && sg) {
		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		if (!fp) {
			tlen = min(t_blen, remaining);

			/*
			 * TODO.  Temporary workaround.	fc_seq_send() can't
			 * handle odd lengths in non-linear skbs.
			 * This will be the final fragment only.
			 */
			if (tlen % 4)
				using_sg = 0;
			if (using_sg) {
				fp = _fc_frame_alloc(lp, 0);
				if (!fp)
					return -ENOMEM;
			} else {
				fp = fc_frame_alloc(lp, tlen);
				if (!fp)
					return -ENOMEM;

				data = (void *)(fr_hdr(fp)) +
					sizeof(struct fc_frame_header);
			}
			fh_parm_offset = frame_offset;
			fr_max_payload(fp) = fsp->max_payload;
		}
		sg_bytes = min(tlen, sg->length - offset);
		if (using_sg) {
			WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
				FC_FRAME_SG_LEN);
			get_page(sg_page(sg));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   sg_page(sg), sg->offset + offset,
					   sg_bytes);
			fp_skb(fp)->data_len += sg_bytes;
			fr_len(fp) += sg_bytes;
			fp_skb(fp)->truesize += PAGE_SIZE;
		} else {
			size_t off = offset + sg->offset;

			/*
			 * The scatterlist item may be bigger than PAGE_SIZE,
			 * but we must not cross pages inside the kmap.
			 */
			sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
							   (off & ~PAGE_MASK)));
			page_addr = kmap_atomic(sg_page(sg) +
						(off >> PAGE_SHIFT),
						KM_SOFTIRQ0);
			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
			       sg_bytes);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
			data += sg_bytes;
		}
		offset += sg_bytes;
		frame_offset += sg_bytes;
		tlen -= sg_bytes;
		remaining -= sg_bytes;

		if (tlen)
			continue;

		/*
		 * Send sequence with transfer sequence initiative in case
		 * this is the last FCP frame of the sequence.
		 */
		if (remaining == 0)
			f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;

		ep = fc_seq_exch(seq);
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_parm_offset);

		/*
		 * send fragment on this sequence.
		 */
		error = lp->tt.seq_send(lp, seq, fp);
		if (error) {
			WARN_ON(1);		/* send error should be rare */
			fc_fcp_retry_cmd(fsp);
			return 0;
		}
		fp = NULL;
	}
	fsp->xfer_len += seq_blen;	/* premature count? */
	return 0;
}

static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int ba_done = 1;
	struct fc_ba_rjt *brp;
	struct fc_frame_header *fh;

	fh = fc_frame_header_get(fp);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		break;
	case FC_RCTL_BA_RJT:
		brp = fc_frame_payload_get(fp, sizeof(*brp));
		if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
			break;
		/* fall thru */
	default:
		/*
		 * we will let the command timeout
		 * and scsi-ml recover in this case,
		 * therefore clear the ba_done flag.
		 */
		ba_done = 0;
	}

	if (ba_done) {
		fsp->state |= FC_SRB_ABORTED;
		fsp->state &= ~FC_SRB_ABORT_PENDING;

		if (fsp->wait_for_comp)
			complete(&fsp->tm_done);
		else
			fc_fcp_complete_locked(fsp);
	}
}
/**
 * fc_fcp_reduce_can_queue() - drop can_queue
 * @lp: lport to drop queueing for
 *
 * If we are getting memory allocation failures, then we may
 * be trying to execute too many commands. We let the running
 * commands complete or timeout, then try again with a reduced
 * can_queue. Eventually we will hit the point where we run
 * on all reserved structs.
 */
static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	unsigned long flags;
	int can_queue;

	spin_lock_irqsave(lp->host->host_lock, flags);
	if (si->throttled)
		goto done;
	si->throttled = 1;

	can_queue = lp->host->can_queue;
	can_queue >>= 1;
	if (!can_queue)
		can_queue = 1;
	lp->host->can_queue = can_queue;
	shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
		     "Reducing can_queue to %d.\n", can_queue);
done:
	spin_unlock_irqrestore(lp->host->host_lock, flags);
}

/**
 * fc_fcp_recv() - Receive FCP frames
 * @seq: The sequence the frame is on
 * @fp: The FC frame
 * @arg: The related FCP packet
 *
 * Return   : None
 * Context  : called from Soft IRQ context
 *	      must not be called while holding the list lock
 */
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_lport *lp;
	struct fc_frame_header *fh;
	struct fcp_txrdy *dd;
	u8 r_ctl;
	int rc = 0;

	if (IS_ERR(fp))
		goto errout;

	fh = fc_frame_header_get(fp);
	r_ctl = fh->fh_r_ctl;
	lp = fsp->lp;

	if (!(lp->state & LPORT_ST_READY))
		goto out;
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->last_pkt_time = jiffies;

	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_abts_resp(fsp, fp);
		goto unlock;
	}

	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
		goto unlock;

	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
		/*
		 * received XFER RDY from the target
		 * need to send data to the target
		 */
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
		dd = fc_frame_payload_get(fp, sizeof(*dd));
		WARN_ON(!dd);

		rc = fc_fcp_send_data(fsp, seq,
				      (size_t) ntohl(dd->ft_data_ro),
				      (size_t) ntohl(dd->ft_burst_len));
		if (!rc)
			seq->rec_data = fsp->xfer_len;
		else if (rc == -ENOMEM)
			fsp->state |= FC_SRB_NOMEM;
	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
		/*
		 * received a DATA frame
		 * next we will copy the data to the system buffer
		 */
		WARN_ON(fr_len(fp) < sizeof(*fh));	/* len may be 0 */
		fc_fcp_recv_data(fsp, fp);
		seq->rec_data = fsp->xfer_contig_end;
	} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);

		fc_fcp_resp(fsp, fp);
	} else {
		FC_DBG("unexpected frame.  r_ctl %x\n", r_ctl);
	}
r_ctl %x\n", r_ctl); 712 } 713unlock: 714 fc_fcp_unlock_pkt(fsp); 715out: 716 fc_frame_free(fp); 717errout: 718 if (IS_ERR(fp)) 719 fc_fcp_error(fsp, fp); 720 else if (rc == -ENOMEM) 721 fc_fcp_reduce_can_queue(lp); 722} 723 724static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 725{ 726 struct fc_frame_header *fh; 727 struct fcp_resp *fc_rp; 728 struct fcp_resp_ext *rp_ex; 729 struct fcp_resp_rsp_info *fc_rp_info; 730 u32 plen; 731 u32 expected_len; 732 u32 respl = 0; 733 u32 snsl = 0; 734 u8 flags = 0; 735 736 plen = fr_len(fp); 737 fh = (struct fc_frame_header *)fr_hdr(fp); 738 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp))) 739 goto len_err; 740 plen -= sizeof(*fh); 741 fc_rp = (struct fcp_resp *)(fh + 1); 742 fsp->cdb_status = fc_rp->fr_status; 743 flags = fc_rp->fr_flags; 744 fsp->scsi_comp_flags = flags; 745 expected_len = fsp->data_len; 746 747 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) { 748 rp_ex = (void *)(fc_rp + 1); 749 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) { 750 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex)) 751 goto len_err; 752 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); 753 if (flags & FCP_RSP_LEN_VAL) { 754 respl = ntohl(rp_ex->fr_rsp_len); 755 if (respl != sizeof(*fc_rp_info)) 756 goto len_err; 757 if (fsp->wait_for_comp) { 758 /* Abuse cdb_status for rsp code */ 759 fsp->cdb_status = fc_rp_info->rsp_code; 760 complete(&fsp->tm_done); 761 /* 762 * tmfs will not have any scsi cmd so 763 * exit here 764 */ 765 return; 766 } else 767 goto err; 768 } 769 if (flags & FCP_SNS_LEN_VAL) { 770 snsl = ntohl(rp_ex->fr_sns_len); 771 if (snsl > SCSI_SENSE_BUFFERSIZE) 772 snsl = SCSI_SENSE_BUFFERSIZE; 773 memcpy(fsp->cmd->sense_buffer, 774 (char *)fc_rp_info + respl, snsl); 775 } 776 } 777 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) { 778 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid)) 779 goto len_err; 780 if (flags & FCP_RESID_UNDER) { 781 fsp->scsi_resid = ntohl(rp_ex->fr_resid); 782 /* 783 * The cmnd->underflow is the minimum number of 784 * bytes that must be transfered for this 785 * command. Provided a sense condition is not 786 * present, make sure the actual amount 787 * transferred is at least the underflow value 788 * or fail. 789 */ 790 if (!(flags & FCP_SNS_LEN_VAL) && 791 (fc_rp->fr_status == 0) && 792 (scsi_bufflen(fsp->cmd) - 793 fsp->scsi_resid) < fsp->cmd->underflow) 794 goto err; 795 expected_len -= fsp->scsi_resid; 796 } else { 797 fsp->status_code = FC_ERROR; 798 } 799 } 800 } 801 fsp->state |= FC_SRB_RCV_STATUS; 802 803 /* 804 * Check for missing or extra data frames. 805 */ 806 if (unlikely(fsp->xfer_len != expected_len)) { 807 if (fsp->xfer_len < expected_len) { 808 /* 809 * Some data may be queued locally, 810 * Wait a at least one jiffy to see if it is delivered. 811 * If this expires without data, we may do SRR. 812 */ 813 fc_fcp_timer_set(fsp, 2); 814 return; 815 } 816 fsp->status_code = FC_DATA_OVRRUN; 817 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. " 818 "data len %x\n", 819 fsp->rport->port_id, 820 fsp->xfer_len, expected_len, fsp->data_len); 821 } 822 fc_fcp_complete_locked(fsp); 823 return; 824 825len_err: 826 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n", 827 flags, fr_len(fp), respl, snsl); 828err: 829 fsp->status_code = FC_ERROR; 830 fc_fcp_complete_locked(fsp); 831} 832 833/** 834 * fc_fcp_complete_locked() - complete processing of a fcp packet 835 * @fsp: fcp packet 836 * 837 * This function may sleep if a timer is pending. 
/**
 * fc_fcp_complete_locked() - complete processing of a fcp packet
 * @fsp: fcp packet
 *
 * This function may sleep if a timer is pending. The packet lock must be
 * held, and the host lock must not be held.
 */
static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp = fsp->lp;
	struct fc_seq *seq;
	struct fc_exch *ep;
	u32 f_ctl;

	if (fsp->state & FC_SRB_ABORT_PENDING)
		return;

	if (fsp->state & FC_SRB_ABORTED) {
		if (!fsp->status_code)
			fsp->status_code = FC_CMD_ABORTED;
	} else {
		/*
		 * Test for transport underrun, independent of response
		 * underrun status.
		 */
		if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
			fsp->status_code = FC_DATA_UNDRUN;
			fsp->io_status = SUGGEST_RETRY << 24;
		}
	}

	seq = fsp->seq_ptr;
	if (seq) {
		fsp->seq_ptr = NULL;
		if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
			struct fc_frame *conf_frame;
			struct fc_seq *csp;

			csp = lp->tt.seq_start_next(seq);
			conf_frame = fc_frame_alloc(fsp->lp, 0);
			if (conf_frame) {
				f_ctl = FC_FC_SEQ_INIT;
				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
				ep = fc_seq_exch(seq);
				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
					       ep->did, ep->sid,
					       FC_TYPE_FCP, f_ctl, 0);
				lp->tt.seq_send(lp, csp, conf_frame);
			}
		}
		lp->tt.exch_done(seq);
	}
	fc_io_compl(fsp);
}

static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
	struct fc_lport *lp = fsp->lp;

	if (fsp->seq_ptr) {
		lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->status_code = error;
}

/**
 * fc_fcp_cleanup_each_cmd() - Cleanup active commands
 * @lp: logical port
 * @id: target id
 * @lun: lun
 * @error: fsp status code
 *
 * If lun or id is -1, they are ignored.
 */
static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
				    unsigned int lun, int error)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;
	struct scsi_cmnd *sc_cmd;
	unsigned long flags;

	spin_lock_irqsave(lp->host->host_lock, flags);
restart:
	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
		sc_cmd = fsp->cmd;
		if (id != -1 && scmd_id(sc_cmd) != id)
			continue;

		if (lun != -1 && sc_cmd->device->lun != lun)
			continue;

		fc_fcp_pkt_hold(fsp);
		spin_unlock_irqrestore(lp->host->host_lock, flags);

		if (!fc_fcp_lock_pkt(fsp)) {
			fc_fcp_cleanup_cmd(fsp, error);
			fc_io_compl(fsp);
			fc_fcp_unlock_pkt(fsp);
		}

		fc_fcp_pkt_release(fsp);
		spin_lock_irqsave(lp->host->host_lock, flags);
		/*
		 * while we dropped the lock multiple pkts could
		 * have been released, so we have to start over.
		 */
		goto restart;
	}
	spin_unlock_irqrestore(lp->host->host_lock, flags);
}

static void fc_fcp_abort_io(struct fc_lport *lp)
{
	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
}

/**
 * fc_fcp_pkt_send() - send a fcp packet to the lower level.
 * @lp: fc lport
 * @fsp: fc packet.
 *
 * This is called by upper layer protocol.
 * Return   : zero for success and -1 for failure
 * Context  : called from queuecommand which can be called from process
 *	      or scsi soft irq.
 * Locks    : called with the host lock and irqs disabled.
 */
static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	int rc;

	fsp->cmd->SCp.ptr = (char *)fsp;
	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;

	int_to_scsilun(fsp->cmd->device->lun,
		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
	list_add_tail(&fsp->list, &si->scsi_pkt_queue);

	spin_unlock_irq(lp->host->host_lock);
	rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
	spin_lock_irq(lp->host->host_lock);
	if (rc)
		list_del(&fsp->list);

	return rc;
}

static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
			   void (*resp)(struct fc_seq *,
					struct fc_frame *fp,
					void *arg))
{
	struct fc_frame *fp;
	struct fc_seq *seq;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	const size_t len = sizeof(fsp->cdb_cmd);
	int rc = 0;

	if (fc_fcp_lock_pkt(fsp))
		return 0;

	fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
	if (!fp) {
		rc = -1;
		goto unlock;
	}

	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
	fr_cmd(fp) = fsp->cmd;
	rport = fsp->rport;
	fsp->max_payload = rport->maxframe_size;
	rp = rport->dd_data;

	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
	if (!seq) {
		fc_frame_free(fp);
		rc = -1;
		goto unlock;
	}
	fsp->last_pkt_time = jiffies;
	fsp->seq_ptr = seq;
	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */

	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
	fc_fcp_timer_set(fsp,
			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
unlock:
	fc_fcp_unlock_pkt(fsp);
	return rc;
}

/*
 * transport error handler
 */
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		return;

	switch (error) {
	case -FC_EX_CLOSED:
		fc_fcp_retry_cmd(fsp);
		goto unlock;
	default:
		FC_DBG("unknown error %ld\n", PTR_ERR(fp));
	}
	/*
	 * clear abort pending, because the lower layer
	 * decided to force completion.
	 */
	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->status_code = FC_CMD_PLOGO;
	fc_fcp_complete_locked(fsp);
unlock:
	fc_fcp_unlock_pkt(fsp);
}

/*
 * Scsi abort handler - sends an abort
 * and then waits for abort completion.
 */
static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
{
	int rc = FAILED;

	if (fc_fcp_send_abort(fsp))
		return FAILED;

	init_completion(&fsp->tm_done);
	fsp->wait_for_comp = 1;

	spin_unlock_bh(&fsp->scsi_pkt_lock);
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->wait_for_comp = 0;

	if (!rc) {
		FC_DBG("target abort cmd failed\n");
		rc = FAILED;
	} else if (fsp->state & FC_SRB_ABORTED) {
		FC_DBG("target abort cmd passed\n");
		rc = SUCCESS;
		fc_fcp_complete_locked(fsp);
	}

	return rc;
}

/*
 * Retry LUN reset after resource allocation failed.
 */
static void fc_lun_reset_send(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_lport *lp = fsp->lp;
	if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
			return;
		if (fc_fcp_lock_pkt(fsp))
			return;
		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		fc_fcp_unlock_pkt(fsp);
	}
}

/*
 * Scsi device reset handler - send a LUN RESET to the device
 * and wait for the reset reply.
 */
static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
			unsigned int id, unsigned int lun)
{
	int rc;

	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);

	fsp->wait_for_comp = 1;
	init_completion(&fsp->tm_done);

	fc_lun_reset_send((unsigned long)fsp);

	/*
	 * wait for completion of reset
	 * after that make sure all commands are terminated
	 */
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->state |= FC_SRB_COMPL;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	del_timer_sync(&fsp->timer);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->seq_ptr) {
		lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->wait_for_comp = 0;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	if (!rc) {
		FC_DBG("lun reset failed\n");
		return FAILED;
	}

	/* cdb_status holds the tmf's rsp code */
	if (fsp->cdb_status != FCP_TMF_CMPL)
		return FAILED;

	FC_DBG("lun reset to lun %u completed\n", lun);
	fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
	return SUCCESS;
}
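/*
 * Note (call-flow summary, added for reference): fc_eh_device_reset()
 * below allocates a bare fsp, then fc_lun_reset() sends the LUN RESET
 * TMF via fc_lun_reset_send() with fc_tm_done() as the response handler;
 * fc_fcp_resp() stores the TMF rsp_code in fsp->cdb_status before
 * completing tm_done.
 */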
/*
 * Task Management response handler
 */
static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/*
		 * If there is an error just let it timeout or wait
		 * for TMF to be aborted if it timed out.
		 *
		 * scsi-eh will escalate for when either happens.
		 */
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		return;

	/*
	 * raced with eh timeout handler.
	 */
	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return;
	}

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_BLS)
		fc_fcp_resp(fsp, fp);
	fsp->seq_ptr = NULL;
	fsp->lp->tt.exch_done(seq);
	fc_frame_free(fp);
	fc_fcp_unlock_pkt(fsp);
}

static void fc_fcp_cleanup(struct fc_lport *lp)
{
	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
}

/*
 * fc_fcp_timeout: called by OS timer function.
 *
 * The timer has been inactivated and must be reactivated if desired
 * using fc_fcp_timer_set().
 *
 * Algorithm:
 *
 * If REC is supported, just issue it, and return.  The REC exchange will
 * complete or time out, and recovery can continue at that point.
 *
 * Otherwise, if the response has been received without all the data,
 * it has been ER_TIMEOUT since the response was received.
 *
 * If the response has not been received,
 * we see if data was received recently.  If it has been, we continue waiting,
 * otherwise, we abort the command.
 */
static void fc_fcp_timeout(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_rport *rport = fsp->rport;
	struct fc_rport_libfc_priv *rp = rport->dd_data;

	if (fc_fcp_lock_pkt(fsp))
		return;

	if (fsp->cdb_cmd.fc_tm_flags)
		goto unlock;

	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;

	if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
		fc_fcp_rec(fsp);
	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
			       jiffies))
		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
	else if (fsp->state & FC_SRB_RCV_STATUS)
		fc_fcp_complete_locked(fsp);
	else
		fc_timeout_error(fsp);
	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
	fc_fcp_unlock_pkt(fsp);
}

/*
 * Send a REC ELS request
 */
static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp;
	struct fc_frame *fp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;

	lp = fsp->lp;
	rport = fsp->rport;
	rp = rport->dd_data;
	if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
		fsp->status_code = FC_HRD_ERROR;
		fsp->io_status = SUGGEST_RETRY << 24;
		fc_fcp_complete_locked(fsp);
		return;
	}
	fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
	if (!fp)
		goto retry;

	fr_seq(fp) = fsp->seq_ptr;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
			      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
		fc_fcp_pkt_hold(fsp);	/* hold while REC outstanding */
		return;
	}
	fc_frame_free(fp);
retry:
	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
	else
		fc_timeout_error(fsp);
}

/*
 * Receive handler for REC ELS frame.
 * If it is a reject, let the scsi layer handle the timeout.
 * If it is an LS_ACC and the I/O was not completed, set the timeout and
 * return; otherwise complete the exchange and tell the scsi layer to
 * restart the I/O.
 */
static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_els_rec_acc *recp;
	struct fc_els_ls_rjt *rjt;
	u32 e_stat;
	u8 opcode;
	u32 offset;
	enum dma_data_direction data_dir;
	enum fc_rctl r_ctl;
	struct fc_rport_libfc_priv *rp;

	if (IS_ERR(fp)) {
		fc_fcp_rec_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fsp->recov_retry = 0;
	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		switch (rjt->er_reason) {
		default:
			FC_DEBUG_FCP("device %x unexpected REC reject "
				     "reason %d expl %d\n",
				     fsp->rport->port_id, rjt->er_reason,
				     rjt->er_explan);
			/* fall through */
		case ELS_RJT_UNSUP:
			FC_DEBUG_FCP("device does not support REC\n");
			rp = fsp->rport->dd_data;
			/*
			 * if we do not support RECs or got some bogus
			 * reason then re-set up the timer so we check for
			 * making progress.
			 */
			rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
			break;
		case ELS_RJT_LOGIC:
		case ELS_RJT_UNAB:
			/*
			 * If no data transfer, the command frame got dropped
			 * so we just retry.  If data was transferred, we
			 * lost the response but the target has no record,
			 * so we abort and retry.
			 */
			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
			    fsp->xfer_len == 0) {
				fc_fcp_retry_cmd(fsp);
				break;
			}
			fc_timeout_error(fsp);
			break;
		}
	} else if (opcode == ELS_LS_ACC) {
		if (fsp->state & FC_SRB_ABORTED)
			goto unlock_out;

		data_dir = fsp->cmd->sc_data_direction;
		recp = fc_frame_payload_get(fp, sizeof(*recp));
		offset = ntohl(recp->reca_fc4value);
		e_stat = ntohl(recp->reca_e_stat);

		if (e_stat & ESB_ST_COMPLETE) {

			/*
			 * The exchange is complete.
			 *
			 * For output, we must've lost the response.
			 * For input, all data must've been sent.
			 * We may have lost the response
			 * (and a confirmation was requested) and maybe
			 * some data.
			 *
			 * If all data received, send SRR
			 * asking for response.	 If partial data received,
			 * or gaps, SRR requests data at start of gap.
			 * Recovery via SRR relies on in-order-delivery.
			 */
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end == offset) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else {
				offset = fsp->xfer_contig_end;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		} else if (e_stat & ESB_ST_SEQ_INIT) {

			/*
			 * The remote port has the initiative, so just
			 * keep waiting for it to complete.
			 */
			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		} else {

			/*
			 * The exchange is incomplete, we have seq. initiative.
			 * Lost response with requested confirmation,
			 * lost confirmation, lost transfer ready or
			 * lost write data.
			 *
			 * For output, if not all data was received, ask
			 * for transfer ready to be repeated.
			 *
			 * If we received or sent all the data, send SRR to
			 * request response.
			 *
			 * If we lost a response, we may have lost some read
			 * data as well.
			 */
			r_ctl = FC_RCTL_DD_SOL_DATA;
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				if (offset < fsp->data_len)
					r_ctl = FC_RCTL_DD_DATA_DESC;
			} else if (offset == fsp->xfer_contig_end) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end < offset) {
				offset = fsp->xfer_contig_end;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		}
	}
unlock_out:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
	fc_frame_free(fp);
}

/*
 * Handle error response or timeout for REC exchange.
 */
static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	switch (error) {
	case -FC_EX_CLOSED:
		fc_fcp_retry_cmd(fsp);
		break;

	default:
		FC_DBG("REC %p fid %x unexpected error %d\n",
		       fsp, fsp->rport->port_id, error);
		fsp->status_code = FC_CMD_PLOGO;
		/* fall through */

	case -FC_EX_TIMEOUT:
		/*
		 * Assume REC or LS_ACC was lost.
		 * The exchange manager will have aborted REC, so retry.
		 */
		FC_DBG("REC fid %x error %d retry %d/%d\n",
		       fsp->rport->port_id, error, fsp->recov_retry,
		       FC_MAX_RECOV_RETRY);
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
}

/*
 * Timeout error routine:
 * aborts the I/O, closes the exchange, and
 * sends completion notification to the scsi layer.
 */
static void fc_timeout_error(struct fc_fcp_pkt *fsp)
{
	fsp->status_code = FC_CMD_TIME_OUT;
	fsp->cdb_status = 0;
	fsp->io_status = 0;
	/*
	 * if this fails then we let the scsi command timer fire and
	 * scsi-ml escalate.
	 */
	fc_fcp_send_abort(fsp);
}

/*
 * Sequence retransmission request.
 * This is called after receiving status but insufficient data, or
 * when expecting status but the request has timed out.
 */
static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
{
	struct fc_lport *lp = fsp->lp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
	struct fc_seq *seq;
	struct fcp_srr *srr;
	struct fc_frame *fp;
	u8 cdb_op;

	rport = fsp->rport;
	rp = rport->dd_data;
	cdb_op = fsp->cdb_cmd.fc_cdb[0];

	if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
		goto retry;			/* shouldn't happen */
	fp = fc_frame_alloc(lp, sizeof(*srr));
	if (!fp)
		goto retry;

	srr = fc_frame_payload_get(fp, sizeof(*srr));
	memset(srr, 0, sizeof(*srr));
	srr->srr_op = ELS_SRR;
	srr->srr_ox_id = htons(ep->oxid);
	srr->srr_rx_id = htons(ep->rxid);
	srr->srr_r_ctl = r_ctl;
	srr->srr_rel_off = htonl(offset);

	fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
				   fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
	if (!seq) {
		fc_frame_free(fp);
		goto retry;
	}
	fsp->recov_seq = seq;
	fsp->xfer_len = offset;
	fsp->xfer_contig_end = offset;
	fsp->state &= ~FC_SRB_RCV_STATUS;
	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
	return;
retry:
	fc_fcp_retry_cmd(fsp);
}

/*
 * Handle response from SRR.
 */
static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		fc_fcp_srr_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fh = fc_frame_header_get(fp);
	/*
	 * BUG? fc_fcp_srr_error calls exch_done which would release
	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
	 * then fc_exch_timeout would be sending an abort. The exch_done
	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
	 * an abort response though.
	 */
	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_unlock_pkt(fsp);
		return;
	}

	fsp->recov_seq = NULL;
	switch (fc_frame_payload_op(fp)) {
	case ELS_LS_ACC:
		fsp->recov_retry = 0;
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		break;
	case ELS_LS_RJT:
	default:
		fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
	fsp->lp->tt.exch_done(seq);
out:
	fc_frame_free(fp);
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}

static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->lp->tt.exch_done(fsp->recov_seq);
	fsp->recov_seq = NULL;
	switch (PTR_ERR(fp)) {
	case -FC_EX_TIMEOUT:
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	case -FC_EX_CLOSED:			/* e.g., link failure */
		/* fall through */
	default:
		fc_fcp_retry_cmd(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}
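/*
 * Note (recovery-path summary, added for reference): on a command timeout
 * the ladder above is fc_fcp_timeout() -> fc_fcp_rec() (REC ELS, if the
 * rport supports it) -> fc_fcp_srr() (SRR, if FC_RP_FLAGS_RETRY) ->
 * fc_timeout_error() -> ABTS via fc_fcp_send_abort(), falling back a level
 * whenever a step is unsupported or exhausts FC_MAX_RECOV_RETRY.
 */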
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
{
	/* lock ? */
	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
}

/**
 * fc_queuecommand - The queuecommand function of the scsi template
 * @cmd: struct scsi_cmnd to be executed
 * @done: Callback function to be called when cmd is completed
 *
 * This is the I/O strategy routine, called by the scsi layer.
 * This routine is called with the host_lock held.
 */
int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_fcp_pkt *fsp;
	struct fc_rport_libfc_priv *rp;
	int rval;
	int rc = 0;
	struct fcoe_dev_stats *stats;

	lp = shost_priv(sc_cmd->device->host);

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		done(sc_cmd);
		goto out;
	}

	if (!*(struct fc_remote_port **)rport->dd_data) {
		/*
		 * rport is transitioning from blocked/deleted to
		 * online
		 */
		sc_cmd->result = DID_IMM_RETRY << 16;
		done(sc_cmd);
		goto out;
	}

	rp = rport->dd_data;

	if (!fc_fcp_lport_queue_ready(lp)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
	if (fsp == NULL) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * build the libfc request pkt
	 */
	fsp->cmd = sc_cmd;	/* save the cmd */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */
	sc_cmd->scsi_done = done;

	/*
	 * set up the transfer length
	 */
	fsp->data_len = scsi_bufflen(sc_cmd);
	fsp->xfer_len = 0;

	/*
	 * setup the data direction
	 */
	stats = lp->dev_stats[smp_processor_id()];
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		fsp->req_flags = FC_SRB_READ;
		stats->InputRequests++;
		stats->InputMegabytes = fsp->data_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		fsp->req_flags = FC_SRB_WRITE;
		stats->OutputRequests++;
		stats->OutputMegabytes = fsp->data_len;
	} else {
		fsp->req_flags = 0;
		stats->ControlRequests++;
	}

	fsp->tgt_flags = rp->flags;

	init_timer(&fsp->timer);
	fsp->timer.data = (unsigned long)fsp;

	/*
	 * send it to the lower layer
	 * if we get -1 return then put the request in the pending
	 * queue.
	 */
	rval = fc_fcp_pkt_send(lp, fsp);
	if (rval != 0) {
		fsp->state = FC_SRB_FREE;
		fc_fcp_pkt_release(fsp);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
out:
	return rc;
}
EXPORT_SYMBOL(fc_queuecommand);

/**
 * fc_io_compl() - Handle responses for completed commands
 * @fsp: scsi packet
 *
 * Translates an FCP transport error into a Linux SCSI error.
 *
 * The fcp packet lock must be held when calling.
 */
static void fc_io_compl(struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si;
	struct scsi_cmnd *sc_cmd;
	struct fc_lport *lp;
	unsigned long flags;

	fsp->state |= FC_SRB_COMPL;
	if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		del_timer_sync(&fsp->timer);
		spin_lock_bh(&fsp->scsi_pkt_lock);
	}

	lp = fsp->lp;
	si = fc_get_scsi_internal(lp);
	spin_lock_irqsave(lp->host->host_lock, flags);
	if (!fsp->cmd) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	/*
	 * if a command timed out while we had to try and throttle IO
	 * and it is now getting cleaned up, then we are about to
	 * try again so clear the throttled flag in case we get more
	 * timeouts.
	 */
	if (si->throttled && fsp->state & FC_SRB_NOMEM)
		si->throttled = 0;

	sc_cmd = fsp->cmd;
	fsp->cmd = NULL;

	if (!sc_cmd->SCp.ptr) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
	switch (fsp->status_code) {
	case FC_COMPLETE:
		if (fsp->cdb_status == 0) {
			/*
			 * good I/O status
			 */
			sc_cmd->result = DID_OK << 16;
			if (fsp->scsi_resid)
				CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
		} else if (fsp->cdb_status == QUEUE_FULL) {
			struct scsi_device *tmp_sdev;
			struct scsi_device *sdev = sc_cmd->device;

			shost_for_each_device(tmp_sdev, sdev->host) {
				if (tmp_sdev->id != sdev->id)
					continue;

				if (tmp_sdev->queue_depth > 1) {
					scsi_track_queue_full(tmp_sdev,
							      tmp_sdev->
							      queue_depth - 1);
				}
			}
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		} else {
			/*
			 * transport level I/O was ok but scsi
			 * has non zero status
			 */
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		}
		break;
	case FC_ERROR:
		sc_cmd->result = DID_ERROR << 16;
		break;
	case FC_DATA_UNDRUN:
		if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
			/*
			 * scsi status is good but transport level
			 * underrun.
			 */
			sc_cmd->result = DID_OK << 16;
		} else {
			/*
			 * scsi got underrun, this is an error
			 */
			CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
			sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		}
		break;
	case FC_DATA_OVRRUN:
		/*
		 * overrun is an error
		 */
		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		break;
	case FC_CMD_ABORTED:
		sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
		break;
	case FC_CMD_TIME_OUT:
		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
		break;
	case FC_CMD_RESET:
		sc_cmd->result = (DID_RESET << 16);
		break;
	case FC_HRD_ERROR:
		sc_cmd->result = (DID_NO_CONNECT << 16);
		break;
	default:
		sc_cmd->result = (DID_ERROR << 16);
		break;
	}

	list_del(&fsp->list);
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	/* release ref from initial allocation in queue command */
	fc_fcp_pkt_release(fsp);
}
/**
 * fc_fcp_complete() - complete processing of a fcp packet
 * @fsp: fcp packet
 *
 * This function may sleep if a fsp timer is pending.
 * The host lock must not be held by the caller.
 */
void fc_fcp_complete(struct fc_fcp_pkt *fsp)
{
	if (fc_fcp_lock_pkt(fsp))
		return;

	fc_fcp_complete_locked(fsp);
	fc_fcp_unlock_pkt(fsp);
}
EXPORT_SYMBOL(fc_fcp_complete);

/**
 * fc_eh_abort() - Abort a command
 * @sc_cmd: scsi command to abort
 *
 * From scsi host template.
 * Send ABTS to the target device and wait for the response.
 * sc_cmd is the pointer to the command to be aborted.
 */
int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_fcp_pkt *fsp;
	struct fc_lport *lp;
	int rc = FAILED;
	unsigned long flags;

	lp = shost_priv(sc_cmd->device->host);
	if (lp->state != LPORT_ST_READY)
		return rc;
	else if (!lp->link_up)
		return rc;

	spin_lock_irqsave(lp->host->host_lock, flags);
	fsp = CMD_SP(sc_cmd);
	if (!fsp) {
		/* command completed while scsi eh was setting up */
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return SUCCESS;
	}
	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
	fc_fcp_pkt_hold(fsp);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	if (fc_fcp_lock_pkt(fsp)) {
		/* completed while we were waiting for timer to be deleted */
		rc = SUCCESS;
		goto release_pkt;
	}

	rc = fc_fcp_pkt_abort(lp, fsp);
	fc_fcp_unlock_pkt(fsp);

release_pkt:
	fc_fcp_pkt_release(fsp);
	return rc;
}
EXPORT_SYMBOL(fc_eh_abort);

/**
 * fc_eh_device_reset() - Reset a single LUN
 * @sc_cmd: scsi command
 *
 * Set from scsi host template to send tm cmd to the target and wait for the
 * response.
 */
int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lp;
	struct fc_fcp_pkt *fsp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	int rc = FAILED;
	struct fc_rport_libfc_priv *rp;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval)
		goto out;

	rp = rport->dd_data;
	lp = shost_priv(sc_cmd->device->host);

	if (lp->state != LPORT_ST_READY)
		return rc;

	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
	if (fsp == NULL) {
		FC_DBG("could not allocate scsi_pkt\n");
		sc_cmd->result = DID_NO_CONNECT << 16;
		goto out;
	}

	/*
	 * Build the libfc request pkt. Do not set the scsi cmnd, because
	 * the sc passed in is not setup for execution like when sent
	 * through the queuecommand callout.
	 */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */

	/*
	 * flush outstanding commands
	 */
	rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
	fsp->state = FC_SRB_FREE;
	fc_fcp_pkt_release(fsp);

out:
	return rc;
}
EXPORT_SYMBOL(fc_eh_device_reset);
/**
 * fc_eh_host_reset() - The reset function will reset the ports on the host.
 * @sc_cmd: scsi command
 */
int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
	struct Scsi_Host *shost = sc_cmd->device->host;
	struct fc_lport *lp = shost_priv(shost);
	unsigned long wait_tmo;

	lp->tt.lport_reset(lp);
	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
		msleep(1000);

	if (fc_fcp_lport_queue_ready(lp)) {
		shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
		return SUCCESS;
	} else {
		shost_printk(KERN_INFO, shost, "Host reset failed. "
			     "lport not ready.\n");
		return FAILED;
	}
}
EXPORT_SYMBOL(fc_eh_host_reset);

/**
 * fc_slave_alloc() - configure queue depth
 * @sdev: scsi device
 *
 * Configures queue depth based on host's cmd_per_lun. If not set
 * then we use the libfc default.
 */
int fc_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	int queue_depth;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (sdev->tagged_supported) {
		if (sdev->host->hostt->cmd_per_lun)
			queue_depth = sdev->host->hostt->cmd_per_lun;
		else
			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
		scsi_activate_tcq(sdev, queue_depth);
	}
	return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);

int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
EXPORT_SYMBOL(fc_change_queue_depth);

int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
EXPORT_SYMBOL(fc_change_queue_type);

void fc_fcp_destroy(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);

	if (!list_empty(&si->scsi_pkt_queue))
		printk(KERN_ERR "Leaked scsi packets.\n");

	mempool_destroy(si->scsi_pkt_pool);
	kfree(si);
	lp->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);

int fc_fcp_init(struct fc_lport *lp)
{
	int rc;
	struct fc_fcp_internal *si;

	if (!lp->tt.fcp_cmd_send)
		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;

	if (!lp->tt.fcp_cleanup)
		lp->tt.fcp_cleanup = fc_fcp_cleanup;

	if (!lp->tt.fcp_abort_io)
		lp->tt.fcp_abort_io = fc_fcp_abort_io;

	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
	if (!si)
		return -ENOMEM;
	lp->scsi_priv = si;
	INIT_LIST_HEAD(&si->scsi_pkt_queue);

	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
	if (!si->scsi_pkt_pool) {
		rc = -ENOMEM;
		goto free_internal;
	}
	return 0;

free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);
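/*
 * Illustrative sketch (not from the original source): an LLD can install
 * its own FCP hooks before calling fc_fcp_init(), since the init only
 * fills in the transport template entries that are still NULL:
 *
 *	lport->tt.fcp_cmd_send = my_fcp_cmd_send;	// hypothetical hook
 *	if (fc_fcp_init(lport))
 *		return -ENOMEM;
 */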
static int __init libfc_init(void)
{
	int rc;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (scsi_pkt_cachep == NULL) {
		FC_DBG("Unable to allocate SRB cache...module load failed!");
		return -ENOMEM;
	}

	rc = fc_setup_exch_mgr();
	if (rc)
		goto destroy_pkt_cache;

	rc = fc_setup_rport();
	if (rc)
		goto destroy_em;

	return rc;
destroy_em:
	fc_destroy_exch_mgr();
destroy_pkt_cache:
	kmem_cache_destroy(scsi_pkt_cachep);
	return rc;
}

static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
	fc_destroy_rport();
}

module_init(libfc_init);
module_exit(libfc_exit);