zfcp_scsi.c revision ea4a3a6ac40e2a585654808d4aefb39a6d57dca0
/*
 * zfcp device driver
 *
 * Interface to Linux SCSI midlayer.
 *
 * Copyright IBM Corporation 2002, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
#include <asm/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"

static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");

static bool enable_dif;

#ifdef CONFIG_ZFCP_DIF
module_param_named(dif, enable_dif, bool, 0600);
MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
#endif

static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
                                        int reason)
{
        switch (reason) {
        case SCSI_QDEPTH_DEFAULT:
                scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
                break;
        case SCSI_QDEPTH_QFULL:
                scsi_track_queue_full(sdev, depth);
                break;
        case SCSI_QDEPTH_RAMP_UP:
                scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return sdev->queue_depth;
}

static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

        zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
        put_device(&zfcp_sdev->port->dev);
}

static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
        if (sdp->tagged_supported)
                scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
        else
                scsi_adjust_queue_depth(sdp, 0, 1);
        return 0;
}

static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
        set_host_byte(scpnt, result);
        zfcp_dbf_scsi_fail_send(scpnt);
        scpnt->scsi_done(scpnt);
}

static
int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
{
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
        struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
        int status, scsi_result, ret;

        /* reset the status for this request */
        scpnt->result = 0;
        scpnt->host_scribble = NULL;

        scsi_result = fc_remote_port_chkready(rport);
        if (unlikely(scsi_result)) {
                scpnt->result = scsi_result;
                zfcp_dbf_scsi_fail_send(scpnt);
                scpnt->scsi_done(scpnt);
                return 0;
        }

        status = atomic_read(&zfcp_sdev->status);
        if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
            !(atomic_read(&zfcp_sdev->port->status) &
              ZFCP_STATUS_COMMON_ERP_FAILED)) {
                /* only LUN access denied, but port is good
                 * not covered by FC transport, have to fail here */
                zfcp_scsi_command_fail(scpnt, DID_ERROR);
                return 0;
        }

        if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
                /* This could be either
                 * open LUN pending: this is temporary, will result in
                 *      open LUN or ERP_FAILED, so retry command
                 * call to rport_delete pending: mimic retry from
                 *      fc_remote_port_chkready until rport is BLOCKED
                 */
                zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
                return 0;
        }

        ret = zfcp_fsf_fcp_cmnd(scpnt);
        if (unlikely(ret == -EBUSY))
                return SCSI_MLQUEUE_DEVICE_BUSY;
        else if (unlikely(ret < 0))
                return SCSI_MLQUEUE_HOST_BUSY;

        return ret;
}
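
/**
 * zfcp_scsi_slave_alloc - Prepare the zfcp_scsi_dev for a new SCSI device
 * @sdev: The new SCSI device
 *
 * Looks up the zfcp_port behind the remote port of the new device,
 * requires a configured zfcp_unit when the adapter runs without NPIV,
 * initializes the per-LUN latency statistics and opens the LUN through
 * error recovery before the midlayer starts using the device.
 *
 * Returns: 0 on success, -ENXIO if no matching port or, in non-NPIV
 * mode, no matching unit exists.
 */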
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *) sdev->host->hostdata[0];
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
        struct zfcp_port *port;
        struct zfcp_unit *unit;

        port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
        if (!port)
                return -ENXIO;

        unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
        if (unit)
                put_device(&unit->dev);

        if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
                put_device(&port->dev);
                return -ENXIO;
        }

        zfcp_sdev->port = port;
        zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
        zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
        zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
        zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
        zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
        zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
        spin_lock_init(&zfcp_sdev->latencies.lock);

        zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
        zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
        zfcp_erp_wait(port->adapter);

        return 0;
}

static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
        struct Scsi_Host *scsi_host = scpnt->device->host;
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *) scsi_host->hostdata[0];
        struct zfcp_fsf_req *old_req, *abrt_req;
        unsigned long flags;
        unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
        int retval = SUCCESS, ret;
        int retry = 3;
        char *dbf_tag;

        /* avoid race condition between late normal completion and abort */
        write_lock_irqsave(&adapter->abort_lock, flags);

        old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
        if (!old_req) {
                write_unlock_irqrestore(&adapter->abort_lock, flags);
                zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
                return FAILED; /* completion could be in progress */
        }
        old_req->data = NULL;

        /* don't access old fsf_req after releasing the abort_lock */
        write_unlock_irqrestore(&adapter->abort_lock, flags);

        while (retry--) {
                abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
                if (abrt_req)
                        break;

                zfcp_erp_wait(adapter);
                ret = fc_block_scsi_eh(scpnt);
                if (ret) {
                        zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
                        return ret;
                }
                if (!(atomic_read(&adapter->status) &
                      ZFCP_STATUS_COMMON_RUNNING)) {
                        zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
                        return SUCCESS;
                }
        }
        if (!abrt_req) {
                zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
                return FAILED;
        }

        wait_for_completion(&abrt_req->completion);

        if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
                dbf_tag = "abrt_ok";
        else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
                dbf_tag = "abrt_nn";
        else {
                dbf_tag = "abrt_fa";
                retval = FAILED;
        }
        zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
        zfcp_fsf_req_free(abrt_req);
        return retval;
}
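
/**
 * zfcp_task_mgmt_function - Send an FCP task management function
 * @scpnt: SCSI command the task management function is issued for
 * @tm_flags: FCP task management flags, e.g. FCP_TMF_LUN_RESET
 *
 * Retries the FSF request a few times while error recovery is running,
 * then waits for its completion and evaluates the result.
 *
 * Returns: SUCCESS or FAILED for the SCSI error handler, or the result
 * of fc_block_scsi_eh().
 */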
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
        struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
        struct zfcp_fsf_req *fsf_req = NULL;
        int retval = SUCCESS, ret;
        int retry = 3;

        while (retry--) {
                fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
                if (fsf_req)
                        break;

                zfcp_erp_wait(adapter);
                ret = fc_block_scsi_eh(scpnt);
                if (ret)
                        return ret;

                if (!(atomic_read(&adapter->status) &
                      ZFCP_STATUS_COMMON_RUNNING)) {
                        zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
                        return SUCCESS;
                }
        }
        if (!fsf_req)
                return FAILED;

        wait_for_completion(&fsf_req->completion);

        if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
                zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
                retval = FAILED;
        } else
                zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);

        zfcp_fsf_req_free(fsf_req);
        return retval;
}

static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
        return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}

static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
        return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}

static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
        struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
        int ret;

        zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
        zfcp_erp_wait(adapter);
        ret = fc_block_scsi_eh(scpnt);
        if (ret)
                return ret;

        return SUCCESS;
}

int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
{
        struct ccw_dev_id dev_id;

        if (adapter->scsi_host)
                return 0;

        ccw_device_get_id(adapter->ccw_device, &dev_id);
        /* register adapter as SCSI host with mid layer of SCSI stack */
        adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
                                             sizeof (struct zfcp_adapter *));
        if (!adapter->scsi_host) {
                dev_err(&adapter->ccw_device->dev,
                        "Registering the FCP device with the "
                        "SCSI stack failed\n");
                return -EIO;
        }

        /* tell the SCSI stack some characteristics of this adapter */
        adapter->scsi_host->max_id = 511;
        adapter->scsi_host->max_lun = 0xFFFFFFFF;
        adapter->scsi_host->max_channel = 0;
        adapter->scsi_host->unique_id = dev_id.devno;
        adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
        adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;

        adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

        if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
                scsi_host_put(adapter->scsi_host);
                return -EIO;
        }

        return 0;
}

void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
{
        struct Scsi_Host *shost;
        struct zfcp_port *port;

        shost = adapter->scsi_host;
        if (!shost)
                return;

        read_lock_irq(&adapter->port_list_lock);
        list_for_each_entry(port, &adapter->port_list, list)
                port->rport = NULL;
        read_unlock_irq(&adapter->port_list_lock);

        fc_remove_host(shost);
        scsi_remove_host(shost);
        scsi_host_put(shost);
        adapter->scsi_host = NULL;

        return;
}

static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
        struct fc_host_statistics *fc_stats;

        if (!adapter->fc_stats) {
                fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
                if (!fc_stats)
                        return NULL;
                adapter->fc_stats = fc_stats; /* freed in adapter_release */
        }
        memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
        return adapter->fc_stats;
}
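
/**
 * zfcp_adjust_fc_host_stats - Report statistics relative to the last reset
 * @fc_stats: fc_host statistics to fill in
 * @data: current counters returned by exchange port data
 * @old: counters saved when the statistics were last reset
 *
 * Computes each fc_host statistic as the difference between the current
 * port counters and the snapshot taken at the last user-requested reset,
 * emulating resettable counters on top of the absolute values reported
 * by the FCP channel.
 */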
static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
                                      struct fsf_qtcb_bottom_port *data,
                                      struct fsf_qtcb_bottom_port *old)
{
        fc_stats->seconds_since_last_reset =
                data->seconds_since_last_reset - old->seconds_since_last_reset;
        fc_stats->tx_frames = data->tx_frames - old->tx_frames;
        fc_stats->tx_words = data->tx_words - old->tx_words;
        fc_stats->rx_frames = data->rx_frames - old->rx_frames;
        fc_stats->rx_words = data->rx_words - old->rx_words;
        fc_stats->lip_count = data->lip - old->lip;
        fc_stats->nos_count = data->nos - old->nos;
        fc_stats->error_frames = data->error_frames - old->error_frames;
        fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
        fc_stats->link_failure_count = data->link_failure - old->link_failure;
        fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
        fc_stats->loss_of_signal_count =
                data->loss_of_signal - old->loss_of_signal;
        fc_stats->prim_seq_protocol_err_count =
                data->psp_error_counts - old->psp_error_counts;
        fc_stats->invalid_tx_word_count =
                data->invalid_tx_words - old->invalid_tx_words;
        fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
        fc_stats->fcp_input_requests =
                data->input_requests - old->input_requests;
        fc_stats->fcp_output_requests =
                data->output_requests - old->output_requests;
        fc_stats->fcp_control_requests =
                data->control_requests - old->control_requests;
        fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
        fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
                                   struct fsf_qtcb_bottom_port *data)
{
        fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
        fc_stats->tx_frames = data->tx_frames;
        fc_stats->tx_words = data->tx_words;
        fc_stats->rx_frames = data->rx_frames;
        fc_stats->rx_words = data->rx_words;
        fc_stats->lip_count = data->lip;
        fc_stats->nos_count = data->nos;
        fc_stats->error_frames = data->error_frames;
        fc_stats->dumped_frames = data->dumped_frames;
        fc_stats->link_failure_count = data->link_failure;
        fc_stats->loss_of_sync_count = data->loss_of_sync;
        fc_stats->loss_of_signal_count = data->loss_of_signal;
        fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
        fc_stats->invalid_tx_word_count = data->invalid_tx_words;
        fc_stats->invalid_crc_count = data->invalid_crcs;
        fc_stats->fcp_input_requests = data->input_requests;
        fc_stats->fcp_output_requests = data->output_requests;
        fc_stats->fcp_control_requests = data->control_requests;
        fc_stats->fcp_input_megabytes = data->input_mb;
        fc_stats->fcp_output_megabytes = data->output_mb;
}

static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
{
        struct zfcp_adapter *adapter;
        struct fc_host_statistics *fc_stats;
        struct fsf_qtcb_bottom_port *data;
        int ret;

        adapter = (struct zfcp_adapter *)host->hostdata[0];
        fc_stats = zfcp_init_fc_host_stats(adapter);
        if (!fc_stats)
                return NULL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return NULL;

        ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
        if (ret) {
                kfree(data);
                return NULL;
        }

        if (adapter->stats_reset &&
            ((jiffies/HZ - adapter->stats_reset) <
             data->seconds_since_last_reset))
                zfcp_adjust_fc_host_stats(fc_stats, data,
                                          adapter->stats_reset_data);
        else
                zfcp_set_fc_host_stats(fc_stats, data);

        kfree(data);
        return fc_stats;
}
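
/**
 * zfcp_reset_fc_host_stats - Reset the fc_host statistics baseline
 * @shost: SCSI host the statistics are reset for
 *
 * Fetches the current port counters via exchange port data and stores
 * them as the new baseline used by zfcp_adjust_fc_host_stats(); the
 * previous snapshot is freed and the reset time is recorded.
 */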
static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
{
        struct zfcp_adapter *adapter;
        struct fsf_qtcb_bottom_port *data;
        int ret;

        adapter = (struct zfcp_adapter *)shost->hostdata[0];
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return;

        ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
        if (ret)
                kfree(data);
        else {
                adapter->stats_reset = jiffies/HZ;
                kfree(adapter->stats_reset_data);
                adapter->stats_reset_data = data; /* finally freed in
                                                     adapter_release */
        }
}

static void zfcp_get_host_port_state(struct Scsi_Host *shost)
{
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *)shost->hostdata[0];
        int status = atomic_read(&adapter->status);

        if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
            !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
                fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
        else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
                fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
        else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
                fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
        else
                fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
}

static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
        rport->dev_loss_tmo = timeout;
}

/**
 * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
 * @rport: The FC rport where to terminate I/O
 *
 * Abort all pending SCSI commands for a port by closing the
 * port. Using a reopen avoids a conflict with a shutdown
 * overwriting a reopen. The "forced" ensures that a disappeared port
 * is not opened again as valid due to the cached plogi data in
 * non-NPIV mode.
 */
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
        struct zfcp_port *port;
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *)shost->hostdata[0];

        port = zfcp_get_port_by_wwpn(adapter, rport->port_name);

        if (port) {
                zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
                put_device(&port->dev);
        }
}

static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
        struct fc_rport_identifiers ids;
        struct fc_rport *rport;

        if (port->rport)
                return;

        ids.node_name = port->wwnn;
        ids.port_name = port->wwpn;
        ids.port_id = port->d_id;
        ids.roles = FC_RPORT_ROLE_FCP_TARGET;

        rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
        if (!rport) {
                dev_err(&port->adapter->ccw_device->dev,
                        "Registering port 0x%016Lx failed\n",
                        (unsigned long long)port->wwpn);
                return;
        }

        rport->maxframe_size = port->maxframe_size;
        rport->supported_classes = port->supported_classes;
        port->rport = rport;
        port->starget_id = rport->scsi_target_id;

        zfcp_unit_queue_scsi_scan(port);
}

static void zfcp_scsi_rport_block(struct zfcp_port *port)
{
        struct fc_rport *rport = port->rport;

        if (rport) {
                fc_remote_port_delete(rport);
                port->rport = NULL;
        }
}

void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
        get_device(&port->dev);
        port->rport_task = RPORT_ADD;

        if (!queue_work(port->adapter->work_queue, &port->rport_work))
                put_device(&port->dev);
}

void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
        get_device(&port->dev);
        port->rport_task = RPORT_DEL;

        if (port->rport && queue_work(port->adapter->work_queue,
                                      &port->rport_work))
                return;

        put_device(&port->dev);
}
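
/**
 * zfcp_scsi_schedule_rports_block - Block all rports of an adapter
 * @adapter: The adapter whose remote ports are blocked
 *
 * Walks the adapter's port list under the port_list_lock and schedules
 * an rport block work item for each port.
 */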
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
{
        unsigned long flags;
        struct zfcp_port *port;

        read_lock_irqsave(&adapter->port_list_lock, flags);
        list_for_each_entry(port, &adapter->port_list, list)
                zfcp_scsi_schedule_rport_block(port);
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

void zfcp_scsi_rport_work(struct work_struct *work)
{
        struct zfcp_port *port = container_of(work, struct zfcp_port,
                                              rport_work);

        while (port->rport_task) {
                if (port->rport_task == RPORT_ADD) {
                        port->rport_task = RPORT_NONE;
                        zfcp_scsi_rport_register(port);
                } else {
                        port->rport_task = RPORT_NONE;
                        zfcp_scsi_rport_block(port);
                }
        }

        put_device(&port->dev);
}

/**
 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
 * @adapter: The adapter where to configure DIF/DIX for the SCSI host
 */
void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
{
        unsigned int mask = 0;
        unsigned int data_div;
        struct Scsi_Host *shost = adapter->scsi_host;

        data_div = atomic_read(&adapter->status) &
                   ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;

        if (enable_dif &&
            adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
                mask |= SHOST_DIF_TYPE1_PROTECTION;

        if (enable_dif && data_div &&
            adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
                mask |= SHOST_DIX_TYPE1_PROTECTION;
                scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
                shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
                shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
                shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
        }

        scsi_host_set_prot(shost, mask);
}

/**
 * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
 * @scmd: The SCSI command to report the error for
 * @ascq: The ASCQ to put in the sense buffer
 *
 * See the error handling in sd_done for the sense codes used here.
 * Set DID_SOFT_ERROR to retry the request, if possible.
 */
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
        scsi_build_sense_buffer(1, scmd->sense_buffer,
                                ILLEGAL_REQUEST, 0x10, ascq);
        set_driver_byte(scmd, DRIVER_SENSE);
        scmd->result |= SAM_STAT_CHECK_CONDITION;
        set_host_byte(scmd, DID_SOFT_ERROR);
}

struct fc_function_template zfcp_transport_functions = {
        .show_starget_port_id = 1,
        .show_starget_port_name = 1,
        .show_starget_node_name = 1,
        .show_rport_supported_classes = 1,
        .show_rport_maxframe_size = 1,
        .show_rport_dev_loss_tmo = 1,
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_permanent_port_name = 1,
        .show_host_supported_classes = 1,
        .show_host_supported_fc4s = 1,
        .show_host_supported_speeds = 1,
        .show_host_maxframe_size = 1,
        .show_host_serial_number = 1,
        .get_fc_host_stats = zfcp_get_fc_host_stats,
        .reset_fc_host_stats = zfcp_reset_fc_host_stats,
        .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
        .get_host_port_state = zfcp_get_host_port_state,
        .terminate_rport_io = zfcp_scsi_terminate_rport_io,
        .show_host_port_state = 1,
        .show_host_active_fc4s = 1,
        .bsg_request = zfcp_fc_exec_bsg_job,
        .bsg_timeout = zfcp_fc_timeout_bsg_job,
        /* no functions registered for following dynamic attributes but
           directly set by LLDD */
        .show_host_port_type = 1,
        .show_host_speed = 1,
        .show_host_port_id = 1,
        .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};

struct zfcp_data zfcp_data = {
        .scsi_host_template = {
                .name = "zfcp",
                .module = THIS_MODULE,
                .proc_name = "zfcp",
                .change_queue_depth = zfcp_scsi_change_queue_depth,
                .slave_alloc = zfcp_scsi_slave_alloc,
                .slave_configure = zfcp_scsi_slave_configure,
                .slave_destroy = zfcp_scsi_slave_destroy,
                .queuecommand = zfcp_scsi_queuecommand,
                .eh_abort_handler = zfcp_scsi_eh_abort_handler,
                .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
                .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
                .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
                .can_queue = 4096,
                .this_id = -1,
                .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
                .cmd_per_lun = 1,
                .use_clustering = 1,
                .sdev_attrs = zfcp_sysfs_sdev_attrs,
                .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
                .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
                .shost_attrs = zfcp_sysfs_shost_attrs,
        },
};