lpfc_init.c revision 1151e3ec15c32021a8a12a123459ab5e41692898
1/******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22#include <linux/blkdev.h> 23#include <linux/delay.h> 24#include <linux/dma-mapping.h> 25#include <linux/idr.h> 26#include <linux/interrupt.h> 27#include <linux/kthread.h> 28#include <linux/pci.h> 29#include <linux/spinlock.h> 30#include <linux/ctype.h> 31#include <linux/aer.h> 32#include <linux/slab.h> 33 34#include <scsi/scsi.h> 35#include <scsi/scsi_device.h> 36#include <scsi/scsi_host.h> 37#include <scsi/scsi_transport_fc.h> 38 39#include "lpfc_hw4.h" 40#include "lpfc_hw.h" 41#include "lpfc_sli.h" 42#include "lpfc_sli4.h" 43#include "lpfc_nl.h" 44#include "lpfc_disc.h" 45#include "lpfc_scsi.h" 46#include "lpfc.h" 47#include "lpfc_logmsg.h" 48#include "lpfc_crtn.h" 49#include "lpfc_vport.h" 50#include "lpfc_version.h" 51 52char *_dump_buf_data; 53unsigned long _dump_buf_data_order; 54char *_dump_buf_dif; 55unsigned long _dump_buf_dif_order; 56spinlock_t _dump_buf_lock; 57 58static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 59static int lpfc_post_rcv_buf(struct lpfc_hba *); 60static int lpfc_sli4_queue_create(struct lpfc_hba *); 61static void lpfc_sli4_queue_destroy(struct lpfc_hba *); 62static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 63static int lpfc_setup_endian_order(struct lpfc_hba *); 64static int lpfc_sli4_read_config(struct lpfc_hba *); 65static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 66static void lpfc_free_sgl_list(struct lpfc_hba *); 67static int lpfc_init_sgl_list(struct lpfc_hba *); 68static int lpfc_init_active_sgl_array(struct lpfc_hba *); 69static void lpfc_free_active_sgl(struct lpfc_hba *); 70static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 71static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 72static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 73static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 74static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 75 76static struct scsi_transport_template *lpfc_transport_template = NULL; 77static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 78static DEFINE_IDR(lpfc_hba_index); 79 80/** 81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 82 * @phba: pointer to lpfc hba data structure. 83 * 84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 85 * mailbox command. 
It retrieves the revision information from the HBA and 86 * collects the Vital Product Data (VPD) about the HBA for preparing the 87 * configuration of the HBA. 88 * 89 * Return codes: 90 * 0 - success. 91 * -ERESTART - requests the SLI layer to reset the HBA and try again. 92 * Any other value - indicates an error. 93 **/ 94int 95lpfc_config_port_prep(struct lpfc_hba *phba) 96{ 97 lpfc_vpd_t *vp = &phba->vpd; 98 int i = 0, rc; 99 LPFC_MBOXQ_t *pmb; 100 MAILBOX_t *mb; 101 char *lpfc_vpd_data = NULL; 102 uint16_t offset = 0; 103 static char licensed[56] = 104 "key unlock for use with gnu public licensed code only\0"; 105 static int init_key = 1; 106 107 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 108 if (!pmb) { 109 phba->link_state = LPFC_HBA_ERROR; 110 return -ENOMEM; 111 } 112 113 mb = &pmb->u.mb; 114 phba->link_state = LPFC_INIT_MBX_CMDS; 115 116 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 117 if (init_key) { 118 uint32_t *ptext = (uint32_t *) licensed; 119 120 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 121 *ptext = cpu_to_be32(*ptext); 122 init_key = 0; 123 } 124 125 lpfc_read_nv(phba, pmb); 126 memset((char*)mb->un.varRDnvp.rsvd3, 0, 127 sizeof (mb->un.varRDnvp.rsvd3)); 128 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 129 sizeof (licensed)); 130 131 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 132 133 if (rc != MBX_SUCCESS) { 134 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 135 "0324 Config Port initialization " 136 "error, mbxCmd x%x READ_NVPARM, " 137 "mbxStatus x%x\n", 138 mb->mbxCommand, mb->mbxStatus); 139 mempool_free(pmb, phba->mbox_mem_pool); 140 return -ERESTART; 141 } 142 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 143 sizeof(phba->wwnn)); 144 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 145 sizeof(phba->wwpn)); 146 } 147 148 phba->sli3_options = 0x0; 149 150 /* Setup and issue mailbox READ REV command */ 151 lpfc_read_rev(phba, pmb); 152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 153 if (rc != MBX_SUCCESS) { 154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 155 "0439 Adapter failed to init, mbxCmd x%x " 156 "READ_REV, mbxStatus x%x\n", 157 mb->mbxCommand, mb->mbxStatus); 158 mempool_free( pmb, phba->mbox_mem_pool); 159 return -ERESTART; 160 } 161 162 163 /* 164 * The value of rr must be 1 since the driver set the cv field to 1. 165 * This setting requires the FW to set all revision fields. 
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.
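	 * (The READ_SPARAM mailbox returns the port's service parameters in
	 * the DMA buffer attached at pmb->context1; they are copied into
	 * vport->fc_sparam below and the WWNN/WWPN are derived from them.)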
*/ 354 rc = lpfc_read_sparam(phba, pmb, 0); 355 if (rc) { 356 mempool_free(pmb, phba->mbox_mem_pool); 357 return -ENOMEM; 358 } 359 360 pmb->vport = vport; 361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 363 "0448 Adapter failed init, mbxCmd x%x " 364 "READ_SPARM mbxStatus x%x\n", 365 mb->mbxCommand, mb->mbxStatus); 366 phba->link_state = LPFC_HBA_ERROR; 367 mp = (struct lpfc_dmabuf *) pmb->context1; 368 mempool_free(pmb, phba->mbox_mem_pool); 369 lpfc_mbuf_free(phba, mp->virt, mp->phys); 370 kfree(mp); 371 return -EIO; 372 } 373 374 mp = (struct lpfc_dmabuf *) pmb->context1; 375 376 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 377 lpfc_mbuf_free(phba, mp->virt, mp->phys); 378 kfree(mp); 379 pmb->context1 = NULL; 380 381 if (phba->cfg_soft_wwnn) 382 u64_to_wwn(phba->cfg_soft_wwnn, 383 vport->fc_sparam.nodeName.u.wwn); 384 if (phba->cfg_soft_wwpn) 385 u64_to_wwn(phba->cfg_soft_wwpn, 386 vport->fc_sparam.portName.u.wwn); 387 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 388 sizeof (struct lpfc_name)); 389 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 390 sizeof (struct lpfc_name)); 391 392 /* Update the fc_host data structures with new wwn. */ 393 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 394 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 395 fc_host_max_npiv_vports(shost) = phba->max_vpi; 396 397 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 398 /* This should be consolidated into parse_vpd ? - mr */ 399 if (phba->SerialNumber[0] == 0) { 400 uint8_t *outptr; 401 402 outptr = &vport->fc_nodename.u.s.IEEE[0]; 403 for (i = 0; i < 12; i++) { 404 status = *outptr++; 405 j = ((status & 0xf0) >> 4); 406 if (j <= 9) 407 phba->SerialNumber[i] = 408 (char)((uint8_t) 0x30 + (uint8_t) j); 409 else 410 phba->SerialNumber[i] = 411 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 412 i++; 413 j = (status & 0xf); 414 if (j <= 9) 415 phba->SerialNumber[i] = 416 (char)((uint8_t) 0x30 + (uint8_t) j); 417 else 418 phba->SerialNumber[i] = 419 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 420 } 421 } 422 423 lpfc_read_config(phba, pmb); 424 pmb->vport = vport; 425 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 427 "0453 Adapter failed to init, mbxCmd x%x " 428 "READ_CONFIG, mbxStatus x%x\n", 429 mb->mbxCommand, mb->mbxStatus); 430 phba->link_state = LPFC_HBA_ERROR; 431 mempool_free( pmb, phba->mbox_mem_pool); 432 return -EIO; 433 } 434 435 /* Check if the port is disabled */ 436 lpfc_sli_read_link_ste(phba); 437 438 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 439 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 440 phba->cfg_hba_queue_depth = 441 (mb->un.varRdConfig.max_xri + 1) - 442 lpfc_sli4_get_els_iocb_cnt(phba); 443 444 phba->lmt = mb->un.varRdConfig.lmt; 445 446 /* Get the default values for Model Name and Description */ 447 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 448 449 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G) 450 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) 451 && !(phba->lmt & LMT_1Gb)) 452 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) 453 && !(phba->lmt & LMT_2Gb)) 454 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) 455 && !(phba->lmt & LMT_4Gb)) 456 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) 457 && !(phba->lmt & LMT_8Gb)) 458 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) 459 && 
!(phba->lmt & LMT_10Gb)) 460 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) 461 && !(phba->lmt & LMT_16Gb))) { 462 /* Reset link speed to auto */ 463 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 464 "1302 Invalid speed for this board: " 465 "Reset link speed to auto: x%x\n", 466 phba->cfg_link_speed); 467 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 468 } 469 470 phba->link_state = LPFC_LINK_DOWN; 471 472 /* Only process IOCBs on ELS ring till hba_state is READY */ 473 if (psli->ring[psli->extra_ring].cmdringaddr) 474 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 475 if (psli->ring[psli->fcp_ring].cmdringaddr) 476 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 477 if (psli->ring[psli->next_ring].cmdringaddr) 478 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 479 480 /* Post receive buffers for desired rings */ 481 if (phba->sli_rev != 3) 482 lpfc_post_rcv_buf(phba); 483 484 /* 485 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 486 */ 487 if (phba->intr_type == MSIX) { 488 rc = lpfc_config_msi(phba, pmb); 489 if (rc) { 490 mempool_free(pmb, phba->mbox_mem_pool); 491 return -EIO; 492 } 493 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 494 if (rc != MBX_SUCCESS) { 495 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 496 "0352 Config MSI mailbox command " 497 "failed, mbxCmd x%x, mbxStatus x%x\n", 498 pmb->u.mb.mbxCommand, 499 pmb->u.mb.mbxStatus); 500 mempool_free(pmb, phba->mbox_mem_pool); 501 return -EIO; 502 } 503 } 504 505 spin_lock_irq(&phba->hbalock); 506 /* Initialize ERATT handling flag */ 507 phba->hba_flag &= ~HBA_ERATT_HANDLED; 508 509 /* Enable appropriate host interrupts */ 510 status = readl(phba->HCregaddr); 511 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 512 if (psli->num_rings > 0) 513 status |= HC_R0INT_ENA; 514 if (psli->num_rings > 1) 515 status |= HC_R1INT_ENA; 516 if (psli->num_rings > 2) 517 status |= HC_R2INT_ENA; 518 if (psli->num_rings > 3) 519 status |= HC_R3INT_ENA; 520 521 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 522 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 523 status &= ~(HC_R0INT_ENA); 524 525 writel(status, phba->HCregaddr); 526 readl(phba->HCregaddr); /* flush */ 527 spin_unlock_irq(&phba->hbalock); 528 529 /* Set up ring-0 (ELS) timer */ 530 timeout = phba->fc_ratov * 2; 531 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 532 /* Set up heart beat (HB) timer */ 533 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 534 phba->hb_outstanding = 0; 535 phba->last_completion_time = jiffies; 536 /* Set up error attention (ERATT) polling timer */ 537 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 538 539 if (phba->hba_flag & LINK_DISABLED) { 540 lpfc_printf_log(phba, 541 KERN_ERR, LOG_INIT, 542 "2598 Adapter Link is disabled.\n"); 543 lpfc_down_link(phba, pmb); 544 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 545 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 546 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 547 lpfc_printf_log(phba, 548 KERN_ERR, LOG_INIT, 549 "2599 Adapter failed to issue DOWN_LINK" 550 " mbox command rc 0x%x\n", rc); 551 552 mempool_free(pmb, phba->mbox_mem_pool); 553 return -EIO; 554 } 555 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 556 lpfc_init_link(phba, pmb, phba->cfg_topology, 557 phba->cfg_link_speed); 558 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 559 lpfc_set_loopback_flag(phba); 560 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 561 if (rc != MBX_SUCCESS) { 562 lpfc_printf_log(phba, 
KERN_ERR, LOG_INIT, 563 "0454 Adapter failed to init, mbxCmd x%x " 564 "INIT_LINK, mbxStatus x%x\n", 565 mb->mbxCommand, mb->mbxStatus); 566 567 /* Clear all interrupt enable conditions */ 568 writel(0, phba->HCregaddr); 569 readl(phba->HCregaddr); /* flush */ 570 /* Clear all pending interrupts */ 571 writel(0xffffffff, phba->HAregaddr); 572 readl(phba->HAregaddr); /* flush */ 573 574 phba->link_state = LPFC_HBA_ERROR; 575 if (rc != MBX_BUSY) 576 mempool_free(pmb, phba->mbox_mem_pool); 577 return -EIO; 578 } 579 } 580 /* MBOX buffer will be freed in mbox compl */ 581 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 582 if (!pmb) { 583 phba->link_state = LPFC_HBA_ERROR; 584 return -ENOMEM; 585 } 586 587 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 588 pmb->mbox_cmpl = lpfc_config_async_cmpl; 589 pmb->vport = phba->pport; 590 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 591 592 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 593 lpfc_printf_log(phba, 594 KERN_ERR, 595 LOG_INIT, 596 "0456 Adapter failed to issue " 597 "ASYNCEVT_ENABLE mbox status x%x\n", 598 rc); 599 mempool_free(pmb, phba->mbox_mem_pool); 600 } 601 602 /* Get Option rom version */ 603 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 604 if (!pmb) { 605 phba->link_state = LPFC_HBA_ERROR; 606 return -ENOMEM; 607 } 608 609 lpfc_dump_wakeup_param(phba, pmb); 610 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 611 pmb->vport = phba->pport; 612 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 613 614 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 616 "to get Option ROM version status x%x\n", rc); 617 mempool_free(pmb, phba->mbox_mem_pool); 618 } 619 620 return 0; 621} 622 623/** 624 * lpfc_hba_init_link - Initialize the FC link 625 * @phba: pointer to lpfc hba data structure. 626 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 627 * 628 * This routine will issue the INIT_LINK mailbox command call. 629 * It is available to other drivers through the lpfc_hba data 630 * structure for use as a delayed link up mechanism with the 631 * module parameter lpfc_suppress_link_up. 
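 *
 * A minimal usage sketch (the caller shown here is hypothetical, not a path
 * in this file): code that suppressed the link at initialization could later
 * bring it up with
 *
 *	rc = lpfc_hba_init_link(phba, MBX_NOWAIT);
 *	if (rc)
 *		return rc;
 *
 * With MBX_POLL the mailbox buffer is freed in this routine before it
 * returns; with MBX_NOWAIT the default completion handler
 * (lpfc_sli_def_mbox_cmpl) is expected to release it.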
632 * 633 * Return code 634 * 0 - success 635 * Any other value - error 636 **/ 637int 638lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 639{ 640 struct lpfc_vport *vport = phba->pport; 641 LPFC_MBOXQ_t *pmb; 642 MAILBOX_t *mb; 643 int rc; 644 645 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 646 if (!pmb) { 647 phba->link_state = LPFC_HBA_ERROR; 648 return -ENOMEM; 649 } 650 mb = &pmb->u.mb; 651 pmb->vport = vport; 652 653 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 654 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 655 lpfc_set_loopback_flag(phba); 656 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 657 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 659 "0498 Adapter failed to init, mbxCmd x%x " 660 "INIT_LINK, mbxStatus x%x\n", 661 mb->mbxCommand, mb->mbxStatus); 662 if (phba->sli_rev <= LPFC_SLI_REV3) { 663 /* Clear all interrupt enable conditions */ 664 writel(0, phba->HCregaddr); 665 readl(phba->HCregaddr); /* flush */ 666 /* Clear all pending interrupts */ 667 writel(0xffffffff, phba->HAregaddr); 668 readl(phba->HAregaddr); /* flush */ 669 } 670 phba->link_state = LPFC_HBA_ERROR; 671 if (rc != MBX_BUSY || flag == MBX_POLL) 672 mempool_free(pmb, phba->mbox_mem_pool); 673 return -EIO; 674 } 675 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 676 if (flag == MBX_POLL) 677 mempool_free(pmb, phba->mbox_mem_pool); 678 679 return 0; 680} 681 682/** 683 * lpfc_hba_down_link - this routine downs the FC link 684 * @phba: pointer to lpfc hba data structure. 685 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 686 * 687 * This routine will issue the DOWN_LINK mailbox command call. 688 * It is available to other drivers through the lpfc_hba data 689 * structure for use to stop the link. 690 * 691 * Return code 692 * 0 - success 693 * Any other value - error 694 **/ 695int 696lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 697{ 698 LPFC_MBOXQ_t *pmb; 699 int rc; 700 701 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 702 if (!pmb) { 703 phba->link_state = LPFC_HBA_ERROR; 704 return -ENOMEM; 705 } 706 707 lpfc_printf_log(phba, 708 KERN_ERR, LOG_INIT, 709 "0491 Adapter Link is disabled.\n"); 710 lpfc_down_link(phba, pmb); 711 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 712 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 713 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 714 lpfc_printf_log(phba, 715 KERN_ERR, LOG_INIT, 716 "2522 Adapter failed to issue DOWN_LINK" 717 " mbox command rc 0x%x\n", rc); 718 719 mempool_free(pmb, phba->mbox_mem_pool); 720 return -EIO; 721 } 722 if (flag == MBX_POLL) 723 mempool_free(pmb, phba->mbox_mem_pool); 724 725 return 0; 726} 727 728/** 729 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 730 * @phba: pointer to lpfc HBA data structure. 731 * 732 * This routine will do LPFC uninitialization before the HBA is reset when 733 * bringing down the SLI Layer. 734 * 735 * Return codes 736 * 0 - success. 737 * Any other value - error. 
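 *
 * Only SLI-3 class ports disable interrupts through the HC register here;
 * for SLI-4 the interrupt teardown is handled by the SLI-4 reset path. The
 * routine otherwise just cleans up discovery resources, either on the
 * physical port alone (when the driver is unloading) or across every active
 * vport.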
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
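	 * Buffers recovered below are handed back to their free pools: aborted
	 * ELS SGLs are marked SGL_FREED and spliced onto lpfc_sgl_list, and
	 * aborted SCSI buffers have pCmd cleared before being returned to
	 * lpfc_scsi_buf_list.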
853 */ 854 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 855 list_for_each_entry(sglq_entry, 856 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 857 sglq_entry->state = SGL_FREED; 858 859 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 860 &phba->sli4_hba.lpfc_sgl_list); 861 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 862 /* abts_scsi_buf_list_lock required because worker thread uses this 863 * list. 864 */ 865 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 866 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 867 &aborts); 868 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 869 spin_unlock_irq(&phba->hbalock); 870 871 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 872 psb->pCmd = NULL; 873 psb->status = IOSTAT_SUCCESS; 874 } 875 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 876 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 877 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 878 return 0; 879} 880 881/** 882 * lpfc_hba_down_post - Wrapper func for hba down post routine 883 * @phba: pointer to lpfc HBA data structure. 884 * 885 * This routine wraps the actual SLI3 or SLI4 routine for performing 886 * uninitialization after the HBA is reset when bring down the SLI Layer. 887 * 888 * Return codes 889 * 0 - success. 890 * Any other value - error. 891 **/ 892int 893lpfc_hba_down_post(struct lpfc_hba *phba) 894{ 895 return (*phba->lpfc_hba_down_post)(phba); 896} 897 898/** 899 * lpfc_hb_timeout - The HBA-timer timeout handler 900 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 901 * 902 * This is the HBA-timer timeout handler registered to the lpfc driver. When 903 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 904 * work-port-events bitmap and the worker thread is notified. This timeout 905 * event will be used by the worker thread to invoke the actual timeout 906 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 907 * be performed in the timeout handler and the HBA timeout event bit shall 908 * be cleared by the worker thread after it has taken the event bitmap out. 909 **/ 910static void 911lpfc_hb_timeout(unsigned long ptr) 912{ 913 struct lpfc_hba *phba; 914 uint32_t tmo_posted; 915 unsigned long iflag; 916 917 phba = (struct lpfc_hba *)ptr; 918 919 /* Check for heart beat timeout conditions */ 920 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 921 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 922 if (!tmo_posted) 923 phba->pport->work_port_events |= WORKER_HB_TMO; 924 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 925 926 /* Tell the worker thread there is work to do */ 927 if (!tmo_posted) 928 lpfc_worker_wake_up(phba); 929 return; 930} 931 932/** 933 * lpfc_rrq_timeout - The RRQ-timer timeout handler 934 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 935 * 936 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 937 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 938 * work-port-events bitmap and the worker thread is notified. This timeout 939 * event will be used by the worker thread to invoke the actual timeout 940 * handler routine, lpfc_rrq_handler. Any periodical operations will 941 * be performed in the timeout handler and the RRQ timeout event bit shall 942 * be cleared by the worker thread after it has taken the event bitmap out. 
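 *
 * The timeout routine itself only sets HBA_RRQ_ACTIVE under the work-port
 * lock and wakes up the worker thread; all RRQ list processing happens
 * later in worker-thread context.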
943 **/ 944static void 945lpfc_rrq_timeout(unsigned long ptr) 946{ 947 struct lpfc_hba *phba; 948 unsigned long iflag; 949 950 phba = (struct lpfc_hba *)ptr; 951 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 952 phba->hba_flag |= HBA_RRQ_ACTIVE; 953 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 954 lpfc_worker_wake_up(phba); 955} 956 957/** 958 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 959 * @phba: pointer to lpfc hba data structure. 960 * @pmboxq: pointer to the driver internal queue element for mailbox command. 961 * 962 * This is the callback function to the lpfc heart-beat mailbox command. 963 * If configured, the lpfc driver issues the heart-beat mailbox command to 964 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 965 * heart-beat mailbox command is issued, the driver shall set up heart-beat 966 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 967 * heart-beat outstanding state. Once the mailbox command comes back and 968 * no error conditions detected, the heart-beat mailbox command timer is 969 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 970 * state is cleared for the next heart-beat. If the timer expired with the 971 * heart-beat outstanding state set, the driver will put the HBA offline. 972 **/ 973static void 974lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 975{ 976 unsigned long drvr_flag; 977 978 spin_lock_irqsave(&phba->hbalock, drvr_flag); 979 phba->hb_outstanding = 0; 980 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 981 982 /* Check and reset heart-beat timer is necessary */ 983 mempool_free(pmboxq, phba->mbox_mem_pool); 984 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 985 !(phba->link_state == LPFC_HBA_ERROR) && 986 !(phba->pport->load_flag & FC_UNLOADING)) 987 mod_timer(&phba->hb_tmofunc, 988 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 989 return; 990} 991 992/** 993 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 994 * @phba: pointer to lpfc hba data structure. 995 * 996 * This is the actual HBA-timer timeout handler to be invoked by the worker 997 * thread whenever the HBA timer fired and HBA-timeout event posted. This 998 * handler performs any periodic operations needed for the device. If such 999 * periodic event has already been attended to either in the interrupt handler 1000 * or by processing slow-ring or fast-ring events within the HBA-timer 1001 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1002 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1003 * is configured and there is no heart-beat mailbox command outstanding, a 1004 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1005 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1006 * to offline. 
1007 **/ 1008void 1009lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1010{ 1011 struct lpfc_vport **vports; 1012 LPFC_MBOXQ_t *pmboxq; 1013 struct lpfc_dmabuf *buf_ptr; 1014 int retval, i; 1015 struct lpfc_sli *psli = &phba->sli; 1016 LIST_HEAD(completions); 1017 1018 vports = lpfc_create_vport_work_array(phba); 1019 if (vports != NULL) 1020 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 1021 lpfc_rcv_seq_check_edtov(vports[i]); 1022 lpfc_destroy_vport_work_array(phba, vports); 1023 1024 if ((phba->link_state == LPFC_HBA_ERROR) || 1025 (phba->pport->load_flag & FC_UNLOADING) || 1026 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1027 return; 1028 1029 spin_lock_irq(&phba->pport->work_port_lock); 1030 1031 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 1032 jiffies)) { 1033 spin_unlock_irq(&phba->pport->work_port_lock); 1034 if (!phba->hb_outstanding) 1035 mod_timer(&phba->hb_tmofunc, 1036 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1037 else 1038 mod_timer(&phba->hb_tmofunc, 1039 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1040 return; 1041 } 1042 spin_unlock_irq(&phba->pport->work_port_lock); 1043 1044 if (phba->elsbuf_cnt && 1045 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1046 spin_lock_irq(&phba->hbalock); 1047 list_splice_init(&phba->elsbuf, &completions); 1048 phba->elsbuf_cnt = 0; 1049 phba->elsbuf_prev_cnt = 0; 1050 spin_unlock_irq(&phba->hbalock); 1051 1052 while (!list_empty(&completions)) { 1053 list_remove_head(&completions, buf_ptr, 1054 struct lpfc_dmabuf, list); 1055 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1056 kfree(buf_ptr); 1057 } 1058 } 1059 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1060 1061 /* If there is no heart beat outstanding, issue a heartbeat command */ 1062 if (phba->cfg_enable_hba_heartbeat) { 1063 if (!phba->hb_outstanding) { 1064 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1065 (list_empty(&psli->mboxq))) { 1066 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1067 GFP_KERNEL); 1068 if (!pmboxq) { 1069 mod_timer(&phba->hb_tmofunc, 1070 jiffies + 1071 HZ * LPFC_HB_MBOX_INTERVAL); 1072 return; 1073 } 1074 1075 lpfc_heart_beat(phba, pmboxq); 1076 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1077 pmboxq->vport = phba->pport; 1078 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1079 MBX_NOWAIT); 1080 1081 if (retval != MBX_BUSY && 1082 retval != MBX_SUCCESS) { 1083 mempool_free(pmboxq, 1084 phba->mbox_mem_pool); 1085 mod_timer(&phba->hb_tmofunc, 1086 jiffies + 1087 HZ * LPFC_HB_MBOX_INTERVAL); 1088 return; 1089 } 1090 phba->skipped_hb = 0; 1091 phba->hb_outstanding = 1; 1092 } else if (time_before_eq(phba->last_completion_time, 1093 phba->skipped_hb)) { 1094 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1095 "2857 Last completion time not " 1096 " updated in %d ms\n", 1097 jiffies_to_msecs(jiffies 1098 - phba->last_completion_time)); 1099 } else 1100 phba->skipped_hb = jiffies; 1101 1102 mod_timer(&phba->hb_tmofunc, 1103 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1104 return; 1105 } else { 1106 /* 1107 * If heart beat timeout called with hb_outstanding set 1108 * we need to give the hb mailbox cmd a chance to 1109 * complete or TMO. 
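			 * The timer is therefore re-armed with the longer
			 * LPFC_HB_MBOX_TIMEOUT window rather than
			 * LPFC_HB_MBOX_INTERVAL, and only a warning is logged.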
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still out"
				"standing:last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggers erratt. That could cause I/Os to be
	 * dropped by the firmware. Error out the iocbs (I/O) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
1327 * Error iocb (I/O) on txcmplq and let the SCSI layer 1328 * retry it after re-establishing link. 1329 */ 1330 pring = &psli->ring[psli->fcp_ring]; 1331 lpfc_sli_abort_iocb_ring(phba, pring); 1332 1333 /* 1334 * There was a firmware error. Take the hba offline and then 1335 * attempt to restart it. 1336 */ 1337 lpfc_offline_prep(phba); 1338 lpfc_offline(phba); 1339 lpfc_sli_brdrestart(phba); 1340 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1341 lpfc_unblock_mgmt_io(phba); 1342 return; 1343 } 1344 lpfc_unblock_mgmt_io(phba); 1345 } else if (phba->work_hs & HS_CRIT_TEMP) { 1346 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1347 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1348 temp_event_data.event_code = LPFC_CRIT_TEMP; 1349 temp_event_data.data = (uint32_t)temperature; 1350 1351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1352 "0406 Adapter maximum temperature exceeded " 1353 "(%ld), taking this port offline " 1354 "Data: x%x x%x x%x\n", 1355 temperature, phba->work_hs, 1356 phba->work_status[0], phba->work_status[1]); 1357 1358 shost = lpfc_shost_from_vport(phba->pport); 1359 fc_host_post_vendor_event(shost, fc_get_event_number(), 1360 sizeof(temp_event_data), 1361 (char *) &temp_event_data, 1362 SCSI_NL_VID_TYPE_PCI 1363 | PCI_VENDOR_ID_EMULEX); 1364 1365 spin_lock_irq(&phba->hbalock); 1366 phba->over_temp_state = HBA_OVER_TEMP; 1367 spin_unlock_irq(&phba->hbalock); 1368 lpfc_offline_eratt(phba); 1369 1370 } else { 1371 /* The if clause above forces this code path when the status 1372 * failure is a value other than FFER6. Do not call the offline 1373 * twice. This is the adapter hardware error path. 1374 */ 1375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1376 "0457 Adapter Hardware Error " 1377 "Data: x%x x%x x%x\n", 1378 phba->work_hs, 1379 phba->work_status[0], phba->work_status[1]); 1380 1381 event_data = FC_REG_DUMP_EVENT; 1382 shost = lpfc_shost_from_vport(vport); 1383 fc_host_post_vendor_event(shost, fc_get_event_number(), 1384 sizeof(event_data), (char *) &event_data, 1385 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1386 1387 lpfc_offline_eratt(phba); 1388 } 1389 return; 1390} 1391 1392/** 1393 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1394 * @phba: pointer to lpfc hba data structure. 1395 * 1396 * This routine is invoked to handle the SLI4 HBA hardware error attention 1397 * conditions. 1398 **/ 1399static void 1400lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1401{ 1402 struct lpfc_vport *vport = phba->pport; 1403 uint32_t event_data; 1404 struct Scsi_Host *shost; 1405 uint32_t if_type; 1406 struct lpfc_register portstat_reg; 1407 1408 /* If the pci channel is offline, ignore possible errors, since 1409 * we cannot communicate with the pci card anyway. 
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, just treat it as an adapter hardware failure
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
			/*
			 * TODO: Attempt port recovery via a port reset.
			 * When fully implemented, the driver should
			 * attempt to recover the port here and return.
			 * For now, log an error and take the port offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine via the API jump-table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
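 * It allocates a mailbox command and a DMA buffer, issues READ_TOPOLOGY with
 * lpfc_mbx_cmpl_read_topology as the completion handler, and blocks further
 * ELS IOCBs until that mailbox completes; on any failure it re-enables
 * link-attention interrupts, clears the latched attention and drops the link.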
1491 **/ 1492void 1493lpfc_handle_latt(struct lpfc_hba *phba) 1494{ 1495 struct lpfc_vport *vport = phba->pport; 1496 struct lpfc_sli *psli = &phba->sli; 1497 LPFC_MBOXQ_t *pmb; 1498 volatile uint32_t control; 1499 struct lpfc_dmabuf *mp; 1500 int rc = 0; 1501 1502 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1503 if (!pmb) { 1504 rc = 1; 1505 goto lpfc_handle_latt_err_exit; 1506 } 1507 1508 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1509 if (!mp) { 1510 rc = 2; 1511 goto lpfc_handle_latt_free_pmb; 1512 } 1513 1514 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1515 if (!mp->virt) { 1516 rc = 3; 1517 goto lpfc_handle_latt_free_mp; 1518 } 1519 1520 /* Cleanup any outstanding ELS commands */ 1521 lpfc_els_flush_all_cmd(phba); 1522 1523 psli->slistat.link_event++; 1524 lpfc_read_topology(phba, pmb, mp); 1525 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1526 pmb->vport = vport; 1527 /* Block ELS IOCBs until we have processed this mbox command */ 1528 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1529 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1530 if (rc == MBX_NOT_FINISHED) { 1531 rc = 4; 1532 goto lpfc_handle_latt_free_mbuf; 1533 } 1534 1535 /* Clear Link Attention in HA REG */ 1536 spin_lock_irq(&phba->hbalock); 1537 writel(HA_LATT, phba->HAregaddr); 1538 readl(phba->HAregaddr); /* flush */ 1539 spin_unlock_irq(&phba->hbalock); 1540 1541 return; 1542 1543lpfc_handle_latt_free_mbuf: 1544 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1545 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1546lpfc_handle_latt_free_mp: 1547 kfree(mp); 1548lpfc_handle_latt_free_pmb: 1549 mempool_free(pmb, phba->mbox_mem_pool); 1550lpfc_handle_latt_err_exit: 1551 /* Enable Link attention interrupts */ 1552 spin_lock_irq(&phba->hbalock); 1553 psli->sli_flag |= LPFC_PROCESS_LA; 1554 control = readl(phba->HCregaddr); 1555 control |= HC_LAINT_ENA; 1556 writel(control, phba->HCregaddr); 1557 readl(phba->HCregaddr); /* flush */ 1558 1559 /* Clear Link Attention in HA REG */ 1560 writel(HA_LATT, phba->HAregaddr); 1561 readl(phba->HAregaddr); /* flush */ 1562 spin_unlock_irq(&phba->hbalock); 1563 lpfc_linkdown(phba); 1564 phba->link_state = LPFC_HBA_ERROR; 1565 1566 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1567 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1568 1569 return; 1570} 1571 1572/** 1573 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1574 * @phba: pointer to lpfc hba data structure. 1575 * @vpd: pointer to the vital product data. 1576 * @len: length of the vital product data in bytes. 1577 * 1578 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1579 * an array of characters. In this routine, the ModelName, ProgramType, and 1580 * ModelDesc, etc. fields of the phba data structure will be populated. 
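 *
 * The buffer follows the standard PCI VPD layout that the loop below walks:
 * large-resource tags 0x82/0x91 (identifier strings) are skipped using their
 * 16-bit little-endian length, the 0x90 read-only area is scanned for the
 * "SN" and "V1".."V4" keywords (each keyword is followed by a one-byte data
 * length), and a 0x78 tag ends the data.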
1581 * 1582 * Return codes 1583 * 0 - pointer to the VPD passed in is NULL 1584 * 1 - success 1585 **/ 1586int 1587lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1588{ 1589 uint8_t lenlo, lenhi; 1590 int Length; 1591 int i, j; 1592 int finished = 0; 1593 int index = 0; 1594 1595 if (!vpd) 1596 return 0; 1597 1598 /* Vital Product */ 1599 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1600 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1601 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1602 (uint32_t) vpd[3]); 1603 while (!finished && (index < (len - 4))) { 1604 switch (vpd[index]) { 1605 case 0x82: 1606 case 0x91: 1607 index += 1; 1608 lenlo = vpd[index]; 1609 index += 1; 1610 lenhi = vpd[index]; 1611 index += 1; 1612 i = ((((unsigned short)lenhi) << 8) + lenlo); 1613 index += i; 1614 break; 1615 case 0x90: 1616 index += 1; 1617 lenlo = vpd[index]; 1618 index += 1; 1619 lenhi = vpd[index]; 1620 index += 1; 1621 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1622 if (Length > len - index) 1623 Length = len - index; 1624 while (Length > 0) { 1625 /* Look for Serial Number */ 1626 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1627 index += 2; 1628 i = vpd[index]; 1629 index += 1; 1630 j = 0; 1631 Length -= (3+i); 1632 while(i--) { 1633 phba->SerialNumber[j++] = vpd[index++]; 1634 if (j == 31) 1635 break; 1636 } 1637 phba->SerialNumber[j] = 0; 1638 continue; 1639 } 1640 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1641 phba->vpd_flag |= VPD_MODEL_DESC; 1642 index += 2; 1643 i = vpd[index]; 1644 index += 1; 1645 j = 0; 1646 Length -= (3+i); 1647 while(i--) { 1648 phba->ModelDesc[j++] = vpd[index++]; 1649 if (j == 255) 1650 break; 1651 } 1652 phba->ModelDesc[j] = 0; 1653 continue; 1654 } 1655 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1656 phba->vpd_flag |= VPD_MODEL_NAME; 1657 index += 2; 1658 i = vpd[index]; 1659 index += 1; 1660 j = 0; 1661 Length -= (3+i); 1662 while(i--) { 1663 phba->ModelName[j++] = vpd[index++]; 1664 if (j == 79) 1665 break; 1666 } 1667 phba->ModelName[j] = 0; 1668 continue; 1669 } 1670 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1671 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1672 index += 2; 1673 i = vpd[index]; 1674 index += 1; 1675 j = 0; 1676 Length -= (3+i); 1677 while(i--) { 1678 phba->ProgramType[j++] = vpd[index++]; 1679 if (j == 255) 1680 break; 1681 } 1682 phba->ProgramType[j] = 0; 1683 continue; 1684 } 1685 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1686 phba->vpd_flag |= VPD_PORT; 1687 index += 2; 1688 i = vpd[index]; 1689 index += 1; 1690 j = 0; 1691 Length -= (3+i); 1692 while(i--) { 1693 phba->Port[j++] = vpd[index++]; 1694 if (j == 19) 1695 break; 1696 } 1697 phba->Port[j] = 0; 1698 continue; 1699 } 1700 else { 1701 index += 2; 1702 i = vpd[index]; 1703 index += 1; 1704 index += i; 1705 Length -= (3 + i); 1706 } 1707 } 1708 finished = 0; 1709 break; 1710 case 0x78: 1711 finished = 1; 1712 break; 1713 default: 1714 index ++; 1715 break; 1716 } 1717 } 1718 1719 return(1); 1720} 1721 1722/** 1723 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1724 * @phba: pointer to lpfc hba data structure. 1725 * @mdp: pointer to the data structure to hold the derived model name. 1726 * @descp: pointer to the data structure to hold the derived description. 1727 * 1728 * This routine retrieves HBA's description based on its registered PCI device 1729 * ID. The @descp passed into this function points to an array of 256 chars. 
It 1730 * shall be returned with the model name, maximum speed, and the host bus type. 1731 * The @mdp passed into this function points to an array of 80 chars. When the 1732 * function returns, the @mdp will be filled with the model name. 1733 **/ 1734static void 1735lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1736{ 1737 lpfc_vpd_t *vp; 1738 uint16_t dev_id = phba->pcidev->device; 1739 int max_speed; 1740 int GE = 0; 1741 int oneConnect = 0; /* default is not a oneConnect */ 1742 struct { 1743 char *name; 1744 char *bus; 1745 char *function; 1746 } m = {"<Unknown>", "", ""}; 1747 1748 if (mdp && mdp[0] != '\0' 1749 && descp && descp[0] != '\0') 1750 return; 1751 1752 if (phba->lmt & LMT_10Gb) 1753 max_speed = 10; 1754 else if (phba->lmt & LMT_8Gb) 1755 max_speed = 8; 1756 else if (phba->lmt & LMT_4Gb) 1757 max_speed = 4; 1758 else if (phba->lmt & LMT_2Gb) 1759 max_speed = 2; 1760 else 1761 max_speed = 1; 1762 1763 vp = &phba->vpd; 1764 1765 switch (dev_id) { 1766 case PCI_DEVICE_ID_FIREFLY: 1767 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1768 break; 1769 case PCI_DEVICE_ID_SUPERFLY: 1770 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1771 m = (typeof(m)){"LP7000", "PCI", 1772 "Fibre Channel Adapter"}; 1773 else 1774 m = (typeof(m)){"LP7000E", "PCI", 1775 "Fibre Channel Adapter"}; 1776 break; 1777 case PCI_DEVICE_ID_DRAGONFLY: 1778 m = (typeof(m)){"LP8000", "PCI", 1779 "Fibre Channel Adapter"}; 1780 break; 1781 case PCI_DEVICE_ID_CENTAUR: 1782 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1783 m = (typeof(m)){"LP9002", "PCI", 1784 "Fibre Channel Adapter"}; 1785 else 1786 m = (typeof(m)){"LP9000", "PCI", 1787 "Fibre Channel Adapter"}; 1788 break; 1789 case PCI_DEVICE_ID_RFLY: 1790 m = (typeof(m)){"LP952", "PCI", 1791 "Fibre Channel Adapter"}; 1792 break; 1793 case PCI_DEVICE_ID_PEGASUS: 1794 m = (typeof(m)){"LP9802", "PCI-X", 1795 "Fibre Channel Adapter"}; 1796 break; 1797 case PCI_DEVICE_ID_THOR: 1798 m = (typeof(m)){"LP10000", "PCI-X", 1799 "Fibre Channel Adapter"}; 1800 break; 1801 case PCI_DEVICE_ID_VIPER: 1802 m = (typeof(m)){"LPX1000", "PCI-X", 1803 "Fibre Channel Adapter"}; 1804 break; 1805 case PCI_DEVICE_ID_PFLY: 1806 m = (typeof(m)){"LP982", "PCI-X", 1807 "Fibre Channel Adapter"}; 1808 break; 1809 case PCI_DEVICE_ID_TFLY: 1810 m = (typeof(m)){"LP1050", "PCI-X", 1811 "Fibre Channel Adapter"}; 1812 break; 1813 case PCI_DEVICE_ID_HELIOS: 1814 m = (typeof(m)){"LP11000", "PCI-X2", 1815 "Fibre Channel Adapter"}; 1816 break; 1817 case PCI_DEVICE_ID_HELIOS_SCSP: 1818 m = (typeof(m)){"LP11000-SP", "PCI-X2", 1819 "Fibre Channel Adapter"}; 1820 break; 1821 case PCI_DEVICE_ID_HELIOS_DCSP: 1822 m = (typeof(m)){"LP11002-SP", "PCI-X2", 1823 "Fibre Channel Adapter"}; 1824 break; 1825 case PCI_DEVICE_ID_NEPTUNE: 1826 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 1827 break; 1828 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1829 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 1830 break; 1831 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1832 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 1833 break; 1834 case PCI_DEVICE_ID_BMID: 1835 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 1836 break; 1837 case PCI_DEVICE_ID_BSMB: 1838 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 1839 break; 1840 case PCI_DEVICE_ID_ZEPHYR: 1841 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1842 break; 1843 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1844 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel 
Adapter"}; 1845 break; 1846 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1847 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1848 GE = 1; 1849 break; 1850 case PCI_DEVICE_ID_ZMID: 1851 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1852 break; 1853 case PCI_DEVICE_ID_ZSMB: 1854 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1855 break; 1856 case PCI_DEVICE_ID_LP101: 1857 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1858 break; 1859 case PCI_DEVICE_ID_LP10000S: 1860 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1861 break; 1862 case PCI_DEVICE_ID_LP11000S: 1863 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1864 break; 1865 case PCI_DEVICE_ID_LPE11000S: 1866 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1867 break; 1868 case PCI_DEVICE_ID_SAT: 1869 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1870 break; 1871 case PCI_DEVICE_ID_SAT_MID: 1872 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1873 break; 1874 case PCI_DEVICE_ID_SAT_SMB: 1875 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1876 break; 1877 case PCI_DEVICE_ID_SAT_DCSP: 1878 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1879 break; 1880 case PCI_DEVICE_ID_SAT_SCSP: 1881 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1882 break; 1883 case PCI_DEVICE_ID_SAT_S: 1884 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1885 break; 1886 case PCI_DEVICE_ID_HORNET: 1887 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1888 GE = 1; 1889 break; 1890 case PCI_DEVICE_ID_PROTEUS_VF: 1891 m = (typeof(m)){"LPev12000", "PCIe IOV", 1892 "Fibre Channel Adapter"}; 1893 break; 1894 case PCI_DEVICE_ID_PROTEUS_PF: 1895 m = (typeof(m)){"LPev12000", "PCIe IOV", 1896 "Fibre Channel Adapter"}; 1897 break; 1898 case PCI_DEVICE_ID_PROTEUS_S: 1899 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1900 "Fibre Channel Adapter"}; 1901 break; 1902 case PCI_DEVICE_ID_TIGERSHARK: 1903 oneConnect = 1; 1904 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1905 break; 1906 case PCI_DEVICE_ID_TOMCAT: 1907 oneConnect = 1; 1908 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1909 break; 1910 case PCI_DEVICE_ID_FALCON: 1911 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1912 "EmulexSecure Fibre"}; 1913 break; 1914 case PCI_DEVICE_ID_BALIUS: 1915 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1916 "Fibre Channel Adapter"}; 1917 break; 1918 case PCI_DEVICE_ID_LANCER_FC: 1919 oneConnect = 1; 1920 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; 1921 break; 1922 case PCI_DEVICE_ID_LANCER_FCOE: 1923 oneConnect = 1; 1924 m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; 1925 break; 1926 default: 1927 m = (typeof(m)){"Unknown", "", ""}; 1928 break; 1929 } 1930 1931 if (mdp && mdp[0] == '\0') 1932 snprintf(mdp, 79,"%s", m.name); 1933 /* oneConnect hba requires special processing, they are all initiators 1934 * and we put the port number on the end 1935 */ 1936 if (descp && descp[0] == '\0') { 1937 if (oneConnect) 1938 snprintf(descp, 255, 1939 "Emulex OneConnect %s, %s Initiator, Port %s", 1940 m.name, m.function, 1941 phba->Port); 1942 else 1943 snprintf(descp, 255, 1944 "Emulex %s %d%s %s %s", 1945 m.name, max_speed, (GE) ? "GE" : "Gb", 1946 m.bus, m.function); 1947 } 1948} 1949 1950/** 1951 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1952 * @phba: pointer to lpfc hba data structure. 1953 * @pring: pointer to a IOCB ring. 
1954 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1955 * 1956 * This routine posts a given number of IOCBs with the associated DMA buffer 1957 * descriptors specified by the cnt argument to the given IOCB ring. 1958 * 1959 * Return codes 1960 * The number of IOCBs NOT able to be posted to the IOCB ring. 1961 **/ 1962int 1963lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1964{ 1965 IOCB_t *icmd; 1966 struct lpfc_iocbq *iocb; 1967 struct lpfc_dmabuf *mp1, *mp2; 1968 1969 cnt += pring->missbufcnt; 1970 1971 /* While there are buffers to post */ 1972 while (cnt > 0) { 1973 /* Allocate buffer for command iocb */ 1974 iocb = lpfc_sli_get_iocbq(phba); 1975 if (iocb == NULL) { 1976 pring->missbufcnt = cnt; 1977 return cnt; 1978 } 1979 icmd = &iocb->iocb; 1980 1981 /* 2 buffers can be posted per command */ 1982 /* Allocate buffer to post */ 1983 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1984 if (mp1) 1985 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1986 if (!mp1 || !mp1->virt) { 1987 kfree(mp1); 1988 lpfc_sli_release_iocbq(phba, iocb); 1989 pring->missbufcnt = cnt; 1990 return cnt; 1991 } 1992 1993 INIT_LIST_HEAD(&mp1->list); 1994 /* Allocate buffer to post */ 1995 if (cnt > 1) { 1996 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1997 if (mp2) 1998 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1999 &mp2->phys); 2000 if (!mp2 || !mp2->virt) { 2001 kfree(mp2); 2002 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2003 kfree(mp1); 2004 lpfc_sli_release_iocbq(phba, iocb); 2005 pring->missbufcnt = cnt; 2006 return cnt; 2007 } 2008 2009 INIT_LIST_HEAD(&mp2->list); 2010 } else { 2011 mp2 = NULL; 2012 } 2013 2014 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2015 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2016 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2017 icmd->ulpBdeCount = 1; 2018 cnt--; 2019 if (mp2) { 2020 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2021 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2022 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2023 cnt--; 2024 icmd->ulpBdeCount = 2; 2025 } 2026 2027 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2028 icmd->ulpLe = 1; 2029 2030 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2031 IOCB_ERROR) { 2032 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2033 kfree(mp1); 2034 cnt++; 2035 if (mp2) { 2036 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2037 kfree(mp2); 2038 cnt++; 2039 } 2040 lpfc_sli_release_iocbq(phba, iocb); 2041 pring->missbufcnt = cnt; 2042 return cnt; 2043 } 2044 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2045 if (mp2) 2046 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2047 } 2048 pring->missbufcnt = 0; 2049 return 0; 2050} 2051 2052/** 2053 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2054 * @phba: pointer to lpfc hba data structure. 2055 * 2056 * This routine posts initial receive IOCB buffers to the ELS ring. The 2057 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2058 * set to 64 IOCBs. 
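 * (Worked example: lpfc_post_buffer() above packs up to two DMA buffer
 * descriptors into each CMD_QUE_RING_BUF64_CN IOCB, so posting the 64 initial
 * buffers normally consumes roughly 32 IOCB entries; buffers that cannot be
 * posted are remembered in pring->missbufcnt and retried on the next call.)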
2059 * 2060 * Return codes 2061 * 0 - success (currently always success) 2062 **/ 2063static int 2064lpfc_post_rcv_buf(struct lpfc_hba *phba) 2065{ 2066 struct lpfc_sli *psli = &phba->sli; 2067 2068 /* Ring 0, ELS / CT buffers */ 2069 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2070 /* Ring 2 - FCP no buffers needed */ 2071 2072 return 0; 2073} 2074 2075#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2076 2077/** 2078 * lpfc_sha_init - Set up initial array of hash table entries 2079 * @HashResultPointer: pointer to an array as hash table. 2080 * 2081 * This routine sets up the initial values to the array of hash table entries 2082 * for the LC HBAs. 2083 **/ 2084static void 2085lpfc_sha_init(uint32_t * HashResultPointer) 2086{ 2087 HashResultPointer[0] = 0x67452301; 2088 HashResultPointer[1] = 0xEFCDAB89; 2089 HashResultPointer[2] = 0x98BADCFE; 2090 HashResultPointer[3] = 0x10325476; 2091 HashResultPointer[4] = 0xC3D2E1F0; 2092} 2093 2094/** 2095 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2096 * @HashResultPointer: pointer to an initial/result hash table. 2097 * @HashWorkingPointer: pointer to an working hash table. 2098 * 2099 * This routine iterates an initial hash table pointed by @HashResultPointer 2100 * with the values from the working hash table pointeed by @HashWorkingPointer. 2101 * The results are putting back to the initial hash table, returned through 2102 * the @HashResultPointer as the result hash table. 2103 **/ 2104static void 2105lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2106{ 2107 int t; 2108 uint32_t TEMP; 2109 uint32_t A, B, C, D, E; 2110 t = 16; 2111 do { 2112 HashWorkingPointer[t] = 2113 S(1, 2114 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2115 8] ^ 2116 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2117 } while (++t <= 79); 2118 t = 0; 2119 A = HashResultPointer[0]; 2120 B = HashResultPointer[1]; 2121 C = HashResultPointer[2]; 2122 D = HashResultPointer[3]; 2123 E = HashResultPointer[4]; 2124 2125 do { 2126 if (t < 20) { 2127 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2128 } else if (t < 40) { 2129 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2130 } else if (t < 60) { 2131 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2132 } else { 2133 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2134 } 2135 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2136 E = D; 2137 D = C; 2138 C = S(30, B); 2139 B = A; 2140 A = TEMP; 2141 } while (++t <= 79); 2142 2143 HashResultPointer[0] += A; 2144 HashResultPointer[1] += B; 2145 HashResultPointer[2] += C; 2146 HashResultPointer[3] += D; 2147 HashResultPointer[4] += E; 2148 2149} 2150 2151/** 2152 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2153 * @RandomChallenge: pointer to the entry of host challenge random number array. 2154 * @HashWorking: pointer to the entry of the working hash array. 2155 * 2156 * This routine calculates the working hash array referred by @HashWorking 2157 * from the challenge random numbers associated with the host, referred by 2158 * @RandomChallenge. The result is put into the entry of the working hash 2159 * array and returned by reference through @HashWorking. 2160 **/ 2161static void 2162lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2163{ 2164 *HashWorking = (*RandomChallenge ^ *HashWorking); 2165} 2166 2167/** 2168 * lpfc_hba_init - Perform special handling for LC HBA initialization 2169 * @phba: pointer to lpfc hba data structure. 
2170 * @hbainit: pointer to an array of unsigned 32-bit integers. 2171 * 2172 * This routine performs the special handling for LC HBA initialization. 2173 **/ 2174void 2175lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2176{ 2177 int t; 2178 uint32_t *HashWorking; 2179 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2180 2181 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2182 if (!HashWorking) 2183 return; 2184 2185 HashWorking[0] = HashWorking[78] = *pwwnn++; 2186 HashWorking[1] = HashWorking[79] = *pwwnn; 2187 2188 for (t = 0; t < 7; t++) 2189 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2190 2191 lpfc_sha_init(hbainit); 2192 lpfc_sha_iterate(hbainit, HashWorking); 2193 kfree(HashWorking); 2194} 2195 2196/** 2197 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2198 * @vport: pointer to a virtual N_Port data structure. 2199 * 2200 * This routine performs the necessary cleanups before deleting the @vport. 2201 * It invokes the discovery state machine to perform necessary state 2202 * transitions and to release the ndlps associated with the @vport. Note, 2203 * the physical port is treated as @vport 0. 2204 **/ 2205void 2206lpfc_cleanup(struct lpfc_vport *vport) 2207{ 2208 struct lpfc_hba *phba = vport->phba; 2209 struct lpfc_nodelist *ndlp, *next_ndlp; 2210 int i = 0; 2211 2212 if (phba->link_state > LPFC_LINK_DOWN) 2213 lpfc_port_link_failure(vport); 2214 2215 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2216 if (!NLP_CHK_NODE_ACT(ndlp)) { 2217 ndlp = lpfc_enable_node(vport, ndlp, 2218 NLP_STE_UNUSED_NODE); 2219 if (!ndlp) 2220 continue; 2221 spin_lock_irq(&phba->ndlp_lock); 2222 NLP_SET_FREE_REQ(ndlp); 2223 spin_unlock_irq(&phba->ndlp_lock); 2224 /* Trigger the release of the ndlp memory */ 2225 lpfc_nlp_put(ndlp); 2226 continue; 2227 } 2228 spin_lock_irq(&phba->ndlp_lock); 2229 if (NLP_CHK_FREE_REQ(ndlp)) { 2230 /* The ndlp should not be in memory free mode already */ 2231 spin_unlock_irq(&phba->ndlp_lock); 2232 continue; 2233 } else 2234 /* Indicate request for freeing ndlp memory */ 2235 NLP_SET_FREE_REQ(ndlp); 2236 spin_unlock_irq(&phba->ndlp_lock); 2237 2238 if (vport->port_type != LPFC_PHYSICAL_PORT && 2239 ndlp->nlp_DID == Fabric_DID) { 2240 /* Just free up ndlp with Fabric_DID for vports */ 2241 lpfc_nlp_put(ndlp); 2242 continue; 2243 } 2244 2245 if (ndlp->nlp_type & NLP_FABRIC) 2246 lpfc_disc_state_machine(vport, ndlp, NULL, 2247 NLP_EVT_DEVICE_RECOVERY); 2248 2249 lpfc_disc_state_machine(vport, ndlp, NULL, 2250 NLP_EVT_DEVICE_RM); 2251 2252 } 2253 2254 /* At this point, ALL ndlp's should be gone 2255 * because of the previous NLP_EVT_DEVICE_RM. 2256 * Lets wait for this to happen, if needed. 2257 */ 2258 while (!list_empty(&vport->fc_nodes)) { 2259 if (i++ > 3000) { 2260 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2261 "0233 Nodelist not empty\n"); 2262 list_for_each_entry_safe(ndlp, next_ndlp, 2263 &vport->fc_nodes, nlp_listp) { 2264 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2265 LOG_NODE, 2266 "0282 did:x%x ndlp:x%p " 2267 "usgmap:x%x refcnt:%d\n", 2268 ndlp->nlp_DID, (void *)ndlp, 2269 ndlp->nlp_usg_map, 2270 atomic_read( 2271 &ndlp->kref.refcount)); 2272 } 2273 break; 2274 } 2275 2276 /* Wait for any activity on ndlps to settle */ 2277 msleep(10); 2278 } 2279 lpfc_cleanup_vports_rrqs(vport, NULL); 2280} 2281 2282/** 2283 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2284 * @vport: pointer to a virtual N_Port data structure. 
2285 * 2286 * This routine stops all the timers associated with a @vport. This function 2287 * is invoked before disabling or deleting a @vport. Note that the physical 2288 * port is treated as @vport 0. 2289 **/ 2290void 2291lpfc_stop_vport_timers(struct lpfc_vport *vport) 2292{ 2293 del_timer_sync(&vport->els_tmofunc); 2294 del_timer_sync(&vport->fc_fdmitmo); 2295 lpfc_can_disctmo(vport); 2296 return; 2297} 2298 2299/** 2300 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2301 * @phba: pointer to lpfc hba data structure. 2302 * 2303 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2304 * caller of this routine should already hold the host lock. 2305 **/ 2306void 2307__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2308{ 2309 /* Clear pending FCF rediscovery wait flag */ 2310 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2311 2312 /* Now, try to stop the timer */ 2313 del_timer(&phba->fcf.redisc_wait); 2314} 2315 2316/** 2317 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2318 * @phba: pointer to lpfc hba data structure. 2319 * 2320 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2321 * checks whether the FCF rediscovery wait timer is pending with the host 2322 * lock held before proceeding with disabling the timer and clearing the 2323 * wait timer pendig flag. 2324 **/ 2325void 2326lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2327{ 2328 spin_lock_irq(&phba->hbalock); 2329 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2330 /* FCF rediscovery timer already fired or stopped */ 2331 spin_unlock_irq(&phba->hbalock); 2332 return; 2333 } 2334 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2335 /* Clear failover in progress flags */ 2336 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2337 spin_unlock_irq(&phba->hbalock); 2338} 2339 2340/** 2341 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2342 * @phba: pointer to lpfc hba data structure. 2343 * 2344 * This routine stops all the timers associated with a HBA. This function is 2345 * invoked before either putting a HBA offline or unloading the driver. 2346 **/ 2347void 2348lpfc_stop_hba_timers(struct lpfc_hba *phba) 2349{ 2350 lpfc_stop_vport_timers(phba->pport); 2351 del_timer_sync(&phba->sli.mbox_tmo); 2352 del_timer_sync(&phba->fabric_block_timer); 2353 del_timer_sync(&phba->eratt_poll); 2354 del_timer_sync(&phba->hb_tmofunc); 2355 if (phba->sli_rev == LPFC_SLI_REV4) { 2356 del_timer_sync(&phba->rrq_tmr); 2357 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2358 } 2359 phba->hb_outstanding = 0; 2360 2361 switch (phba->pci_dev_grp) { 2362 case LPFC_PCI_DEV_LP: 2363 /* Stop any LightPulse device specific driver timers */ 2364 del_timer_sync(&phba->fcp_poll_timer); 2365 break; 2366 case LPFC_PCI_DEV_OC: 2367 /* Stop any OneConnect device sepcific driver timers */ 2368 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2369 break; 2370 default: 2371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2372 "0297 Invalid device group (x%x)\n", 2373 phba->pci_dev_grp); 2374 break; 2375 } 2376 return; 2377} 2378 2379/** 2380 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2381 * @phba: pointer to lpfc hba data structure. 2382 * 2383 * This routine marks a HBA's management interface as blocked. Once the HBA's 2384 * management interface is marked as blocked, all the user space access to 2385 * the HBA, whether they are from sysfs interface or libdfc interface will 2386 * all be blocked. 
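 * (A minimal, hypothetical check that such a management path might make while
 * the interface is blocked; sketch only, not lifted from the driver:
 *
 *     if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
 *             return -EAGAIN;
 *
 * The flag is set under phba->hbalock by this routine and cleared again by
 * lpfc_unblock_mgmt_io().)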
The HBA is set to block the management interface when the 2387 * driver prepares the HBA interface for online or offline. 2388 **/ 2389static void 2390lpfc_block_mgmt_io(struct lpfc_hba * phba) 2391{ 2392 unsigned long iflag; 2393 uint8_t actcmd = MBX_HEARTBEAT; 2394 unsigned long timeout; 2395 2396 2397 spin_lock_irqsave(&phba->hbalock, iflag); 2398 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2399 if (phba->sli.mbox_active) 2400 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2401 spin_unlock_irqrestore(&phba->hbalock, iflag); 2402 /* Determine how long we might wait for the active mailbox 2403 * command to be gracefully completed by firmware. 2404 */ 2405 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 2406 jiffies; 2407 /* Wait for the outstnading mailbox command to complete */ 2408 while (phba->sli.mbox_active) { 2409 /* Check active mailbox complete status every 2ms */ 2410 msleep(2); 2411 if (time_after(jiffies, timeout)) { 2412 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2413 "2813 Mgmt IO is Blocked %x " 2414 "- mbox cmd %x still active\n", 2415 phba->sli.sli_flag, actcmd); 2416 break; 2417 } 2418 } 2419} 2420 2421/** 2422 * lpfc_online - Initialize and bring a HBA online 2423 * @phba: pointer to lpfc hba data structure. 2424 * 2425 * This routine initializes the HBA and brings a HBA online. During this 2426 * process, the management interface is blocked to prevent user space access 2427 * to the HBA interfering with the driver initialization. 2428 * 2429 * Return codes 2430 * 0 - successful 2431 * 1 - failed 2432 **/ 2433int 2434lpfc_online(struct lpfc_hba *phba) 2435{ 2436 struct lpfc_vport *vport; 2437 struct lpfc_vport **vports; 2438 int i; 2439 2440 if (!phba) 2441 return 0; 2442 vport = phba->pport; 2443 2444 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2445 return 0; 2446 2447 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2448 "0458 Bring Adapter online\n"); 2449 2450 lpfc_block_mgmt_io(phba); 2451 2452 if (!lpfc_sli_queue_setup(phba)) { 2453 lpfc_unblock_mgmt_io(phba); 2454 return 1; 2455 } 2456 2457 if (phba->sli_rev == LPFC_SLI_REV4) { 2458 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2459 lpfc_unblock_mgmt_io(phba); 2460 return 1; 2461 } 2462 } else { 2463 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2464 lpfc_unblock_mgmt_io(phba); 2465 return 1; 2466 } 2467 } 2468 2469 vports = lpfc_create_vport_work_array(phba); 2470 if (vports != NULL) 2471 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2472 struct Scsi_Host *shost; 2473 shost = lpfc_shost_from_vport(vports[i]); 2474 spin_lock_irq(shost->host_lock); 2475 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2476 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2477 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2478 if (phba->sli_rev == LPFC_SLI_REV4) 2479 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2480 spin_unlock_irq(shost->host_lock); 2481 } 2482 lpfc_destroy_vport_work_array(phba, vports); 2483 2484 lpfc_unblock_mgmt_io(phba); 2485 return 0; 2486} 2487 2488/** 2489 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2490 * @phba: pointer to lpfc hba data structure. 2491 * 2492 * This routine marks a HBA's management interface as not blocked. Once the 2493 * HBA's management interface is marked as not blocked, all the user space 2494 * access to the HBA, whether they are from sysfs interface or libdfc 2495 * interface will be allowed. 
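 * Typical bracketing, as lpfc_online() in this file does (sketch only):
 *
 *     lpfc_block_mgmt_io(phba);
 *     ... bring the SLI layer up or down ...
 *     lpfc_unblock_mgmt_io(phba);
 *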
The HBA is set to block the management interface 2496 * when the driver prepares the HBA interface for online or offline and then 2497 * set to unblock the management interface afterwards. 2498 **/ 2499void 2500lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2501{ 2502 unsigned long iflag; 2503 2504 spin_lock_irqsave(&phba->hbalock, iflag); 2505 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2506 spin_unlock_irqrestore(&phba->hbalock, iflag); 2507} 2508 2509/** 2510 * lpfc_offline_prep - Prepare a HBA to be brought offline 2511 * @phba: pointer to lpfc hba data structure. 2512 * 2513 * This routine is invoked to prepare a HBA to be brought offline. It performs 2514 * unregistration login to all the nodes on all vports and flushes the mailbox 2515 * queue to make it ready to be brought offline. 2516 **/ 2517void 2518lpfc_offline_prep(struct lpfc_hba * phba) 2519{ 2520 struct lpfc_vport *vport = phba->pport; 2521 struct lpfc_nodelist *ndlp, *next_ndlp; 2522 struct lpfc_vport **vports; 2523 struct Scsi_Host *shost; 2524 int i; 2525 2526 if (vport->fc_flag & FC_OFFLINE_MODE) 2527 return; 2528 2529 lpfc_block_mgmt_io(phba); 2530 2531 lpfc_linkdown(phba); 2532 2533 /* Issue an unreg_login to all nodes on all vports */ 2534 vports = lpfc_create_vport_work_array(phba); 2535 if (vports != NULL) { 2536 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2537 if (vports[i]->load_flag & FC_UNLOADING) 2538 continue; 2539 shost = lpfc_shost_from_vport(vports[i]); 2540 spin_lock_irq(shost->host_lock); 2541 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2542 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2543 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2544 spin_unlock_irq(shost->host_lock); 2545 2546 shost = lpfc_shost_from_vport(vports[i]); 2547 list_for_each_entry_safe(ndlp, next_ndlp, 2548 &vports[i]->fc_nodes, 2549 nlp_listp) { 2550 if (!NLP_CHK_NODE_ACT(ndlp)) 2551 continue; 2552 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2553 continue; 2554 if (ndlp->nlp_type & NLP_FABRIC) { 2555 lpfc_disc_state_machine(vports[i], ndlp, 2556 NULL, NLP_EVT_DEVICE_RECOVERY); 2557 lpfc_disc_state_machine(vports[i], ndlp, 2558 NULL, NLP_EVT_DEVICE_RM); 2559 } 2560 spin_lock_irq(shost->host_lock); 2561 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2562 spin_unlock_irq(shost->host_lock); 2563 lpfc_unreg_rpi(vports[i], ndlp); 2564 } 2565 } 2566 } 2567 lpfc_destroy_vport_work_array(phba, vports); 2568 2569 lpfc_sli_mbox_sys_shutdown(phba); 2570} 2571 2572/** 2573 * lpfc_offline - Bring a HBA offline 2574 * @phba: pointer to lpfc hba data structure. 2575 * 2576 * This routine actually brings a HBA offline. It stops all the timers 2577 * associated with the HBA, brings down the SLI layer, and eventually 2578 * marks the HBA as in offline state for the upper layer protocol. 2579 **/ 2580void 2581lpfc_offline(struct lpfc_hba *phba) 2582{ 2583 struct Scsi_Host *shost; 2584 struct lpfc_vport **vports; 2585 int i; 2586 2587 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2588 return; 2589 2590 /* stop port and all timers associated with this hba */ 2591 lpfc_stop_port(phba); 2592 vports = lpfc_create_vport_work_array(phba); 2593 if (vports != NULL) 2594 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2595 lpfc_stop_vport_timers(vports[i]); 2596 lpfc_destroy_vport_work_array(phba, vports); 2597 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2598 "0460 Bring Adapter offline\n"); 2599 /* Bring down the SLI Layer and cleanup. The HBA is offline 2600 now. 
*/ 2601 lpfc_sli_hba_down(phba); 2602 spin_lock_irq(&phba->hbalock); 2603 phba->work_ha = 0; 2604 spin_unlock_irq(&phba->hbalock); 2605 vports = lpfc_create_vport_work_array(phba); 2606 if (vports != NULL) 2607 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2608 shost = lpfc_shost_from_vport(vports[i]); 2609 spin_lock_irq(shost->host_lock); 2610 vports[i]->work_port_events = 0; 2611 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2612 spin_unlock_irq(shost->host_lock); 2613 } 2614 lpfc_destroy_vport_work_array(phba, vports); 2615} 2616 2617/** 2618 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2619 * @phba: pointer to lpfc hba data structure. 2620 * 2621 * This routine is to free all the SCSI buffers and IOCBs from the driver 2622 * list back to kernel. It is called from lpfc_pci_remove_one to free 2623 * the internal resources before the device is removed from the system. 2624 * 2625 * Return codes 2626 * 0 - successful (for now, it always returns 0) 2627 **/ 2628static int 2629lpfc_scsi_free(struct lpfc_hba *phba) 2630{ 2631 struct lpfc_scsi_buf *sb, *sb_next; 2632 struct lpfc_iocbq *io, *io_next; 2633 2634 spin_lock_irq(&phba->hbalock); 2635 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2636 spin_lock(&phba->scsi_buf_list_lock); 2637 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2638 list_del(&sb->list); 2639 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2640 sb->dma_handle); 2641 kfree(sb); 2642 phba->total_scsi_bufs--; 2643 } 2644 spin_unlock(&phba->scsi_buf_list_lock); 2645 2646 /* Release all the lpfc_iocbq entries maintained by this host. */ 2647 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2648 list_del(&io->list); 2649 kfree(io); 2650 phba->total_iocbq_bufs--; 2651 } 2652 spin_unlock_irq(&phba->hbalock); 2653 return 0; 2654} 2655 2656/** 2657 * lpfc_create_port - Create an FC port 2658 * @phba: pointer to lpfc hba data structure. 2659 * @instance: a unique integer ID to this FC port. 2660 * @dev: pointer to the device data structure. 2661 * 2662 * This routine creates a FC port for the upper layer protocol. The FC port 2663 * can be created on top of either a physical port or a virtual port provided 2664 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2665 * and associates the FC port created before adding the shost into the SCSI 2666 * layer. 2667 * 2668 * Return codes 2669 * @vport - pointer to the virtual N_Port data structure. 2670 * NULL - port create failed. 
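 *
 * Hypothetical caller sketch (names assumed, not the literal probe code): the
 * physical port is created against the PCI device itself, e.g.
 *
 *     vport = lpfc_create_port(phba, lpfc_get_instance(), &phba->pcidev->dev);
 *     if (!vport)
 *             goto out_error;
 *
 * whereas NPIV ports pass their own device, which makes the routine pick
 * lpfc_vport_template and LPFC_NPIV_PORT below.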
2671 **/ 2672struct lpfc_vport * 2673lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2674{ 2675 struct lpfc_vport *vport; 2676 struct Scsi_Host *shost; 2677 int error = 0; 2678 2679 if (dev != &phba->pcidev->dev) 2680 shost = scsi_host_alloc(&lpfc_vport_template, 2681 sizeof(struct lpfc_vport)); 2682 else 2683 shost = scsi_host_alloc(&lpfc_template, 2684 sizeof(struct lpfc_vport)); 2685 if (!shost) 2686 goto out; 2687 2688 vport = (struct lpfc_vport *) shost->hostdata; 2689 vport->phba = phba; 2690 vport->load_flag |= FC_LOADING; 2691 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2692 vport->fc_rscn_flush = 0; 2693 2694 lpfc_get_vport_cfgparam(vport); 2695 shost->unique_id = instance; 2696 shost->max_id = LPFC_MAX_TARGET; 2697 shost->max_lun = vport->cfg_max_luns; 2698 shost->this_id = -1; 2699 shost->max_cmd_len = 16; 2700 if (phba->sli_rev == LPFC_SLI_REV4) { 2701 shost->dma_boundary = 2702 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2703 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2704 } 2705 2706 /* 2707 * Set initial can_queue value since 0 is no longer supported and 2708 * scsi_add_host will fail. This will be adjusted later based on the 2709 * max xri value determined in hba setup. 2710 */ 2711 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2712 if (dev != &phba->pcidev->dev) { 2713 shost->transportt = lpfc_vport_transport_template; 2714 vport->port_type = LPFC_NPIV_PORT; 2715 } else { 2716 shost->transportt = lpfc_transport_template; 2717 vport->port_type = LPFC_PHYSICAL_PORT; 2718 } 2719 2720 /* Initialize all internally managed lists. */ 2721 INIT_LIST_HEAD(&vport->fc_nodes); 2722 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2723 spin_lock_init(&vport->work_port_lock); 2724 2725 init_timer(&vport->fc_disctmo); 2726 vport->fc_disctmo.function = lpfc_disc_timeout; 2727 vport->fc_disctmo.data = (unsigned long)vport; 2728 2729 init_timer(&vport->fc_fdmitmo); 2730 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2731 vport->fc_fdmitmo.data = (unsigned long)vport; 2732 2733 init_timer(&vport->els_tmofunc); 2734 vport->els_tmofunc.function = lpfc_els_timeout; 2735 vport->els_tmofunc.data = (unsigned long)vport; 2736 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2737 if (error) 2738 goto out_put_shost; 2739 2740 spin_lock_irq(&phba->hbalock); 2741 list_add_tail(&vport->listentry, &phba->port_list); 2742 spin_unlock_irq(&phba->hbalock); 2743 return vport; 2744 2745out_put_shost: 2746 scsi_host_put(shost); 2747out: 2748 return NULL; 2749} 2750 2751/** 2752 * destroy_port - destroy an FC port 2753 * @vport: pointer to an lpfc virtual N_Port data structure. 2754 * 2755 * This routine destroys a FC port from the upper layer protocol. All the 2756 * resources associated with the port are released. 2757 **/ 2758void 2759destroy_port(struct lpfc_vport *vport) 2760{ 2761 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2762 struct lpfc_hba *phba = vport->phba; 2763 2764 lpfc_debugfs_terminate(vport); 2765 fc_remove_host(shost); 2766 scsi_remove_host(shost); 2767 2768 spin_lock_irq(&phba->hbalock); 2769 list_del_init(&vport->listentry); 2770 spin_unlock_irq(&phba->hbalock); 2771 2772 lpfc_cleanup(vport); 2773 return; 2774} 2775 2776/** 2777 * lpfc_get_instance - Get a unique integer ID 2778 * 2779 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2780 * uses the kernel idr facility to perform the task. 2781 * 2782 * Return codes: 2783 * instance - a unique integer ID allocated as the new instance. 
2784 * -1 - lpfc get instance failed. 2785 **/ 2786int 2787lpfc_get_instance(void) 2788{ 2789 int instance = 0; 2790 2791 /* Assign an unused number */ 2792 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2793 return -1; 2794 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2795 return -1; 2796 return instance; 2797} 2798 2799/** 2800 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2801 * @shost: pointer to SCSI host data structure. 2802 * @time: elapsed time of the scan in jiffies. 2803 * 2804 * This routine is called by the SCSI layer with a SCSI host to determine 2805 * whether the scan host is finished. 2806 * 2807 * Note: there is no scan_start function as adapter initialization will have 2808 * asynchronously kicked off the link initialization. 2809 * 2810 * Return codes 2811 * 0 - SCSI host scan is not over yet. 2812 * 1 - SCSI host scan is over. 2813 **/ 2814int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2815{ 2816 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2817 struct lpfc_hba *phba = vport->phba; 2818 int stat = 0; 2819 2820 spin_lock_irq(shost->host_lock); 2821 2822 if (vport->load_flag & FC_UNLOADING) { 2823 stat = 1; 2824 goto finished; 2825 } 2826 if (time >= 30 * HZ) { 2827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2828 "0461 Scanning longer than 30 " 2829 "seconds. Continuing initialization\n"); 2830 stat = 1; 2831 goto finished; 2832 } 2833 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2834 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2835 "0465 Link down longer than 15 " 2836 "seconds. Continuing initialization\n"); 2837 stat = 1; 2838 goto finished; 2839 } 2840 2841 if (vport->port_state != LPFC_VPORT_READY) 2842 goto finished; 2843 if (vport->num_disc_nodes || vport->fc_prli_sent) 2844 goto finished; 2845 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2846 goto finished; 2847 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2848 goto finished; 2849 2850 stat = 1; 2851 2852finished: 2853 spin_unlock_irq(shost->host_lock); 2854 return stat; 2855} 2856 2857/** 2858 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2859 * @shost: pointer to SCSI host data structure. 2860 * 2861 * This routine initializes a given SCSI host attributes on a FC port. The 2862 * SCSI host can be either on top of a physical port or a virtual port. 2863 **/ 2864void lpfc_host_attrib_init(struct Scsi_Host *shost) 2865{ 2866 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2867 struct lpfc_hba *phba = vport->phba; 2868 /* 2869 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
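 * (Worked example of the buffer-to-buffer receive size computed below: a
 *  common value of 2048 bytes is carried as bbRcvSizeMsb = 0x08 and
 *  bbRcvSizeLsb = 0x00, so ((0x08 & 0x0F) << 8) | 0x00 = 2048 is reported as
 *  fc_host_maxframe_size.)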
2870 */ 2871 2872 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2873 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2874 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2875 2876 memset(fc_host_supported_fc4s(shost), 0, 2877 sizeof(fc_host_supported_fc4s(shost))); 2878 fc_host_supported_fc4s(shost)[2] = 1; 2879 fc_host_supported_fc4s(shost)[7] = 1; 2880 2881 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2882 sizeof fc_host_symbolic_name(shost)); 2883 2884 fc_host_supported_speeds(shost) = 0; 2885 if (phba->lmt & LMT_10Gb) 2886 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2887 if (phba->lmt & LMT_8Gb) 2888 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2889 if (phba->lmt & LMT_4Gb) 2890 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2891 if (phba->lmt & LMT_2Gb) 2892 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2893 if (phba->lmt & LMT_1Gb) 2894 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2895 2896 fc_host_maxframe_size(shost) = 2897 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2898 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2899 2900 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2901 2902 /* This value is also unchanging */ 2903 memset(fc_host_active_fc4s(shost), 0, 2904 sizeof(fc_host_active_fc4s(shost))); 2905 fc_host_active_fc4s(shost)[2] = 1; 2906 fc_host_active_fc4s(shost)[7] = 1; 2907 2908 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2909 spin_lock_irq(shost->host_lock); 2910 vport->load_flag &= ~FC_LOADING; 2911 spin_unlock_irq(shost->host_lock); 2912} 2913 2914/** 2915 * lpfc_stop_port_s3 - Stop SLI3 device port 2916 * @phba: pointer to lpfc hba data structure. 2917 * 2918 * This routine is invoked to stop an SLI3 device port, it stops the device 2919 * from generating interrupts and stops the device driver's timers for the 2920 * device. 2921 **/ 2922static void 2923lpfc_stop_port_s3(struct lpfc_hba *phba) 2924{ 2925 /* Clear all interrupt enable conditions */ 2926 writel(0, phba->HCregaddr); 2927 readl(phba->HCregaddr); /* flush */ 2928 /* Clear all pending interrupts */ 2929 writel(0xffffffff, phba->HAregaddr); 2930 readl(phba->HAregaddr); /* flush */ 2931 2932 /* Reset some HBA SLI setup states */ 2933 lpfc_stop_hba_timers(phba); 2934 phba->pport->work_port_events = 0; 2935} 2936 2937/** 2938 * lpfc_stop_port_s4 - Stop SLI4 device port 2939 * @phba: pointer to lpfc hba data structure. 2940 * 2941 * This routine is invoked to stop an SLI4 device port, it stops the device 2942 * from generating interrupts and stops the device driver's timers for the 2943 * device. 2944 **/ 2945static void 2946lpfc_stop_port_s4(struct lpfc_hba *phba) 2947{ 2948 /* Reset some HBA SLI4 setup states */ 2949 lpfc_stop_hba_timers(phba); 2950 phba->pport->work_port_events = 0; 2951 phba->sli4_hba.intr_enable = 0; 2952} 2953 2954/** 2955 * lpfc_stop_port - Wrapper function for stopping hba port 2956 * @phba: Pointer to HBA context object. 2957 * 2958 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 2959 * the API jump table function pointer from the lpfc_hba struct. 2960 **/ 2961void 2962lpfc_stop_port(struct lpfc_hba *phba) 2963{ 2964 phba->lpfc_stop_port(phba); 2965} 2966 2967/** 2968 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2969 * @phba: Pointer to hba for which this call is being executed. 2970 * 2971 * This routine starts the timer waiting for the FCF rediscovery to complete. 
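 *
 * Sketch of the usual setup for this one-shot timer (the init site is
 * elsewhere in the driver and is assumed here purely for illustration):
 *
 *     init_timer(&phba->fcf.redisc_wait);
 *     phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
 *     phba->fcf.redisc_wait.data = (unsigned long)phba;
 *
 * This routine then arms it with mod_timer() for LPFC_FCF_REDISCOVER_WAIT_TMO
 * milliseconds and marks the rediscovery as pending with FCF_REDISC_PEND.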
2972 **/ 2973void 2974lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 2975{ 2976 unsigned long fcf_redisc_wait_tmo = 2977 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 2978 /* Start fcf rediscovery wait period timer */ 2979 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 2980 spin_lock_irq(&phba->hbalock); 2981 /* Allow action to new fcf asynchronous event */ 2982 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2983 /* Mark the FCF rediscovery pending state */ 2984 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 2985 spin_unlock_irq(&phba->hbalock); 2986} 2987 2988/** 2989 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 2990 * @ptr: Map to lpfc_hba data structure pointer. 2991 * 2992 * This routine is invoked when waiting for FCF table rediscover has been 2993 * timed out. If new FCF record(s) has (have) been discovered during the 2994 * wait period, a new FCF event shall be added to the FCOE async event 2995 * list, and then worker thread shall be waked up for processing from the 2996 * worker thread context. 2997 **/ 2998void 2999lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3000{ 3001 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3002 3003 /* Don't send FCF rediscovery event if timer cancelled */ 3004 spin_lock_irq(&phba->hbalock); 3005 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3006 spin_unlock_irq(&phba->hbalock); 3007 return; 3008 } 3009 /* Clear FCF rediscovery timer pending flag */ 3010 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3011 /* FCF rediscovery event to worker thread */ 3012 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 3013 spin_unlock_irq(&phba->hbalock); 3014 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3015 "2776 FCF rediscover quiescent timer expired\n"); 3016 /* wake up worker thread */ 3017 lpfc_worker_wake_up(phba); 3018} 3019 3020/** 3021 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3022 * @phba: pointer to lpfc hba data structure. 3023 * @acqe_link: pointer to the async link completion queue entry. 3024 * 3025 * This routine is to parse the SLI4 link-attention link fault code and 3026 * translate it into the base driver's read link attention mailbox command 3027 * status. 3028 * 3029 * Return: Link-attention status in terms of base driver's coding. 3030 **/ 3031static uint16_t 3032lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3033 struct lpfc_acqe_link *acqe_link) 3034{ 3035 uint16_t latt_fault; 3036 3037 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3038 case LPFC_ASYNC_LINK_FAULT_NONE: 3039 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3040 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3041 latt_fault = 0; 3042 break; 3043 default: 3044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3045 "0398 Invalid link fault code: x%x\n", 3046 bf_get(lpfc_acqe_link_fault, acqe_link)); 3047 latt_fault = MBXERR_ERROR; 3048 break; 3049 } 3050 return latt_fault; 3051} 3052 3053/** 3054 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3055 * @phba: pointer to lpfc hba data structure. 3056 * @acqe_link: pointer to the async link completion queue entry. 3057 * 3058 * This routine is to parse the SLI4 link attention type and translate it 3059 * into the base driver's link attention type coding. 3060 * 3061 * Return: Link attention type in terms of base driver's coding. 
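 *
 * Together with lpfc_sli4_parse_latt_fault() above and
 * lpfc_sli4_parse_latt_link_speed() below, the parsed value is used to fake a
 * READ_TOPOLOGY completion for FCoE links, roughly:
 *
 *     mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
 *     bf_set(lpfc_mbx_read_top_att_type, la, att_type);
 *     bf_set(lpfc_mbx_read_top_link_spd, la,
 *            lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
 *
 * (see lpfc_sli4_async_link_evt() later in this file).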
3062 **/ 3063static uint8_t 3064lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3065 struct lpfc_acqe_link *acqe_link) 3066{ 3067 uint8_t att_type; 3068 3069 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3070 case LPFC_ASYNC_LINK_STATUS_DOWN: 3071 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3072 att_type = LPFC_ATT_LINK_DOWN; 3073 break; 3074 case LPFC_ASYNC_LINK_STATUS_UP: 3075 /* Ignore physical link up events - wait for logical link up */ 3076 att_type = LPFC_ATT_RESERVED; 3077 break; 3078 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3079 att_type = LPFC_ATT_LINK_UP; 3080 break; 3081 default: 3082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3083 "0399 Invalid link attention type: x%x\n", 3084 bf_get(lpfc_acqe_link_status, acqe_link)); 3085 att_type = LPFC_ATT_RESERVED; 3086 break; 3087 } 3088 return att_type; 3089} 3090 3091/** 3092 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3093 * @phba: pointer to lpfc hba data structure. 3094 * @acqe_link: pointer to the async link completion queue entry. 3095 * 3096 * This routine is to parse the SLI4 link-attention link speed and translate 3097 * it into the base driver's link-attention link speed coding. 3098 * 3099 * Return: Link-attention link speed in terms of base driver's coding. 3100 **/ 3101static uint8_t 3102lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3103 struct lpfc_acqe_link *acqe_link) 3104{ 3105 uint8_t link_speed; 3106 3107 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3108 case LPFC_ASYNC_LINK_SPEED_ZERO: 3109 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3110 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3111 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3112 break; 3113 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3114 link_speed = LPFC_LINK_SPEED_1GHZ; 3115 break; 3116 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3117 link_speed = LPFC_LINK_SPEED_10GHZ; 3118 break; 3119 default: 3120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3121 "0483 Invalid link-attention link speed: x%x\n", 3122 bf_get(lpfc_acqe_link_speed, acqe_link)); 3123 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3124 break; 3125 } 3126 return link_speed; 3127} 3128 3129/** 3130 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3131 * @phba: pointer to lpfc hba data structure. 3132 * @acqe_link: pointer to the async link completion queue entry. 3133 * 3134 * This routine is to handle the SLI4 asynchronous FCoE link event. 
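 *
 * Processing outline (as implemented below): allocate a mailbox and a DMA
 * buffer, flush any outstanding ELS commands and block the ELS ring, record
 * the link state fields from the ACQE, then either issue a real READ_TOPOLOGY
 * mailbox command (FC mode) or fake its completion from the ACQE fields and
 * call lpfc_mbx_cmpl_read_topology() directly (FCoE mode).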
3135 **/ 3136static void 3137lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3138 struct lpfc_acqe_link *acqe_link) 3139{ 3140 struct lpfc_dmabuf *mp; 3141 LPFC_MBOXQ_t *pmb; 3142 MAILBOX_t *mb; 3143 struct lpfc_mbx_read_top *la; 3144 uint8_t att_type; 3145 int rc; 3146 3147 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3148 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3149 return; 3150 phba->fcoe_eventtag = acqe_link->event_tag; 3151 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3152 if (!pmb) { 3153 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3154 "0395 The mboxq allocation failed\n"); 3155 return; 3156 } 3157 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3158 if (!mp) { 3159 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3160 "0396 The lpfc_dmabuf allocation failed\n"); 3161 goto out_free_pmb; 3162 } 3163 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3164 if (!mp->virt) { 3165 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3166 "0397 The mbuf allocation failed\n"); 3167 goto out_free_dmabuf; 3168 } 3169 3170 /* Cleanup any outstanding ELS commands */ 3171 lpfc_els_flush_all_cmd(phba); 3172 3173 /* Block ELS IOCBs until we have done process link event */ 3174 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3175 3176 /* Update link event statistics */ 3177 phba->sli.slistat.link_event++; 3178 3179 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3180 lpfc_read_topology(phba, pmb, mp); 3181 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3182 pmb->vport = phba->pport; 3183 3184 /* Keep the link status for extra SLI4 state machine reference */ 3185 phba->sli4_hba.link_state.speed = 3186 bf_get(lpfc_acqe_link_speed, acqe_link); 3187 phba->sli4_hba.link_state.duplex = 3188 bf_get(lpfc_acqe_link_duplex, acqe_link); 3189 phba->sli4_hba.link_state.status = 3190 bf_get(lpfc_acqe_link_status, acqe_link); 3191 phba->sli4_hba.link_state.type = 3192 bf_get(lpfc_acqe_link_type, acqe_link); 3193 phba->sli4_hba.link_state.number = 3194 bf_get(lpfc_acqe_link_number, acqe_link); 3195 phba->sli4_hba.link_state.fault = 3196 bf_get(lpfc_acqe_link_fault, acqe_link); 3197 phba->sli4_hba.link_state.logical_speed = 3198 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3199 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3200 "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x " 3201 "LA Type:x%x Port Type:%d Port Number:%d Logical " 3202 "speed:%dMbps Fault:%d\n", 3203 phba->sli4_hba.link_state.speed, 3204 phba->sli4_hba.link_state.topology, 3205 phba->sli4_hba.link_state.status, 3206 phba->sli4_hba.link_state.type, 3207 phba->sli4_hba.link_state.number, 3208 phba->sli4_hba.link_state.logical_speed * 10, 3209 phba->sli4_hba.link_state.fault); 3210 /* 3211 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3212 * topology info. Note: Optional for non FC-AL ports. 3213 */ 3214 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3215 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3216 if (rc == MBX_NOT_FINISHED) 3217 goto out_free_dmabuf; 3218 return; 3219 } 3220 /* 3221 * For FCoE Mode: fill in all the topology information we need and call 3222 * the READ_TOPOLOGY completion routine to continue without actually 3223 * sending the READ_TOPOLOGY mailbox command to the port. 
3224 */ 3225 /* Parse and translate status field */ 3226 mb = &pmb->u.mb; 3227 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3228 3229 /* Parse and translate link attention fields */ 3230 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3231 la->eventTag = acqe_link->event_tag; 3232 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 3233 bf_set(lpfc_mbx_read_top_link_spd, la, 3234 lpfc_sli4_parse_latt_link_speed(phba, acqe_link)); 3235 3236 /* Fake the the following irrelvant fields */ 3237 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 3238 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 3239 bf_set(lpfc_mbx_read_top_il, la, 0); 3240 bf_set(lpfc_mbx_read_top_pb, la, 0); 3241 bf_set(lpfc_mbx_read_top_fa, la, 0); 3242 bf_set(lpfc_mbx_read_top_mm, la, 0); 3243 3244 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3245 lpfc_mbx_cmpl_read_topology(phba, pmb); 3246 3247 return; 3248 3249out_free_dmabuf: 3250 kfree(mp); 3251out_free_pmb: 3252 mempool_free(pmb, phba->mbox_mem_pool); 3253} 3254 3255/** 3256 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 3257 * @phba: pointer to lpfc hba data structure. 3258 * @acqe_fc: pointer to the async fc completion queue entry. 3259 * 3260 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 3261 * that the event was received and then issue a read_topology mailbox command so 3262 * that the rest of the driver will treat it the same as SLI3. 3263 **/ 3264static void 3265lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 3266{ 3267 struct lpfc_dmabuf *mp; 3268 LPFC_MBOXQ_t *pmb; 3269 int rc; 3270 3271 if (bf_get(lpfc_trailer_type, acqe_fc) != 3272 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 3273 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3274 "2895 Non FC link Event detected.(%d)\n", 3275 bf_get(lpfc_trailer_type, acqe_fc)); 3276 return; 3277 } 3278 /* Keep the link status for extra SLI4 state machine reference */ 3279 phba->sli4_hba.link_state.speed = 3280 bf_get(lpfc_acqe_fc_la_speed, acqe_fc); 3281 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 3282 phba->sli4_hba.link_state.topology = 3283 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 3284 phba->sli4_hba.link_state.status = 3285 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 3286 phba->sli4_hba.link_state.type = 3287 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 3288 phba->sli4_hba.link_state.number = 3289 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 3290 phba->sli4_hba.link_state.fault = 3291 bf_get(lpfc_acqe_link_fault, acqe_fc); 3292 phba->sli4_hba.link_state.logical_speed = 3293 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 3294 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3295 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 3296 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 3297 "%dMbps Fault:%d\n", 3298 phba->sli4_hba.link_state.speed, 3299 phba->sli4_hba.link_state.topology, 3300 phba->sli4_hba.link_state.status, 3301 phba->sli4_hba.link_state.type, 3302 phba->sli4_hba.link_state.number, 3303 phba->sli4_hba.link_state.logical_speed * 10, 3304 phba->sli4_hba.link_state.fault); 3305 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3306 if (!pmb) { 3307 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3308 "2897 The mboxq allocation failed\n"); 3309 return; 3310 } 3311 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3312 if (!mp) { 3313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3314 "2898 The lpfc_dmabuf allocation failed\n"); 3315 goto out_free_pmb; 
3316 } 3317 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3318 if (!mp->virt) { 3319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3320 "2899 The mbuf allocation failed\n"); 3321 goto out_free_dmabuf; 3322 } 3323 3324 /* Cleanup any outstanding ELS commands */ 3325 lpfc_els_flush_all_cmd(phba); 3326 3327 /* Block ELS IOCBs until we have done process link event */ 3328 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3329 3330 /* Update link event statistics */ 3331 phba->sli.slistat.link_event++; 3332 3333 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3334 lpfc_read_topology(phba, pmb, mp); 3335 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3336 pmb->vport = phba->pport; 3337 3338 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3339 if (rc == MBX_NOT_FINISHED) 3340 goto out_free_dmabuf; 3341 return; 3342 3343out_free_dmabuf: 3344 kfree(mp); 3345out_free_pmb: 3346 mempool_free(pmb, phba->mbox_mem_pool); 3347} 3348 3349/** 3350 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 3351 * @phba: pointer to lpfc hba data structure. 3352 * @acqe_fc: pointer to the async SLI completion queue entry. 3353 * 3354 * This routine is to handle the SLI4 asynchronous SLI events. 3355 **/ 3356static void 3357lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 3358{ 3359 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3360 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 3361 "x%08x SLI Event Type:%d", 3362 acqe_sli->event_data1, acqe_sli->event_data2, 3363 bf_get(lpfc_trailer_type, acqe_sli)); 3364 return; 3365} 3366 3367/** 3368 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3369 * @vport: pointer to vport data structure. 3370 * 3371 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3372 * response to a CVL event. 3373 * 3374 * Return the pointer to the ndlp with the vport if successful, otherwise 3375 * return NULL. 
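 *
 * When no usable Fabric_DID ndlp exists on the vport, one is allocated or
 * re-enabled below so the caller always has a node for the delayed FDISC
 * retry. A rough caller sketch, following the CVL handling later in this
 * file:
 *
 *     ndlp = lpfc_sli4_perform_vport_cvl(vport);
 *     if (ndlp) {
 *             mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 *             ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
 *     }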
3376 **/ 3377static struct lpfc_nodelist * 3378lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3379{ 3380 struct lpfc_nodelist *ndlp; 3381 struct Scsi_Host *shost; 3382 struct lpfc_hba *phba; 3383 3384 if (!vport) 3385 return NULL; 3386 phba = vport->phba; 3387 if (!phba) 3388 return NULL; 3389 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3390 if (!ndlp) { 3391 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3392 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3393 if (!ndlp) 3394 return 0; 3395 lpfc_nlp_init(vport, ndlp, Fabric_DID); 3396 /* Set the node type */ 3397 ndlp->nlp_type |= NLP_FABRIC; 3398 /* Put ndlp onto node list */ 3399 lpfc_enqueue_node(vport, ndlp); 3400 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3401 /* re-setup ndlp without removing from node list */ 3402 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3403 if (!ndlp) 3404 return 0; 3405 } 3406 if ((phba->pport->port_state < LPFC_FLOGI) && 3407 (phba->pport->port_state != LPFC_VPORT_FAILED)) 3408 return NULL; 3409 /* If virtual link is not yet instantiated ignore CVL */ 3410 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 3411 && (vport->port_state != LPFC_VPORT_FAILED)) 3412 return NULL; 3413 shost = lpfc_shost_from_vport(vport); 3414 if (!shost) 3415 return NULL; 3416 lpfc_linkdown_port(vport); 3417 lpfc_cleanup_pending_mbox(vport); 3418 spin_lock_irq(shost->host_lock); 3419 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3420 spin_unlock_irq(shost->host_lock); 3421 3422 return ndlp; 3423} 3424 3425/** 3426 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3427 * @vport: pointer to lpfc hba data structure. 3428 * 3429 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3430 * response to a FCF dead event. 3431 **/ 3432static void 3433lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3434{ 3435 struct lpfc_vport **vports; 3436 int i; 3437 3438 vports = lpfc_create_vport_work_array(phba); 3439 if (vports) 3440 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3441 lpfc_sli4_perform_vport_cvl(vports[i]); 3442 lpfc_destroy_vport_work_array(phba, vports); 3443} 3444 3445/** 3446 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 3447 * @phba: pointer to lpfc hba data structure. 3448 * @acqe_link: pointer to the async fcoe completion queue entry. 3449 * 3450 * This routine is to handle the SLI4 asynchronous fcoe event. 
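 *
 * The FIP event types dispatched below are: new FCF and FCF parameter
 * modified (start or update an FCF table scan), FCF table full (logged only),
 * FCF dead (fast FCF rediscovery for the in-use FCF, with a CVL performed on
 * every vport), and clear virtual link (either re-issue FDISC while another
 * VLink is still active or start FCF rediscovery).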
3451 **/ 3452static void 3453lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 3454 struct lpfc_acqe_fip *acqe_fip) 3455{ 3456 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 3457 int rc; 3458 struct lpfc_vport *vport; 3459 struct lpfc_nodelist *ndlp; 3460 struct Scsi_Host *shost; 3461 int active_vlink_present; 3462 struct lpfc_vport **vports; 3463 int i; 3464 3465 phba->fc_eventTag = acqe_fip->event_tag; 3466 phba->fcoe_eventtag = acqe_fip->event_tag; 3467 switch (event_type) { 3468 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 3469 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 3470 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 3471 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3472 LOG_DISCOVERY, 3473 "2546 New FCF event, evt_tag:x%x, " 3474 "index:x%x\n", 3475 acqe_fip->event_tag, 3476 acqe_fip->index); 3477 else 3478 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3479 LOG_DISCOVERY, 3480 "2788 FCF param modified event, " 3481 "evt_tag:x%x, index:x%x\n", 3482 acqe_fip->event_tag, 3483 acqe_fip->index); 3484 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3485 /* 3486 * During period of FCF discovery, read the FCF 3487 * table record indexed by the event to update 3488 * FCF roundrobin failover eligible FCF bmask. 3489 */ 3490 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3491 LOG_DISCOVERY, 3492 "2779 Read FCF (x%x) for updating " 3493 "roundrobin FCF failover bmask\n", 3494 acqe_fip->index); 3495 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 3496 } 3497 3498 /* If the FCF discovery is in progress, do nothing. */ 3499 spin_lock_irq(&phba->hbalock); 3500 if (phba->hba_flag & FCF_TS_INPROG) { 3501 spin_unlock_irq(&phba->hbalock); 3502 break; 3503 } 3504 /* If fast FCF failover rescan event is pending, do nothing */ 3505 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3506 spin_unlock_irq(&phba->hbalock); 3507 break; 3508 } 3509 3510 /* If the FCF has been in discovered state, do nothing. */ 3511 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3512 spin_unlock_irq(&phba->hbalock); 3513 break; 3514 } 3515 spin_unlock_irq(&phba->hbalock); 3516 3517 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3518 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3519 "2770 Start FCF table scan per async FCF " 3520 "event, evt_tag:x%x, index:x%x\n", 3521 acqe_fip->event_tag, acqe_fip->index); 3522 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3523 LPFC_FCOE_FCF_GET_FIRST); 3524 if (rc) 3525 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3526 "2547 Issue FCF scan read FCF mailbox " 3527 "command failed (x%x)\n", rc); 3528 break; 3529 3530 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 3531 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3532 "2548 FCF Table full count 0x%x tag 0x%x\n", 3533 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 3534 acqe_fip->event_tag); 3535 break; 3536 3537 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 3538 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3539 "2549 FCF (x%x) disconnected from network, " 3540 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 3541 /* 3542 * If we are in the middle of FCF failover process, clear 3543 * the corresponding FCF bit in the roundrobin bitmap. 
3544 */ 3545 spin_lock_irq(&phba->hbalock); 3546 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3547 spin_unlock_irq(&phba->hbalock); 3548 /* Update FLOGI FCF failover eligible FCF bmask */ 3549 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 3550 break; 3551 } 3552 spin_unlock_irq(&phba->hbalock); 3553 3554 /* If the event is not for currently used fcf do nothing */ 3555 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 3556 break; 3557 3558 /* 3559 * Otherwise, request the port to rediscover the entire FCF 3560 * table for a fast recovery from case that the current FCF 3561 * is no longer valid as we are not in the middle of FCF 3562 * failover process already. 3563 */ 3564 spin_lock_irq(&phba->hbalock); 3565 /* Mark the fast failover process in progress */ 3566 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3567 spin_unlock_irq(&phba->hbalock); 3568 3569 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3570 "2771 Start FCF fast failover process due to " 3571 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3572 "\n", acqe_fip->event_tag, acqe_fip->index); 3573 rc = lpfc_sli4_redisc_fcf_table(phba); 3574 if (rc) { 3575 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3576 LOG_DISCOVERY, 3577 "2772 Issue FCF rediscover mabilbox " 3578 "command failed, fail through to FCF " 3579 "dead event\n"); 3580 spin_lock_irq(&phba->hbalock); 3581 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3582 spin_unlock_irq(&phba->hbalock); 3583 /* 3584 * Last resort will fail over by treating this 3585 * as a link down to FCF registration. 3586 */ 3587 lpfc_sli4_fcf_dead_failthrough(phba); 3588 } else { 3589 /* Reset FCF roundrobin bmask for new discovery */ 3590 memset(phba->fcf.fcf_rr_bmask, 0, 3591 sizeof(*phba->fcf.fcf_rr_bmask)); 3592 /* 3593 * Handling fast FCF failover to a DEAD FCF event is 3594 * considered equalivant to receiving CVL to all vports. 3595 */ 3596 lpfc_sli4_perform_all_vport_cvl(phba); 3597 } 3598 break; 3599 case LPFC_FIP_EVENT_TYPE_CVL: 3600 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3601 "2718 Clear Virtual Link Received for VPI 0x%x" 3602 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3603 vport = lpfc_find_vport_by_vpid(phba, 3604 acqe_fip->index - phba->vpi_base); 3605 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3606 if (!ndlp) 3607 break; 3608 active_vlink_present = 0; 3609 3610 vports = lpfc_create_vport_work_array(phba); 3611 if (vports) { 3612 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3613 i++) { 3614 if ((!(vports[i]->fc_flag & 3615 FC_VPORT_CVL_RCVD)) && 3616 (vports[i]->port_state > LPFC_FDISC)) { 3617 active_vlink_present = 1; 3618 break; 3619 } 3620 } 3621 lpfc_destroy_vport_work_array(phba, vports); 3622 } 3623 3624 if (active_vlink_present) { 3625 /* 3626 * If there are other active VLinks present, 3627 * re-instantiate the Vlink using FDISC. 3628 */ 3629 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3630 shost = lpfc_shost_from_vport(vport); 3631 spin_lock_irq(shost->host_lock); 3632 ndlp->nlp_flag |= NLP_DELAY_TMO; 3633 spin_unlock_irq(shost->host_lock); 3634 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3635 vport->port_state = LPFC_FDISC; 3636 } else { 3637 /* 3638 * Otherwise, we request port to rediscover 3639 * the entire FCF table for a fast recovery 3640 * from possible case that the current FCF 3641 * is no longer valid if we are not already 3642 * in the FCF failover process. 
3643 */ 3644 spin_lock_irq(&phba->hbalock); 3645 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3646 spin_unlock_irq(&phba->hbalock); 3647 break; 3648 } 3649 /* Mark the fast failover process in progress */ 3650 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 3651 spin_unlock_irq(&phba->hbalock); 3652 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3653 LOG_DISCOVERY, 3654 "2773 Start FCF failover per CVL, " 3655 "evt_tag:x%x\n", acqe_fip->event_tag); 3656 rc = lpfc_sli4_redisc_fcf_table(phba); 3657 if (rc) { 3658 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3659 LOG_DISCOVERY, 3660 "2774 Issue FCF rediscover " 3661 "mabilbox command failed, " 3662 "through to CVL event\n"); 3663 spin_lock_irq(&phba->hbalock); 3664 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 3665 spin_unlock_irq(&phba->hbalock); 3666 /* 3667 * Last resort will be re-try on the 3668 * the current registered FCF entry. 3669 */ 3670 lpfc_retry_pport_discovery(phba); 3671 } else 3672 /* 3673 * Reset FCF roundrobin bmask for new 3674 * discovery. 3675 */ 3676 memset(phba->fcf.fcf_rr_bmask, 0, 3677 sizeof(*phba->fcf.fcf_rr_bmask)); 3678 } 3679 break; 3680 default: 3681 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3682 "0288 Unknown FCoE event type 0x%x event tag " 3683 "0x%x\n", event_type, acqe_fip->event_tag); 3684 break; 3685 } 3686} 3687 3688/** 3689 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 3690 * @phba: pointer to lpfc hba data structure. 3691 * @acqe_link: pointer to the async dcbx completion queue entry. 3692 * 3693 * This routine is to handle the SLI4 asynchronous dcbx event. 3694 **/ 3695static void 3696lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3697 struct lpfc_acqe_dcbx *acqe_dcbx) 3698{ 3699 phba->fc_eventTag = acqe_dcbx->event_tag; 3700 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3701 "0290 The SLI4 DCBX asynchronous event is not " 3702 "handled yet\n"); 3703} 3704 3705/** 3706 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 3707 * @phba: pointer to lpfc hba data structure. 3708 * @acqe_link: pointer to the async grp5 completion queue entry. 3709 * 3710 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 3711 * is an asynchronous notified of a logical link speed change. The Port 3712 * reports the logical link speed in units of 10Mbps. 3713 **/ 3714static void 3715lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 3716 struct lpfc_acqe_grp5 *acqe_grp5) 3717{ 3718 uint16_t prev_ll_spd; 3719 3720 phba->fc_eventTag = acqe_grp5->event_tag; 3721 phba->fcoe_eventtag = acqe_grp5->event_tag; 3722 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 3723 phba->sli4_hba.link_state.logical_speed = 3724 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); 3725 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3726 "2789 GRP5 Async Event: Updating logical link speed " 3727 "from %dMbps to %dMbps\n", (prev_ll_spd * 10), 3728 (phba->sli4_hba.link_state.logical_speed*10)); 3729} 3730 3731/** 3732 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 3733 * @phba: pointer to lpfc hba data structure. 3734 * 3735 * This routine is invoked by the worker thread to process all the pending 3736 * SLI4 asynchronous events. 
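 *
 * For orientation, the producer side of this queue (simplified here with a
 * plain wake on the worker wait queue; the driver's own enqueue paths may
 * differ in detail) would look roughly like:
 *
 *	static void queue_async_event(struct lpfc_hba *phba,
 *				      struct lpfc_cq_event *cq_event)
 *	{
 *		unsigned long iflags;
 *
 *		spin_lock_irqsave(&phba->hbalock, iflags);
 *		list_add_tail(&cq_event->list,
 *			      &phba->sli4_hba.sp_asynce_work_queue);
 *		phba->hba_flag |= ASYNC_EVENT;
 *		spin_unlock_irqrestore(&phba->hbalock, iflags);
 *		wake_up_interruptible(&phba->work_waitq);
 *	}
 *
 * The consumer below then drains the list under the same lock, one entry
 * at a time, and dispatches on the trailer code of each CQE.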
3737 **/ 3738void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 3739{ 3740 struct lpfc_cq_event *cq_event; 3741 3742 /* First, declare the async event has been handled */ 3743 spin_lock_irq(&phba->hbalock); 3744 phba->hba_flag &= ~ASYNC_EVENT; 3745 spin_unlock_irq(&phba->hbalock); 3746 /* Now, handle all the async events */ 3747 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 3748 /* Get the first event from the head of the event queue */ 3749 spin_lock_irq(&phba->hbalock); 3750 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 3751 cq_event, struct lpfc_cq_event, list); 3752 spin_unlock_irq(&phba->hbalock); 3753 /* Process the asynchronous event */ 3754 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 3755 case LPFC_TRAILER_CODE_LINK: 3756 lpfc_sli4_async_link_evt(phba, 3757 &cq_event->cqe.acqe_link); 3758 break; 3759 case LPFC_TRAILER_CODE_FCOE: 3760 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 3761 break; 3762 case LPFC_TRAILER_CODE_DCBX: 3763 lpfc_sli4_async_dcbx_evt(phba, 3764 &cq_event->cqe.acqe_dcbx); 3765 break; 3766 case LPFC_TRAILER_CODE_GRP5: 3767 lpfc_sli4_async_grp5_evt(phba, 3768 &cq_event->cqe.acqe_grp5); 3769 break; 3770 case LPFC_TRAILER_CODE_FC: 3771 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 3772 break; 3773 case LPFC_TRAILER_CODE_SLI: 3774 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 3775 break; 3776 default: 3777 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3778 "1804 Invalid asynchrous event code: " 3779 "x%x\n", bf_get(lpfc_trailer_code, 3780 &cq_event->cqe.mcqe_cmpl)); 3781 break; 3782 } 3783 /* Free the completion event processed to the free pool */ 3784 lpfc_sli4_cq_event_release(phba, cq_event); 3785 } 3786} 3787 3788/** 3789 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 3790 * @phba: pointer to lpfc hba data structure. 3791 * 3792 * This routine is invoked by the worker thread to process FCF table 3793 * rediscovery pending completion event. 3794 **/ 3795void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 3796{ 3797 int rc; 3798 3799 spin_lock_irq(&phba->hbalock); 3800 /* Clear FCF rediscovery timeout event */ 3801 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 3802 /* Clear driver fast failover FCF record flag */ 3803 phba->fcf.failover_rec.flag = 0; 3804 /* Set state for FCF fast failover */ 3805 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 3806 spin_unlock_irq(&phba->hbalock); 3807 3808 /* Scan FCF table from the first entry to re-discover SAN */ 3809 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3810 "2777 Start post-quiescent FCF table scan\n"); 3811 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3812 if (rc) 3813 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3814 "2747 Issue FCF scan read FCF mailbox " 3815 "command failed 0x%x\n", rc); 3816} 3817 3818/** 3819 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3820 * @phba: pointer to lpfc hba data structure. 3821 * @dev_grp: The HBA PCI-Device group number. 3822 * 3823 * This routine is invoked to set up the per HBA PCI-Device group function 3824 * API jump table entries. 
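 *
 * In effect this builds a function-pointer jump table selected by the PCI
 * device group. A reduced standalone sketch of the idea (hypothetical
 * handler names and types, not the lpfc structures):
 *
 *	enum dev_grp { DEV_LP, DEV_OC };
 *
 *	struct hba_ops {
 *		int  (*hba_down_post)(void *hba);
 *		void (*stop_port)(void *hba);
 *	};
 *
 *	static int select_ops(enum dev_grp grp, struct hba_ops *ops,
 *			      const struct hba_ops *s3,
 *			      const struct hba_ops *s4)
 *	{
 *		switch (grp) {
 *		case DEV_LP:
 *			*ops = *s3;
 *			return 0;
 *		case DEV_OC:
 *			*ops = *s4;
 *			return 0;
 *		default:
 *			return -ENODEV;
 *		}
 *	}
 *
 * lpfc_api_table_setup() below does this once per sub-API (init, SCSI, SLI
 * and mailbox), failing the whole setup if any group is unknown.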
3825 * 3826 * Return: 0 if success, otherwise -ENODEV 3827 **/ 3828int 3829lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3830{ 3831 int rc; 3832 3833 /* Set up lpfc PCI-device group */ 3834 phba->pci_dev_grp = dev_grp; 3835 3836 /* The LPFC_PCI_DEV_OC uses SLI4 */ 3837 if (dev_grp == LPFC_PCI_DEV_OC) 3838 phba->sli_rev = LPFC_SLI_REV4; 3839 3840 /* Set up device INIT API function jump table */ 3841 rc = lpfc_init_api_table_setup(phba, dev_grp); 3842 if (rc) 3843 return -ENODEV; 3844 /* Set up SCSI API function jump table */ 3845 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 3846 if (rc) 3847 return -ENODEV; 3848 /* Set up SLI API function jump table */ 3849 rc = lpfc_sli_api_table_setup(phba, dev_grp); 3850 if (rc) 3851 return -ENODEV; 3852 /* Set up MBOX API function jump table */ 3853 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 3854 if (rc) 3855 return -ENODEV; 3856 3857 return 0; 3858} 3859 3860/** 3861 * lpfc_log_intr_mode - Log the active interrupt mode 3862 * @phba: pointer to lpfc hba data structure. 3863 * @intr_mode: active interrupt mode adopted. 3864 * 3865 * This routine it invoked to log the currently used active interrupt mode 3866 * to the device. 3867 **/ 3868static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3869{ 3870 switch (intr_mode) { 3871 case 0: 3872 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3873 "0470 Enable INTx interrupt mode.\n"); 3874 break; 3875 case 1: 3876 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3877 "0481 Enabled MSI interrupt mode.\n"); 3878 break; 3879 case 2: 3880 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3881 "0480 Enabled MSI-X interrupt mode.\n"); 3882 break; 3883 default: 3884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3885 "0482 Illegal interrupt mode.\n"); 3886 break; 3887 } 3888 return; 3889} 3890 3891/** 3892 * lpfc_enable_pci_dev - Enable a generic PCI device. 3893 * @phba: pointer to lpfc hba data structure. 3894 * 3895 * This routine is invoked to enable the PCI device that is common to all 3896 * PCI devices. 3897 * 3898 * Return codes 3899 * 0 - successful 3900 * other values - error 3901 **/ 3902static int 3903lpfc_enable_pci_dev(struct lpfc_hba *phba) 3904{ 3905 struct pci_dev *pdev; 3906 int bars; 3907 3908 /* Obtain PCI device reference */ 3909 if (!phba->pcidev) 3910 goto out_error; 3911 else 3912 pdev = phba->pcidev; 3913 /* Select PCI BARs */ 3914 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3915 /* Enable PCI device */ 3916 if (pci_enable_device_mem(pdev)) 3917 goto out_error; 3918 /* Request PCI resource for the device */ 3919 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3920 goto out_disable_device; 3921 /* Set up device as PCI master and save state for EEH */ 3922 pci_set_master(pdev); 3923 pci_try_set_mwi(pdev); 3924 pci_save_state(pdev); 3925 3926 return 0; 3927 3928out_disable_device: 3929 pci_disable_device(pdev); 3930out_error: 3931 return -ENODEV; 3932} 3933 3934/** 3935 * lpfc_disable_pci_dev - Disable a generic PCI device. 3936 * @phba: pointer to lpfc hba data structure. 3937 * 3938 * This routine is invoked to disable the PCI device that is common to all 3939 * PCI devices. 
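 *
 * A probe-style caller is expected to pair the two helpers, enabling the
 * device first and unwinding on any later failure. A reduced sketch only
 * (the real probe paths perform several more steps in between, and
 * hba_pci_bringup() is not a function in this file):
 *
 *	static int hba_pci_bringup(struct lpfc_hba *phba)
 *	{
 *		int rc;
 *
 *		rc = lpfc_enable_pci_dev(phba);
 *		if (rc)
 *			return rc;
 *
 *		rc = lpfc_sli_pci_mem_setup(phba);
 *		if (rc) {
 *			lpfc_disable_pci_dev(phba);
 *			return rc;
 *		}
 *		return 0;
 *	}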
3940 **/ 3941static void 3942lpfc_disable_pci_dev(struct lpfc_hba *phba) 3943{ 3944 struct pci_dev *pdev; 3945 int bars; 3946 3947 /* Obtain PCI device reference */ 3948 if (!phba->pcidev) 3949 return; 3950 else 3951 pdev = phba->pcidev; 3952 /* Select PCI BARs */ 3953 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3954 /* Release PCI resource and disable PCI device */ 3955 pci_release_selected_regions(pdev, bars); 3956 pci_disable_device(pdev); 3957 /* Null out PCI private reference to driver */ 3958 pci_set_drvdata(pdev, NULL); 3959 3960 return; 3961} 3962 3963/** 3964 * lpfc_reset_hba - Reset a hba 3965 * @phba: pointer to lpfc hba data structure. 3966 * 3967 * This routine is invoked to reset a hba device. It brings the HBA 3968 * offline, performs a board restart, and then brings the board back 3969 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 3970 * on outstanding mailbox commands. 3971 **/ 3972void 3973lpfc_reset_hba(struct lpfc_hba *phba) 3974{ 3975 /* If resets are disabled then set error state and return. */ 3976 if (!phba->cfg_enable_hba_reset) { 3977 phba->link_state = LPFC_HBA_ERROR; 3978 return; 3979 } 3980 lpfc_offline_prep(phba); 3981 lpfc_offline(phba); 3982 lpfc_sli_brdrestart(phba); 3983 lpfc_online(phba); 3984 lpfc_unblock_mgmt_io(phba); 3985} 3986 3987/** 3988 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3989 * @phba: pointer to lpfc hba data structure. 3990 * 3991 * This routine is invoked to set up the driver internal resources specific to 3992 * support the SLI-3 HBA device it attached to. 3993 * 3994 * Return codes 3995 * 0 - successful 3996 * other values - error 3997 **/ 3998static int 3999lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 4000{ 4001 struct lpfc_sli *psli; 4002 4003 /* 4004 * Initialize timers used by driver 4005 */ 4006 4007 /* Heartbeat timer */ 4008 init_timer(&phba->hb_tmofunc); 4009 phba->hb_tmofunc.function = lpfc_hb_timeout; 4010 phba->hb_tmofunc.data = (unsigned long)phba; 4011 4012 psli = &phba->sli; 4013 /* MBOX heartbeat timer */ 4014 init_timer(&psli->mbox_tmo); 4015 psli->mbox_tmo.function = lpfc_mbox_timeout; 4016 psli->mbox_tmo.data = (unsigned long) phba; 4017 /* FCP polling mode timer */ 4018 init_timer(&phba->fcp_poll_timer); 4019 phba->fcp_poll_timer.function = lpfc_poll_timeout; 4020 phba->fcp_poll_timer.data = (unsigned long) phba; 4021 /* Fabric block timer */ 4022 init_timer(&phba->fabric_block_timer); 4023 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4024 phba->fabric_block_timer.data = (unsigned long) phba; 4025 /* EA polling mode timer */ 4026 init_timer(&phba->eratt_poll); 4027 phba->eratt_poll.function = lpfc_poll_eratt; 4028 phba->eratt_poll.data = (unsigned long) phba; 4029 4030 /* Host attention work mask setup */ 4031 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 4032 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 4033 4034 /* Get all the module params for configuring this host */ 4035 lpfc_get_cfgparam(phba); 4036 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 4037 phba->menlo_flag |= HBA_MENLO_SUPPORT; 4038 /* check for menlo minimum sg count */ 4039 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 4040 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 4041 } 4042 4043 /* 4044 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4045 * used to create the sg_dma_buf_pool must be dynamically calculated. 4046 * 2 segments are added since the IOCB needs a command and response bde. 
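 *
 * As a worked example with made-up structure sizes (only the real sizes
 * from the headers matter at build time): if sizeof(struct fcp_cmnd) were
 * 32, sizeof(struct fcp_rsp) 24 and sizeof(struct ulp_bde64) 12, a
 * 64-segment configuration would size each pool buffer as
 *
 *	static size_t sli3_buf_size(size_t cmnd, size_t rsp,
 *				    size_t bde, int seg_cnt)
 *	{
 *		return cmnd + rsp + (size_t)(seg_cnt + 2) * bde;
 *	}
 *
 *	sli3_buf_size(32, 24, 12, 64) == 848
 *
 * where the "+ 2" is the command and response BDE noted above.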
4047 */ 4048 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 4049 sizeof(struct fcp_rsp) + 4050 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 4051 4052 if (phba->cfg_enable_bg) { 4053 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 4054 phba->cfg_sg_dma_buf_size += 4055 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 4056 } 4057 4058 /* Also reinitialize the host templates with new values. */ 4059 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4060 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4061 4062 phba->max_vpi = LPFC_MAX_VPI; 4063 /* This will be set to correct value after config_port mbox */ 4064 phba->max_vports = 0; 4065 4066 /* 4067 * Initialize the SLI Layer to run with lpfc HBAs. 4068 */ 4069 lpfc_sli_setup(phba); 4070 lpfc_sli_queue_setup(phba); 4071 4072 /* Allocate device driver memory */ 4073 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 4074 return -ENOMEM; 4075 4076 return 0; 4077} 4078 4079/** 4080 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 4081 * @phba: pointer to lpfc hba data structure. 4082 * 4083 * This routine is invoked to unset the driver internal resources set up 4084 * specific for supporting the SLI-3 HBA device it attached to. 4085 **/ 4086static void 4087lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 4088{ 4089 /* Free device driver memory allocated */ 4090 lpfc_mem_free_all(phba); 4091 4092 return; 4093} 4094 4095/** 4096 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 4097 * @phba: pointer to lpfc hba data structure. 4098 * 4099 * This routine is invoked to set up the driver internal resources specific to 4100 * support the SLI-4 HBA device it attached to. 4101 * 4102 * Return codes 4103 * 0 - successful 4104 * other values - error 4105 **/ 4106static int 4107lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4108{ 4109 struct lpfc_sli *psli; 4110 LPFC_MBOXQ_t *mboxq; 4111 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4112 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4113 struct lpfc_mqe *mqe; 4114 int longs, sli_family; 4115 4116 /* Before proceed, wait for POST done and device ready */ 4117 rc = lpfc_sli4_post_status_check(phba); 4118 if (rc) 4119 return -ENODEV; 4120 4121 /* 4122 * Initialize timers used by driver 4123 */ 4124 4125 /* Heartbeat timer */ 4126 init_timer(&phba->hb_tmofunc); 4127 phba->hb_tmofunc.function = lpfc_hb_timeout; 4128 phba->hb_tmofunc.data = (unsigned long)phba; 4129 init_timer(&phba->rrq_tmr); 4130 phba->rrq_tmr.function = lpfc_rrq_timeout; 4131 phba->rrq_tmr.data = (unsigned long)phba; 4132 4133 psli = &phba->sli; 4134 /* MBOX heartbeat timer */ 4135 init_timer(&psli->mbox_tmo); 4136 psli->mbox_tmo.function = lpfc_mbox_timeout; 4137 psli->mbox_tmo.data = (unsigned long) phba; 4138 /* Fabric block timer */ 4139 init_timer(&phba->fabric_block_timer); 4140 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4141 phba->fabric_block_timer.data = (unsigned long) phba; 4142 /* EA polling mode timer */ 4143 init_timer(&phba->eratt_poll); 4144 phba->eratt_poll.function = lpfc_poll_eratt; 4145 phba->eratt_poll.data = (unsigned long) phba; 4146 /* FCF rediscover timer */ 4147 init_timer(&phba->fcf.redisc_wait); 4148 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 4149 phba->fcf.redisc_wait.data = (unsigned long)phba; 4150 4151 /* 4152 * We need to do a READ_CONFIG mailbox command here before 4153 * calling lpfc_get_cfgparam. 
For VFs this will report the 4154 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4155 * All of the resources allocated 4156 * for this Port are tied to these values. 4157 */ 4158 /* Get all the module params for configuring this host */ 4159 lpfc_get_cfgparam(phba); 4160 phba->max_vpi = LPFC_MAX_VPI; 4161 /* This will be set to correct value after the read_config mbox */ 4162 phba->max_vports = 0; 4163 4164 /* Program the default value of vlan_id and fc_map */ 4165 phba->valid_vlan = 0; 4166 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4167 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4168 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4169 4170 /* 4171 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4172 * used to create the sg_dma_buf_pool must be dynamically calculated. 4173 * 2 segments are added since the IOCB needs a command and response bde. 4174 * To insure that the scsi sgl does not cross a 4k page boundary only 4175 * sgl sizes of must be a power of 2. 4176 */ 4177 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4178 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); 4179 4180 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 4181 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4182 switch (sli_family) { 4183 case LPFC_SLI_INTF_FAMILY_BE2: 4184 case LPFC_SLI_INTF_FAMILY_BE3: 4185 /* There is a single hint for BE - 2 pages per BPL. */ 4186 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == 4187 LPFC_SLI_INTF_SLI_HINT1_1) 4188 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4189 break; 4190 case LPFC_SLI_INTF_FAMILY_LNCR_A0: 4191 case LPFC_SLI_INTF_FAMILY_LNCR_B0: 4192 default: 4193 break; 4194 } 4195 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 4196 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 4197 dma_buf_size = dma_buf_size << 1) 4198 ; 4199 if (dma_buf_size == max_buf_size) 4200 phba->cfg_sg_seg_cnt = (dma_buf_size - 4201 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 4202 (2 * sizeof(struct sli4_sge))) / 4203 sizeof(struct sli4_sge); 4204 phba->cfg_sg_dma_buf_size = dma_buf_size; 4205 4206 /* Initialize buffer queue management fields */ 4207 hbq_count = lpfc_sli_hbq_count(); 4208 for (i = 0; i < hbq_count; ++i) 4209 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4210 INIT_LIST_HEAD(&phba->rb_pend_list); 4211 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 4212 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 4213 4214 /* 4215 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
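 *
 * The sizing loop a few lines above can be read as "round the required
 * length up to a power of two, capped at the family maximum, then shrink
 * the segment count to whatever still fits". A standalone sketch of the
 * rounding step (made-up bounds, same loop shape):
 *
 *	static size_t round_buf(size_t need, size_t min_sz, size_t max_sz)
 *	{
 *		size_t sz;
 *
 *		for (sz = min_sz; sz < max_sz && need > sz; sz <<= 1)
 *			;
 *		return sz;
 *	}
 *
 *	Example: round_buf(3000, 2048, 65536) == 4096
 *
 * keeping every pool buffer a power of two so a scsi sgl never straddles a
 * 4K page boundary needlessly.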
4216 */ 4217 /* Initialize the Abort scsi buffer list used by driver */ 4218 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 4219 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 4220 /* This abort list used by worker thread */ 4221 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4222 4223 /* 4224 * Initialize dirver internal slow-path work queues 4225 */ 4226 4227 /* Driver internel slow-path CQ Event pool */ 4228 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 4229 /* Response IOCB work queue list */ 4230 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 4231 /* Asynchronous event CQ Event work queue list */ 4232 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 4233 /* Fast-path XRI aborted CQ Event work queue list */ 4234 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 4235 /* Slow-path XRI aborted CQ Event work queue list */ 4236 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 4237 /* Receive queue CQ Event work queue list */ 4238 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4239 4240 /* Initialize the driver internal SLI layer lists. */ 4241 lpfc_sli_setup(phba); 4242 lpfc_sli_queue_setup(phba); 4243 4244 /* Allocate device driver memory */ 4245 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 4246 if (rc) 4247 return -ENOMEM; 4248 4249 /* IF Type 2 ports get initialized now. */ 4250 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4251 LPFC_SLI_INTF_IF_TYPE_2) { 4252 rc = lpfc_pci_function_reset(phba); 4253 if (unlikely(rc)) 4254 return -ENODEV; 4255 } 4256 4257 /* Create the bootstrap mailbox command */ 4258 rc = lpfc_create_bootstrap_mbox(phba); 4259 if (unlikely(rc)) 4260 goto out_free_mem; 4261 4262 /* Set up the host's endian order with the device. */ 4263 rc = lpfc_setup_endian_order(phba); 4264 if (unlikely(rc)) 4265 goto out_free_bsmbx; 4266 4267 /* Set up the hba's configuration parameters. */ 4268 rc = lpfc_sli4_read_config(phba); 4269 if (unlikely(rc)) 4270 goto out_free_bsmbx; 4271 4272 /* IF Type 0 ports get initialized now. */ 4273 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4274 LPFC_SLI_INTF_IF_TYPE_0) { 4275 rc = lpfc_pci_function_reset(phba); 4276 if (unlikely(rc)) 4277 goto out_free_bsmbx; 4278 } 4279 4280 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4281 GFP_KERNEL); 4282 if (!mboxq) { 4283 rc = -ENOMEM; 4284 goto out_free_bsmbx; 4285 } 4286 4287 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 4288 lpfc_supported_pages(mboxq); 4289 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4290 if (!rc) { 4291 mqe = &mboxq->u.mqe; 4292 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4293 LPFC_MAX_SUPPORTED_PAGES); 4294 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4295 switch (pn_page[i]) { 4296 case LPFC_SLI4_PARAMETERS: 4297 phba->sli4_hba.pc_sli4_params.supported = 1; 4298 break; 4299 default: 4300 break; 4301 } 4302 } 4303 /* Read the port's SLI4 Parameters capabilities if supported. */ 4304 if (phba->sli4_hba.pc_sli4_params.supported) 4305 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4306 if (rc) { 4307 mempool_free(mboxq, phba->mbox_mem_pool); 4308 rc = -EIO; 4309 goto out_free_bsmbx; 4310 } 4311 } 4312 /* 4313 * Get sli4 parameters that override parameters from Port capabilities. 4314 * If this call fails it is not a critical error so continue loading. 
4315 */ 4316 lpfc_get_sli4_parameters(phba, mboxq); 4317 mempool_free(mboxq, phba->mbox_mem_pool); 4318 /* Create all the SLI4 queues */ 4319 rc = lpfc_sli4_queue_create(phba); 4320 if (rc) 4321 goto out_free_bsmbx; 4322 4323 /* Create driver internal CQE event pool */ 4324 rc = lpfc_sli4_cq_event_pool_create(phba); 4325 if (rc) 4326 goto out_destroy_queue; 4327 4328 /* Initialize and populate the iocb list per host */ 4329 rc = lpfc_init_sgl_list(phba); 4330 if (rc) { 4331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4332 "1400 Failed to initialize sgl list.\n"); 4333 goto out_destroy_cq_event_pool; 4334 } 4335 rc = lpfc_init_active_sgl_array(phba); 4336 if (rc) { 4337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4338 "1430 Failed to initialize sgl list.\n"); 4339 goto out_free_sgl_list; 4340 } 4341 4342 rc = lpfc_sli4_init_rpi_hdrs(phba); 4343 if (rc) { 4344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4345 "1432 Failed to initialize rpi headers.\n"); 4346 goto out_free_active_sgl; 4347 } 4348 4349 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4350 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4351 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4352 GFP_KERNEL); 4353 if (!phba->fcf.fcf_rr_bmask) { 4354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4355 "2759 Failed allocate memory for FCF round " 4356 "robin failover bmask\n"); 4357 goto out_remove_rpi_hdrs; 4358 } 4359 4360 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4361 phba->cfg_fcp_eq_count), GFP_KERNEL); 4362 if (!phba->sli4_hba.fcp_eq_hdl) { 4363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4364 "2572 Failed allocate memory for fast-path " 4365 "per-EQ handle array\n"); 4366 goto out_free_fcf_rr_bmask; 4367 } 4368 4369 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4370 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4371 if (!phba->sli4_hba.msix_entries) { 4372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4373 "2573 Failed allocate memory for msi-x " 4374 "interrupt vector entries\n"); 4375 goto out_free_fcp_eq_hdl; 4376 } 4377 4378 return rc; 4379 4380out_free_fcp_eq_hdl: 4381 kfree(phba->sli4_hba.fcp_eq_hdl); 4382out_free_fcf_rr_bmask: 4383 kfree(phba->fcf.fcf_rr_bmask); 4384out_remove_rpi_hdrs: 4385 lpfc_sli4_remove_rpi_hdrs(phba); 4386out_free_active_sgl: 4387 lpfc_free_active_sgl(phba); 4388out_free_sgl_list: 4389 lpfc_free_sgl_list(phba); 4390out_destroy_cq_event_pool: 4391 lpfc_sli4_cq_event_pool_destroy(phba); 4392out_destroy_queue: 4393 lpfc_sli4_queue_destroy(phba); 4394out_free_bsmbx: 4395 lpfc_destroy_bootstrap_mbox(phba); 4396out_free_mem: 4397 lpfc_mem_free(phba); 4398 return rc; 4399} 4400 4401/** 4402 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 4403 * @phba: pointer to lpfc hba data structure. 4404 * 4405 * This routine is invoked to unset the driver internal resources set up 4406 * specific for supporting the SLI-4 HBA device it attached to. 4407 **/ 4408static void 4409lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 4410{ 4411 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4412 4413 /* Free memory allocated for msi-x interrupt vector entries */ 4414 kfree(phba->sli4_hba.msix_entries); 4415 4416 /* Free memory allocated for fast-path work queue handles */ 4417 kfree(phba->sli4_hba.fcp_eq_hdl); 4418 4419 /* Free the allocated rpi headers. 
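 *
 * Note that lpfc_sli4_driver_resource_setup() above ends with the usual
 * kernel "unwind with gotos" ladder: each allocation has a label that
 * releases everything acquired after the point of failure, in reverse
 * order, and this unset routine releases the same resources again at
 * teardown. A reduced sketch of the idiom (hypothetical steps):
 *
 *	static int setup_resources(void)
 *	{
 *		int rc;
 *
 *		rc = alloc_a();
 *		if (rc)
 *			return rc;
 *		rc = alloc_b();
 *		if (rc)
 *			goto out_free_a;
 *		rc = alloc_c();
 *		if (rc)
 *			goto out_free_b;
 *		return 0;
 *
 *	out_free_b:
 *		free_b();
 *	out_free_a:
 *		free_a();
 *		return rc;
 *	}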
*/ 4420 lpfc_sli4_remove_rpi_hdrs(phba); 4421 lpfc_sli4_remove_rpis(phba); 4422 4423 /* Free eligible FCF index bmask */ 4424 kfree(phba->fcf.fcf_rr_bmask); 4425 4426 /* Free the ELS sgl list */ 4427 lpfc_free_active_sgl(phba); 4428 lpfc_free_sgl_list(phba); 4429 4430 /* Free the SCSI sgl management array */ 4431 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4432 4433 /* Free the SLI4 queues */ 4434 lpfc_sli4_queue_destroy(phba); 4435 4436 /* Free the completion queue EQ event pool */ 4437 lpfc_sli4_cq_event_release_all(phba); 4438 lpfc_sli4_cq_event_pool_destroy(phba); 4439 4440 /* Free the bsmbx region. */ 4441 lpfc_destroy_bootstrap_mbox(phba); 4442 4443 /* Free the SLI Layer memory with SLI4 HBAs */ 4444 lpfc_mem_free_all(phba); 4445 4446 /* Free the current connect table */ 4447 list_for_each_entry_safe(conn_entry, next_conn_entry, 4448 &phba->fcf_conn_rec_list, list) { 4449 list_del_init(&conn_entry->list); 4450 kfree(conn_entry); 4451 } 4452 4453 return; 4454} 4455 4456/** 4457 * lpfc_init_api_table_setup - Set up init api fucntion jump table 4458 * @phba: The hba struct for which this call is being executed. 4459 * @dev_grp: The HBA PCI-Device group number. 4460 * 4461 * This routine sets up the device INIT interface API function jump table 4462 * in @phba struct. 4463 * 4464 * Returns: 0 - success, -ENODEV - failure. 4465 **/ 4466int 4467lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4468{ 4469 phba->lpfc_hba_init_link = lpfc_hba_init_link; 4470 phba->lpfc_hba_down_link = lpfc_hba_down_link; 4471 switch (dev_grp) { 4472 case LPFC_PCI_DEV_LP: 4473 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4474 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 4475 phba->lpfc_stop_port = lpfc_stop_port_s3; 4476 break; 4477 case LPFC_PCI_DEV_OC: 4478 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 4479 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 4480 phba->lpfc_stop_port = lpfc_stop_port_s4; 4481 break; 4482 default: 4483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4484 "1431 Invalid HBA PCI-device group: 0x%x\n", 4485 dev_grp); 4486 return -ENODEV; 4487 break; 4488 } 4489 return 0; 4490} 4491 4492/** 4493 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 4494 * @phba: pointer to lpfc hba data structure. 4495 * 4496 * This routine is invoked to set up the driver internal resources before the 4497 * device specific resource setup to support the HBA device it attached to. 
4498 * 4499 * Return codes 4500 * 0 - successful 4501 * other values - error 4502 **/ 4503static int 4504lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 4505{ 4506 /* 4507 * Driver resources common to all SLI revisions 4508 */ 4509 atomic_set(&phba->fast_event_count, 0); 4510 spin_lock_init(&phba->hbalock); 4511 4512 /* Initialize ndlp management spinlock */ 4513 spin_lock_init(&phba->ndlp_lock); 4514 4515 INIT_LIST_HEAD(&phba->port_list); 4516 INIT_LIST_HEAD(&phba->work_list); 4517 init_waitqueue_head(&phba->wait_4_mlo_m_q); 4518 4519 /* Initialize the wait queue head for the kernel thread */ 4520 init_waitqueue_head(&phba->work_waitq); 4521 4522 /* Initialize the scsi buffer list used by driver for scsi IO */ 4523 spin_lock_init(&phba->scsi_buf_list_lock); 4524 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 4525 4526 /* Initialize the fabric iocb list */ 4527 INIT_LIST_HEAD(&phba->fabric_iocb_list); 4528 4529 /* Initialize list to save ELS buffers */ 4530 INIT_LIST_HEAD(&phba->elsbuf); 4531 4532 /* Initialize FCF connection rec list */ 4533 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 4534 4535 return 0; 4536} 4537 4538/** 4539 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 4540 * @phba: pointer to lpfc hba data structure. 4541 * 4542 * This routine is invoked to set up the driver internal resources after the 4543 * device specific resource setup to support the HBA device it attached to. 4544 * 4545 * Return codes 4546 * 0 - successful 4547 * other values - error 4548 **/ 4549static int 4550lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 4551{ 4552 int error; 4553 4554 /* Startup the kernel thread for this host adapter. */ 4555 phba->worker_thread = kthread_run(lpfc_do_work, phba, 4556 "lpfc_worker_%d", phba->brd_no); 4557 if (IS_ERR(phba->worker_thread)) { 4558 error = PTR_ERR(phba->worker_thread); 4559 return error; 4560 } 4561 4562 return 0; 4563} 4564 4565/** 4566 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 4567 * @phba: pointer to lpfc hba data structure. 4568 * 4569 * This routine is invoked to unset the driver internal resources set up after 4570 * the device specific resource setup for supporting the HBA device it 4571 * attached to. 4572 **/ 4573static void 4574lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 4575{ 4576 /* Stop kernel worker thread */ 4577 kthread_stop(phba->worker_thread); 4578} 4579 4580/** 4581 * lpfc_free_iocb_list - Free iocb list. 4582 * @phba: pointer to lpfc hba data structure. 4583 * 4584 * This routine is invoked to free the driver's IOCB list and memory. 4585 **/ 4586static void 4587lpfc_free_iocb_list(struct lpfc_hba *phba) 4588{ 4589 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 4590 4591 spin_lock_irq(&phba->hbalock); 4592 list_for_each_entry_safe(iocbq_entry, iocbq_next, 4593 &phba->lpfc_iocb_list, list) { 4594 list_del(&iocbq_entry->list); 4595 kfree(iocbq_entry); 4596 phba->total_iocbq_bufs--; 4597 } 4598 spin_unlock_irq(&phba->hbalock); 4599 4600 return; 4601} 4602 4603/** 4604 * lpfc_init_iocb_list - Allocate and initialize iocb list. 4605 * @phba: pointer to lpfc hba data structure. 4606 * 4607 * This routine is invoked to allocate and initizlize the driver's IOCB 4608 * list and set up the IOCB tag array accordingly. 
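 *
 * The routine below follows the common "allocate N elements or roll the
 * whole list back" pattern. A reduced sketch of that shape (generic
 * element type, kernel list API; the real routine additionally assigns an
 * iotag per IOCB):
 *
 *	struct elem {
 *		struct list_head list;
 *	};
 *	static LIST_HEAD(pool);
 *
 *	static int fill_pool(int count)
 *	{
 *		struct elem *e, *tmp;
 *		int i;
 *
 *		for (i = 0; i < count; i++) {
 *			e = kzalloc(sizeof(*e), GFP_KERNEL);
 *			if (!e)
 *				goto undo;
 *			list_add_tail(&e->list, &pool);
 *		}
 *		return 0;
 *	undo:
 *		list_for_each_entry_safe(e, tmp, &pool, list) {
 *			list_del(&e->list);
 *			kfree(e);
 *		}
 *		return -ENOMEM;
 *	}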
4609 * 4610 * Return codes 4611 * 0 - successful 4612 * other values - error 4613 **/ 4614static int 4615lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 4616{ 4617 struct lpfc_iocbq *iocbq_entry = NULL; 4618 uint16_t iotag; 4619 int i; 4620 4621 /* Initialize and populate the iocb list per host. */ 4622 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 4623 for (i = 0; i < iocb_count; i++) { 4624 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 4625 if (iocbq_entry == NULL) { 4626 printk(KERN_ERR "%s: only allocated %d iocbs of " 4627 "expected %d count. Unloading driver.\n", 4628 __func__, i, LPFC_IOCB_LIST_CNT); 4629 goto out_free_iocbq; 4630 } 4631 4632 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 4633 if (iotag == 0) { 4634 kfree(iocbq_entry); 4635 printk(KERN_ERR "%s: failed to allocate IOTAG. " 4636 "Unloading driver.\n", __func__); 4637 goto out_free_iocbq; 4638 } 4639 iocbq_entry->sli4_xritag = NO_XRI; 4640 4641 spin_lock_irq(&phba->hbalock); 4642 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 4643 phba->total_iocbq_bufs++; 4644 spin_unlock_irq(&phba->hbalock); 4645 } 4646 4647 return 0; 4648 4649out_free_iocbq: 4650 lpfc_free_iocb_list(phba); 4651 4652 return -ENOMEM; 4653} 4654 4655/** 4656 * lpfc_free_sgl_list - Free sgl list. 4657 * @phba: pointer to lpfc hba data structure. 4658 * 4659 * This routine is invoked to free the driver's sgl list and memory. 4660 **/ 4661static void 4662lpfc_free_sgl_list(struct lpfc_hba *phba) 4663{ 4664 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 4665 LIST_HEAD(sglq_list); 4666 4667 spin_lock_irq(&phba->hbalock); 4668 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 4669 spin_unlock_irq(&phba->hbalock); 4670 4671 list_for_each_entry_safe(sglq_entry, sglq_next, 4672 &sglq_list, list) { 4673 list_del(&sglq_entry->list); 4674 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 4675 kfree(sglq_entry); 4676 phba->sli4_hba.total_sglq_bufs--; 4677 } 4678 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4679} 4680 4681/** 4682 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 4683 * @phba: pointer to lpfc hba data structure. 4684 * 4685 * This routine is invoked to allocate the driver's active sgl memory. 4686 * This array will hold the sglq_entry's for active IOs. 4687 **/ 4688static int 4689lpfc_init_active_sgl_array(struct lpfc_hba *phba) 4690{ 4691 int size; 4692 size = sizeof(struct lpfc_sglq *); 4693 size *= phba->sli4_hba.max_cfg_param.max_xri; 4694 4695 phba->sli4_hba.lpfc_sglq_active_list = 4696 kzalloc(size, GFP_KERNEL); 4697 if (!phba->sli4_hba.lpfc_sglq_active_list) 4698 return -ENOMEM; 4699 return 0; 4700} 4701 4702/** 4703 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 4704 * @phba: pointer to lpfc hba data structure. 4705 * 4706 * This routine is invoked to walk through the array of active sglq entries 4707 * and free all of the resources. 4708 * This is just a place holder for now. 4709 **/ 4710static void 4711lpfc_free_active_sgl(struct lpfc_hba *phba) 4712{ 4713 kfree(phba->sli4_hba.lpfc_sglq_active_list); 4714} 4715 4716/** 4717 * lpfc_init_sgl_list - Allocate and initialize sgl list. 4718 * @phba: pointer to lpfc hba data structure. 4719 * 4720 * This routine is invoked to allocate and initizlize the driver's sgl 4721 * list and set up the sgl xritag tag array accordingly. 
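 *
 * Before building the list, the routine below also splits the XRI budget:
 * els_xri_cnt entries are reserved for ELS sgls and the remainder becomes
 * the SCSI XRI pool (scsi_xri_max = max_xri - els_xri_cnt). The same
 * bookkeeping as a standalone sketch with made-up numbers:
 *
 *	static int scsi_xri_budget(int max_xri, int els_xri_cnt)
 *	{
 *		if (max_xri <= els_xri_cnt)
 *			return -1;
 *		return max_xri - els_xri_cnt;
 *	}
 *
 *	Example: scsi_xri_budget(1024, 64) == 960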
4722 * 4723 * Return codes 4724 * 0 - successful 4725 * other values - error 4726 **/ 4727static int 4728lpfc_init_sgl_list(struct lpfc_hba *phba) 4729{ 4730 struct lpfc_sglq *sglq_entry = NULL; 4731 int i; 4732 int els_xri_cnt; 4733 4734 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4735 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4736 "2400 lpfc_init_sgl_list els %d.\n", 4737 els_xri_cnt); 4738 /* Initialize and populate the sglq list per host/VF. */ 4739 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4740 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 4741 4742 /* Sanity check on XRI management */ 4743 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 4744 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4745 "2562 No room left for SCSI XRI allocation: " 4746 "max_xri=%d, els_xri=%d\n", 4747 phba->sli4_hba.max_cfg_param.max_xri, 4748 els_xri_cnt); 4749 return -ENOMEM; 4750 } 4751 4752 /* Allocate memory for the ELS XRI management array */ 4753 phba->sli4_hba.lpfc_els_sgl_array = 4754 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), 4755 GFP_KERNEL); 4756 4757 if (!phba->sli4_hba.lpfc_els_sgl_array) { 4758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4759 "2401 Failed to allocate memory for ELS " 4760 "XRI management array of size %d.\n", 4761 els_xri_cnt); 4762 return -ENOMEM; 4763 } 4764 4765 /* Keep the SCSI XRI into the XRI management array */ 4766 phba->sli4_hba.scsi_xri_max = 4767 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4768 phba->sli4_hba.scsi_xri_cnt = 0; 4769 4770 phba->sli4_hba.lpfc_scsi_psb_array = 4771 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4772 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4773 4774 if (!phba->sli4_hba.lpfc_scsi_psb_array) { 4775 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4776 "2563 Failed to allocate memory for SCSI " 4777 "XRI management array of size %d.\n", 4778 phba->sli4_hba.scsi_xri_max); 4779 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4780 return -ENOMEM; 4781 } 4782 4783 for (i = 0; i < els_xri_cnt; i++) { 4784 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); 4785 if (sglq_entry == NULL) { 4786 printk(KERN_ERR "%s: only allocated %d sgls of " 4787 "expected %d count. Unloading driver.\n", 4788 __func__, i, els_xri_cnt); 4789 goto out_free_mem; 4790 } 4791 4792 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); 4793 if (sglq_entry->sli4_xritag == NO_XRI) { 4794 kfree(sglq_entry); 4795 printk(KERN_ERR "%s: failed to allocate XRI.\n" 4796 "Unloading driver.\n", __func__); 4797 goto out_free_mem; 4798 } 4799 sglq_entry->buff_type = GEN_BUFF_TYPE; 4800 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4801 if (sglq_entry->virt == NULL) { 4802 kfree(sglq_entry); 4803 printk(KERN_ERR "%s: failed to allocate mbuf.\n" 4804 "Unloading driver.\n", __func__); 4805 goto out_free_mem; 4806 } 4807 sglq_entry->sgl = sglq_entry->virt; 4808 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4809 4810 /* The list order is used by later block SGL registraton */ 4811 spin_lock_irq(&phba->hbalock); 4812 sglq_entry->state = SGL_FREED; 4813 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4814 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4815 phba->sli4_hba.total_sglq_bufs++; 4816 spin_unlock_irq(&phba->hbalock); 4817 } 4818 return 0; 4819 4820out_free_mem: 4821 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4822 lpfc_free_sgl_list(phba); 4823 return -ENOMEM; 4824} 4825 4826/** 4827 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 4828 * @phba: pointer to lpfc hba data structure. 
4829 * 4830 * This routine is invoked to post rpi header templates to the 4831 * HBA consistent with the SLI-4 interface spec. This routine 4832 * posts a PAGE_SIZE memory region to the port to hold up to 4833 * PAGE_SIZE modulo 64 rpi context headers. 4834 * No locks are held here because this is an initialization routine 4835 * called only from probe or lpfc_online when interrupts are not 4836 * enabled and the driver is reinitializing the device. 4837 * 4838 * Return codes 4839 * 0 - successful 4840 * -ENOMEM - No availble memory 4841 * -EIO - The mailbox failed to complete successfully. 4842 **/ 4843int 4844lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4845{ 4846 int rc = 0; 4847 int longs; 4848 uint16_t rpi_count; 4849 struct lpfc_rpi_hdr *rpi_hdr; 4850 4851 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4852 4853 /* 4854 * Provision an rpi bitmask range for discovery. The total count 4855 * is the difference between max and base + 1. 4856 */ 4857 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4858 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4859 4860 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4861 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4862 GFP_KERNEL); 4863 if (!phba->sli4_hba.rpi_bmask) 4864 return -ENOMEM; 4865 4866 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 4867 if (!rpi_hdr) { 4868 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4869 "0391 Error during rpi post operation\n"); 4870 lpfc_sli4_remove_rpis(phba); 4871 rc = -ENODEV; 4872 } 4873 4874 return rc; 4875} 4876 4877/** 4878 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 4879 * @phba: pointer to lpfc hba data structure. 4880 * 4881 * This routine is invoked to allocate a single 4KB memory region to 4882 * support rpis and stores them in the phba. This single region 4883 * provides support for up to 64 rpis. The region is used globally 4884 * by the device. 4885 * 4886 * Returns: 4887 * A valid rpi hdr on success. 4888 * A NULL pointer on any failure. 4889 **/ 4890struct lpfc_rpi_hdr * 4891lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 4892{ 4893 uint16_t rpi_limit, curr_rpi_range; 4894 struct lpfc_dmabuf *dmabuf; 4895 struct lpfc_rpi_hdr *rpi_hdr; 4896 4897 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4898 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4899 4900 spin_lock_irq(&phba->hbalock); 4901 curr_rpi_range = phba->sli4_hba.next_rpi; 4902 spin_unlock_irq(&phba->hbalock); 4903 4904 /* 4905 * The port has a limited number of rpis. The increment here 4906 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 4907 * and to allow the full max_rpi range per port. 4908 */ 4909 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4910 return NULL; 4911 4912 /* 4913 * First allocate the protocol header region for the port. The 4914 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 4915 */ 4916 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4917 if (!dmabuf) 4918 return NULL; 4919 4920 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4921 LPFC_HDR_TEMPLATE_SIZE, 4922 &dmabuf->phys, 4923 GFP_KERNEL); 4924 if (!dmabuf->virt) { 4925 rpi_hdr = NULL; 4926 goto err_free_dmabuf; 4927 } 4928 4929 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 4930 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 4931 rpi_hdr = NULL; 4932 goto err_free_coherent; 4933 } 4934 4935 /* Save the rpi header data for cleanup later. 
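 *
 * The alignment requirement above is checked rather than forced: if the
 * coherent allocation is not aligned to LPFC_HDR_TEMPLATE_SIZE, the buffer
 * is released and the call fails. For a power-of-two template size the
 * check reduces to plain mask arithmetic, e.g. (standalone sketch, 4K
 * assumed):
 *
 *	#include <stdint.h>
 *
 *	static int is_hdr_aligned(uint64_t dma_addr, uint64_t tmpl_size)
 *	{
 *		return (dma_addr & (tmpl_size - 1)) == 0;
 *	}
 *
 * which is what IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE) expands
 * to for a power-of-two size.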
*/ 4936 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4937 if (!rpi_hdr) 4938 goto err_free_coherent; 4939 4940 rpi_hdr->dmabuf = dmabuf; 4941 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4942 rpi_hdr->page_count = 1; 4943 spin_lock_irq(&phba->hbalock); 4944 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4945 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4946 4947 /* 4948 * The next_rpi stores the next module-64 rpi value to post 4949 * in any subsequent rpi memory region postings. 4950 */ 4951 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4952 spin_unlock_irq(&phba->hbalock); 4953 return rpi_hdr; 4954 4955 err_free_coherent: 4956 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4957 dmabuf->virt, dmabuf->phys); 4958 err_free_dmabuf: 4959 kfree(dmabuf); 4960 return NULL; 4961} 4962 4963/** 4964 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4965 * @phba: pointer to lpfc hba data structure. 4966 * 4967 * This routine is invoked to remove all memory resources allocated 4968 * to support rpis. This routine presumes the caller has released all 4969 * rpis consumed by fabric or port logins and is prepared to have 4970 * the header pages removed. 4971 **/ 4972void 4973lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4974{ 4975 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4976 4977 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4978 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4979 list_del(&rpi_hdr->list); 4980 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4981 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4982 kfree(rpi_hdr->dmabuf); 4983 kfree(rpi_hdr); 4984 } 4985 4986 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4987 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4988} 4989 4990/** 4991 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4992 * @pdev: pointer to pci device data structure. 4993 * 4994 * This routine is invoked to allocate the driver hba data structure for an 4995 * HBA device. If the allocation is successful, the phba reference to the 4996 * PCI device data structure is set. 4997 * 4998 * Return codes 4999 * pointer to @phba - successful 5000 * NULL - error 5001 **/ 5002static struct lpfc_hba * 5003lpfc_hba_alloc(struct pci_dev *pdev) 5004{ 5005 struct lpfc_hba *phba; 5006 5007 /* Allocate memory for HBA structure */ 5008 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5009 if (!phba) { 5010 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5011 return NULL; 5012 } 5013 5014 /* Set reference to PCI device in HBA structure */ 5015 phba->pcidev = pdev; 5016 5017 /* Assign an unused board number */ 5018 phba->brd_no = lpfc_get_instance(); 5019 if (phba->brd_no < 0) { 5020 kfree(phba); 5021 return NULL; 5022 } 5023 5024 spin_lock_init(&phba->ct_ev_lock); 5025 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5026 5027 return phba; 5028} 5029 5030/** 5031 * lpfc_hba_free - Free driver hba data structure with a device. 5032 * @phba: pointer to lpfc hba data structure. 5033 * 5034 * This routine is invoked to free the driver hba data structure with an 5035 * HBA device. 5036 **/ 5037static void 5038lpfc_hba_free(struct lpfc_hba *phba) 5039{ 5040 /* Release the driver assigned board number */ 5041 idr_remove(&lpfc_hba_index, phba->brd_no); 5042 5043 kfree(phba); 5044 return; 5045} 5046 5047/** 5048 * lpfc_create_shost - Create hba physical port with associated scsi host. 5049 * @phba: pointer to lpfc hba data structure. 
5050 * 5051 * This routine is invoked to create HBA physical port and associate a SCSI 5052 * host with it. 5053 * 5054 * Return codes 5055 * 0 - successful 5056 * other values - error 5057 **/ 5058static int 5059lpfc_create_shost(struct lpfc_hba *phba) 5060{ 5061 struct lpfc_vport *vport; 5062 struct Scsi_Host *shost; 5063 5064 /* Initialize HBA FC structure */ 5065 phba->fc_edtov = FF_DEF_EDTOV; 5066 phba->fc_ratov = FF_DEF_RATOV; 5067 phba->fc_altov = FF_DEF_ALTOV; 5068 phba->fc_arbtov = FF_DEF_ARBTOV; 5069 5070 atomic_set(&phba->sdev_cnt, 0); 5071 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 5072 if (!vport) 5073 return -ENODEV; 5074 5075 shost = lpfc_shost_from_vport(vport); 5076 phba->pport = vport; 5077 lpfc_debugfs_initialize(vport); 5078 /* Put reference to SCSI host to driver's device private data */ 5079 pci_set_drvdata(phba->pcidev, shost); 5080 5081 return 0; 5082} 5083 5084/** 5085 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 5086 * @phba: pointer to lpfc hba data structure. 5087 * 5088 * This routine is invoked to destroy HBA physical port and the associated 5089 * SCSI host. 5090 **/ 5091static void 5092lpfc_destroy_shost(struct lpfc_hba *phba) 5093{ 5094 struct lpfc_vport *vport = phba->pport; 5095 5096 /* Destroy physical port that associated with the SCSI host */ 5097 destroy_port(vport); 5098 5099 return; 5100} 5101 5102/** 5103 * lpfc_setup_bg - Setup Block guard structures and debug areas. 5104 * @phba: pointer to lpfc hba data structure. 5105 * @shost: the shost to be used to detect Block guard settings. 5106 * 5107 * This routine sets up the local Block guard protocol settings for @shost. 5108 * This routine also allocates memory for debugging bg buffers. 5109 **/ 5110static void 5111lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5112{ 5113 int pagecnt = 10; 5114 if (lpfc_prot_mask && lpfc_prot_guard) { 5115 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5116 "1478 Registering BlockGuard with the " 5117 "SCSI layer\n"); 5118 scsi_host_set_prot(shost, lpfc_prot_mask); 5119 scsi_host_set_guard(shost, lpfc_prot_guard); 5120 } 5121 if (!_dump_buf_data) { 5122 while (pagecnt) { 5123 spin_lock_init(&_dump_buf_lock); 5124 _dump_buf_data = 5125 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5126 if (_dump_buf_data) { 5127 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5128 "9043 BLKGRD: allocated %d pages for " 5129 "_dump_buf_data at 0x%p\n", 5130 (1 << pagecnt), _dump_buf_data); 5131 _dump_buf_data_order = pagecnt; 5132 memset(_dump_buf_data, 0, 5133 ((1 << PAGE_SHIFT) << pagecnt)); 5134 break; 5135 } else 5136 --pagecnt; 5137 } 5138 if (!_dump_buf_data_order) 5139 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5140 "9044 BLKGRD: ERROR unable to allocate " 5141 "memory for hexdump\n"); 5142 } else 5143 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5144 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 5145 "\n", _dump_buf_data); 5146 if (!_dump_buf_dif) { 5147 while (pagecnt) { 5148 _dump_buf_dif = 5149 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5150 if (_dump_buf_dif) { 5151 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5152 "9046 BLKGRD: allocated %d pages for " 5153 "_dump_buf_dif at 0x%p\n", 5154 (1 << pagecnt), _dump_buf_dif); 5155 _dump_buf_dif_order = pagecnt; 5156 memset(_dump_buf_dif, 0, 5157 ((1 << PAGE_SHIFT) << pagecnt)); 5158 break; 5159 } else 5160 --pagecnt; 5161 } 5162 if (!_dump_buf_dif_order) 5163 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5164 "9047 BLKGRD: ERROR unable to allocate " 5165 "memory for 
hexdump\n"); 5166 } else 5167 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5168 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 5169 _dump_buf_dif); 5170} 5171 5172/** 5173 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5174 * @phba: pointer to lpfc hba data structure. 5175 * 5176 * This routine is invoked to perform all the necessary post initialization 5177 * setup for the device. 5178 **/ 5179static void 5180lpfc_post_init_setup(struct lpfc_hba *phba) 5181{ 5182 struct Scsi_Host *shost; 5183 struct lpfc_adapter_event_header adapter_event; 5184 5185 /* Get the default values for Model Name and Description */ 5186 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5187 5188 /* 5189 * hba setup may have changed the hba_queue_depth so we need to 5190 * adjust the value of can_queue. 5191 */ 5192 shost = pci_get_drvdata(phba->pcidev); 5193 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5194 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5195 lpfc_setup_bg(phba, shost); 5196 5197 lpfc_host_attrib_init(shost); 5198 5199 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5200 spin_lock_irq(shost->host_lock); 5201 lpfc_poll_start_timer(phba); 5202 spin_unlock_irq(shost->host_lock); 5203 } 5204 5205 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5206 "0428 Perform SCSI scan\n"); 5207 /* Send board arrival event to upper layer */ 5208 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5209 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5210 fc_host_post_vendor_event(shost, fc_get_event_number(), 5211 sizeof(adapter_event), 5212 (char *) &adapter_event, 5213 LPFC_NL_VENDOR_ID); 5214 return; 5215} 5216 5217/** 5218 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5219 * @phba: pointer to lpfc hba data structure. 5220 * 5221 * This routine is invoked to set up the PCI device memory space for device 5222 * with SLI-3 interface spec. 5223 * 5224 * Return codes 5225 * 0 - successful 5226 * other values - error 5227 **/ 5228static int 5229lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5230{ 5231 struct pci_dev *pdev; 5232 unsigned long bar0map_len, bar2map_len; 5233 int i, hbq_count; 5234 void *ptr; 5235 int error = -ENODEV; 5236 5237 /* Obtain PCI device reference */ 5238 if (!phba->pcidev) 5239 return error; 5240 else 5241 pdev = phba->pcidev; 5242 5243 /* Set the device DMA mask size */ 5244 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5245 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5246 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5247 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5248 return error; 5249 } 5250 } 5251 5252 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5253 * required by each mapping. 5254 */ 5255 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5256 bar0map_len = pci_resource_len(pdev, 0); 5257 5258 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5259 bar2map_len = pci_resource_len(pdev, 2); 5260 5261 /* Map HBA SLIM to a kernel virtual address. */ 5262 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5263 if (!phba->slim_memmap_p) { 5264 dev_printk(KERN_ERR, &pdev->dev, 5265 "ioremap failed for SLIM memory.\n"); 5266 goto out; 5267 } 5268 5269 /* Map HBA Control Registers to a kernel virtual address. 
*/ 5270 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5271 if (!phba->ctrl_regs_memmap_p) { 5272 dev_printk(KERN_ERR, &pdev->dev, 5273 "ioremap failed for HBA control registers.\n"); 5274 goto out_iounmap_slim; 5275 } 5276 5277 /* Allocate memory for SLI-2 structures */ 5278 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5279 SLI2_SLIM_SIZE, 5280 &phba->slim2p.phys, 5281 GFP_KERNEL); 5282 if (!phba->slim2p.virt) 5283 goto out_iounmap; 5284 5285 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5286 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5287 phba->mbox_ext = (phba->slim2p.virt + 5288 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5289 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5290 phba->IOCBs = (phba->slim2p.virt + 5291 offsetof(struct lpfc_sli2_slim, IOCBs)); 5292 5293 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5294 lpfc_sli_hbq_size(), 5295 &phba->hbqslimp.phys, 5296 GFP_KERNEL); 5297 if (!phba->hbqslimp.virt) 5298 goto out_free_slim; 5299 5300 hbq_count = lpfc_sli_hbq_count(); 5301 ptr = phba->hbqslimp.virt; 5302 for (i = 0; i < hbq_count; ++i) { 5303 phba->hbqs[i].hbq_virt = ptr; 5304 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5305 ptr += (lpfc_hbq_defs[i]->entry_count * 5306 sizeof(struct lpfc_hbq_entry)); 5307 } 5308 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5309 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5310 5311 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5312 5313 INIT_LIST_HEAD(&phba->rb_pend_list); 5314 5315 phba->MBslimaddr = phba->slim_memmap_p; 5316 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5317 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5318 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5319 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5320 5321 return 0; 5322 5323out_free_slim: 5324 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5325 phba->slim2p.virt, phba->slim2p.phys); 5326out_iounmap: 5327 iounmap(phba->ctrl_regs_memmap_p); 5328out_iounmap_slim: 5329 iounmap(phba->slim_memmap_p); 5330out: 5331 return error; 5332} 5333 5334/** 5335 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5336 * @phba: pointer to lpfc hba data structure. 5337 * 5338 * This routine is invoked to unset the PCI device memory space for device 5339 * with SLI-3 interface spec. 5340 **/ 5341static void 5342lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5343{ 5344 struct pci_dev *pdev; 5345 5346 /* Obtain PCI device reference */ 5347 if (!phba->pcidev) 5348 return; 5349 else 5350 pdev = phba->pcidev; 5351 5352 /* Free coherent DMA memory allocated */ 5353 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5354 phba->hbqslimp.virt, phba->hbqslimp.phys); 5355 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5356 phba->slim2p.virt, phba->slim2p.phys); 5357 5358 /* I/O memory unmap */ 5359 iounmap(phba->ctrl_regs_memmap_p); 5360 iounmap(phba->slim_memmap_p); 5361 5362 return; 5363} 5364 5365/** 5366 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5367 * @phba: pointer to lpfc hba data structure. 5368 * 5369 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5370 * done and check status. 5371 * 5372 * Return 0 if successful, otherwise -ENODEV. 
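 *
 * The POST wait below is a bounded polling loop: read the port semaphore
 * register, bail out on a fatal error bit, return success as soon as the
 * port reports ready, otherwise sleep 10 ms and retry for up to 3000
 * iterations (about 30 seconds). The same shape as a reduced sketch
 * (hypothetical ready/error predicates; the real routine keeps more state
 * and reads further registers afterwards):
 *
 *	static int wait_port_ready(void __iomem *smphr)
 *	{
 *		uint32_t sts;
 *		int i;
 *
 *		for (i = 0; i < 3000; i++) {
 *			sts = readl(smphr);
 *			if (port_fatal_error(sts))
 *				return -ENODEV;
 *			if (port_ready(sts))
 *				return 0;
 *			msleep(10);
 *		}
 *		return -ENODEV;
 *	}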
5373 **/ 5374int 5375lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5376{ 5377 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 5378 struct lpfc_register reg_data; 5379 int i, port_error = 0; 5380 uint32_t if_type; 5381 5382 if (!phba->sli4_hba.PSMPHRregaddr) 5383 return -ENODEV; 5384 5385 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5386 for (i = 0; i < 3000; i++) { 5387 portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); 5388 if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { 5389 /* Port has a fatal POST error, break out */ 5390 port_error = -ENODEV; 5391 break; 5392 } 5393 if (LPFC_POST_STAGE_PORT_READY == 5394 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 5395 break; 5396 msleep(10); 5397 } 5398 5399 /* 5400 * If there was a port error during POST, then don't proceed with 5401 * other register reads as the data may not be valid. Just exit. 5402 */ 5403 if (port_error) { 5404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5405 "1408 Port Failed POST - portsmphr=0x%x, " 5406 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 5407 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 5408 portsmphr_reg.word0, 5409 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 5410 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 5411 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 5412 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 5413 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 5414 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 5415 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 5416 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 5417 } else { 5418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5419 "2534 Device Info: SLIFamily=0x%x, " 5420 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 5421 "SLIHint_2=0x%x, FT=0x%x\n", 5422 bf_get(lpfc_sli_intf_sli_family, 5423 &phba->sli4_hba.sli_intf), 5424 bf_get(lpfc_sli_intf_slirev, 5425 &phba->sli4_hba.sli_intf), 5426 bf_get(lpfc_sli_intf_if_type, 5427 &phba->sli4_hba.sli_intf), 5428 bf_get(lpfc_sli_intf_sli_hint1, 5429 &phba->sli4_hba.sli_intf), 5430 bf_get(lpfc_sli_intf_sli_hint2, 5431 &phba->sli4_hba.sli_intf), 5432 bf_get(lpfc_sli_intf_func_type, 5433 &phba->sli4_hba.sli_intf)); 5434 /* 5435 * Check for other Port errors during the initialization 5436 * process. Fail the load if the port did not come up 5437 * correctly. 5438 */ 5439 if_type = bf_get(lpfc_sli_intf_if_type, 5440 &phba->sli4_hba.sli_intf); 5441 switch (if_type) { 5442 case LPFC_SLI_INTF_IF_TYPE_0: 5443 phba->sli4_hba.ue_mask_lo = 5444 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 5445 phba->sli4_hba.ue_mask_hi = 5446 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 5447 uerrlo_reg.word0 = 5448 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 5449 uerrhi_reg.word0 = 5450 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 5451 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5452 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5453 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5454 "1422 Unrecoverable Error " 5455 "Detected during POST " 5456 "uerr_lo_reg=0x%x, " 5457 "uerr_hi_reg=0x%x, " 5458 "ue_mask_lo_reg=0x%x, " 5459 "ue_mask_hi_reg=0x%x\n", 5460 uerrlo_reg.word0, 5461 uerrhi_reg.word0, 5462 phba->sli4_hba.ue_mask_lo, 5463 phba->sli4_hba.ue_mask_hi); 5464 port_error = -ENODEV; 5465 } 5466 break; 5467 case LPFC_SLI_INTF_IF_TYPE_2: 5468 /* Final checks. The port status should be clean. 
*/ 5469 reg_data.word0 = 5470 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 5471 if (bf_get(lpfc_sliport_status_err, &reg_data)) { 5472 phba->work_status[0] = 5473 readl(phba->sli4_hba.u.if_type2. 5474 ERR1regaddr); 5475 phba->work_status[1] = 5476 readl(phba->sli4_hba.u.if_type2. 5477 ERR2regaddr); 5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5479 "2888 Port Error Detected " 5480 "during POST: " 5481 "port status reg 0x%x, " 5482 "port_smphr reg 0x%x, " 5483 "error 1=0x%x, error 2=0x%x\n", 5484 reg_data.word0, 5485 portsmphr_reg.word0, 5486 phba->work_status[0], 5487 phba->work_status[1]); 5488 port_error = -ENODEV; 5489 } 5490 break; 5491 case LPFC_SLI_INTF_IF_TYPE_1: 5492 default: 5493 break; 5494 } 5495 } 5496 return port_error; 5497} 5498 5499/** 5500 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5501 * @phba: pointer to lpfc hba data structure. 5502 * @if_type: The SLI4 interface type getting configured. 5503 * 5504 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5505 * memory map. 5506 **/ 5507 static void 5508 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 5509 { 5510 switch (if_type) { 5511 case LPFC_SLI_INTF_IF_TYPE_0: 5512 phba->sli4_hba.u.if_type0.UERRLOregaddr = 5513 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 5514 phba->sli4_hba.u.if_type0.UERRHIregaddr = 5515 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 5516 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 5517 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 5518 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 5519 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 5520 phba->sli4_hba.SLIINTFregaddr = 5521 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5522 break; 5523 case LPFC_SLI_INTF_IF_TYPE_2: 5524 phba->sli4_hba.u.if_type2.ERR1regaddr = 5525 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; 5526 phba->sli4_hba.u.if_type2.ERR2regaddr = 5527 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; 5528 phba->sli4_hba.u.if_type2.CTRLregaddr = 5529 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; 5530 phba->sli4_hba.u.if_type2.STATUSregaddr = 5531 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; 5532 phba->sli4_hba.SLIINTFregaddr = 5533 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5534 phba->sli4_hba.PSMPHRregaddr = 5535 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; 5536 phba->sli4_hba.RQDBregaddr = 5537 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5538 phba->sli4_hba.WQDBregaddr = 5539 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 5540 phba->sli4_hba.EQCQDBregaddr = 5541 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 5542 phba->sli4_hba.MQDBregaddr = 5543 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 5544 phba->sli4_hba.BMBXregaddr = 5545 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 5546 break; 5547 case LPFC_SLI_INTF_IF_TYPE_1: 5548 default: 5549 dev_printk(KERN_ERR, &phba->pcidev->dev, 5550 "FATAL - unsupported SLI4 interface type - %d\n", 5551 if_type); 5552 break; 5553 } 5554} 5555 5556/** 5557 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5558 * @phba: pointer to lpfc hba data structure. 5559 * 5560 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5561 * memory map.
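 *
 * Example (illustrative sketch, using only fields set up below): once the
 * BAR1 base has been ioremap()ed into ctrl_regs_memmap_p, each CSR is
 * simply the mapped base plus a fixed offset, so a host semaphore read
 * reduces to:
 *
 *     portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);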
5562 **/ 5563static void 5564lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5565{ 5566 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5567 LPFC_SLIPORT_IF0_SMPHR; 5568 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5569 LPFC_HST_ISR0; 5570 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5571 LPFC_HST_IMR0; 5572 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5573 LPFC_HST_ISCR0; 5574} 5575 5576/** 5577 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5578 * @phba: pointer to lpfc hba data structure. 5579 * @vf: virtual function number 5580 * 5581 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5582 * based on the given viftual function number, @vf. 5583 * 5584 * Return 0 if successful, otherwise -ENODEV. 5585 **/ 5586static int 5587lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5588{ 5589 if (vf > LPFC_VIR_FUNC_MAX) 5590 return -ENODEV; 5591 5592 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5593 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5594 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5595 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5596 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5597 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5598 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5599 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5600 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5601 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5602 return 0; 5603} 5604 5605/** 5606 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5607 * @phba: pointer to lpfc hba data structure. 5608 * 5609 * This routine is invoked to create the bootstrap mailbox 5610 * region consistent with the SLI-4 interface spec. This 5611 * routine allocates all memory necessary to communicate 5612 * mailbox commands to the port and sets up all alignment 5613 * needs. No locks are expected to be held when calling 5614 * this routine. 5615 * 5616 * Return codes 5617 * 0 - successful 5618 * -ENOMEM - could not allocated memory. 5619 **/ 5620static int 5621lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5622{ 5623 uint32_t bmbx_size; 5624 struct lpfc_dmabuf *dmabuf; 5625 struct dma_address *dma_address; 5626 uint32_t pa_addr; 5627 uint64_t phys_addr; 5628 5629 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5630 if (!dmabuf) 5631 return -ENOMEM; 5632 5633 /* 5634 * The bootstrap mailbox region is comprised of 2 parts 5635 * plus an alignment restriction of 16 bytes. 5636 */ 5637 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5638 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5639 bmbx_size, 5640 &dmabuf->phys, 5641 GFP_KERNEL); 5642 if (!dmabuf->virt) { 5643 kfree(dmabuf); 5644 return -ENOMEM; 5645 } 5646 memset(dmabuf->virt, 0, bmbx_size); 5647 5648 /* 5649 * Initialize the bootstrap mailbox pointers now so that the register 5650 * operations are simple later. The mailbox dma address is required 5651 * to be 16-byte aligned. Also align the virtual memory as each 5652 * maibox is copied into the bmbx mailbox region before issuing the 5653 * command to the port. 
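 *
 * Illustrative breakdown (derived from the code below, not part of the
 * original comment): with the physical address aligned to 16 bytes,
 * bits 63:34 of aphys provide the 30-bit value stored in addr_hi and
 * bits 33:4 provide the 30-bit value stored in addr_lo; each value is
 * shifted left by two and tagged with its BIT1_ADDR_HI/LO marker before
 * being written to the port.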
5654 */ 5655 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5656 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5657 5658 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5659 LPFC_ALIGN_16_BYTE); 5660 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5661 LPFC_ALIGN_16_BYTE); 5662 5663 /* 5664 * Set the high and low physical addresses now. The SLI4 alignment 5665 * requirement is 16 bytes and the mailbox is posted to the port 5666 * as two 30-bit addresses. The other data is a bit marking whether 5667 * the 30-bit address is the high or low address. 5668 * Upcast bmbx aphys to 64bits so shift instruction compiles 5669 * clean on 32 bit machines. 5670 */ 5671 dma_address = &phba->sli4_hba.bmbx.dma_address; 5672 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5673 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5674 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5675 LPFC_BMBX_BIT1_ADDR_HI); 5676 5677 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5678 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5679 LPFC_BMBX_BIT1_ADDR_LO); 5680 return 0; 5681} 5682 5683/** 5684 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5685 * @phba: pointer to lpfc hba data structure. 5686 * 5687 * This routine is invoked to teardown the bootstrap mailbox 5688 * region and release all host resources. This routine requires 5689 * the caller to ensure all mailbox commands recovered, no 5690 * additional mailbox comands are sent, and interrupts are disabled 5691 * before calling this routine. 5692 * 5693 **/ 5694static void 5695lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5696{ 5697 dma_free_coherent(&phba->pcidev->dev, 5698 phba->sli4_hba.bmbx.bmbx_size, 5699 phba->sli4_hba.bmbx.dmabuf->virt, 5700 phba->sli4_hba.bmbx.dmabuf->phys); 5701 5702 kfree(phba->sli4_hba.bmbx.dmabuf); 5703 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5704} 5705 5706/** 5707 * lpfc_sli4_read_config - Get the config parameters. 5708 * @phba: pointer to lpfc hba data structure. 5709 * 5710 * This routine is invoked to read the configuration parameters from the HBA. 5711 * The configuration parameters are used to set the base and maximum values 5712 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 5713 * allocation for the port. 5714 * 5715 * Return codes 5716 * 0 - successful 5717 * -ENOMEM - No availble memory 5718 * -EIO - The mailbox failed to complete successfully. 
5719 **/ 5720static int 5721lpfc_sli4_read_config(struct lpfc_hba *phba) 5722{ 5723 LPFC_MBOXQ_t *pmb; 5724 struct lpfc_mbx_read_config *rd_config; 5725 uint32_t rc = 0; 5726 5727 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5728 if (!pmb) { 5729 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5730 "2011 Unable to allocate memory for issuing " 5731 "SLI_CONFIG_SPECIAL mailbox command\n"); 5732 return -ENOMEM; 5733 } 5734 5735 lpfc_read_config(phba, pmb); 5736 5737 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5738 if (rc != MBX_SUCCESS) { 5739 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5740 "2012 Mailbox failed , mbxCmd x%x " 5741 "READ_CONFIG, mbxStatus x%x\n", 5742 bf_get(lpfc_mqe_command, &pmb->u.mqe), 5743 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 5744 rc = -EIO; 5745 } else { 5746 rd_config = &pmb->u.mqe.un.rd_config; 5747 phba->sli4_hba.max_cfg_param.max_xri = 5748 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5749 phba->sli4_hba.max_cfg_param.xri_base = 5750 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 5751 phba->sli4_hba.max_cfg_param.max_vpi = 5752 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 5753 phba->sli4_hba.max_cfg_param.vpi_base = 5754 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 5755 phba->sli4_hba.max_cfg_param.max_rpi = 5756 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 5757 phba->sli4_hba.max_cfg_param.rpi_base = 5758 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 5759 phba->sli4_hba.max_cfg_param.max_vfi = 5760 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 5761 phba->sli4_hba.max_cfg_param.vfi_base = 5762 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5763 phba->sli4_hba.max_cfg_param.max_fcfi = 5764 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5765 phba->sli4_hba.max_cfg_param.fcfi_base = 5766 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); 5767 phba->sli4_hba.max_cfg_param.max_eq = 5768 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5769 phba->sli4_hba.max_cfg_param.max_rq = 5770 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 5771 phba->sli4_hba.max_cfg_param.max_wq = 5772 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 5773 phba->sli4_hba.max_cfg_param.max_cq = 5774 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 5775 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 5776 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 5777 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5778 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5779 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5780 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 
5781 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5782 phba->max_vports = phba->max_vpi; 5783 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5784 "2003 cfg params XRI(B:%d M:%d), " 5785 "VPI(B:%d M:%d) " 5786 "VFI(B:%d M:%d) " 5787 "RPI(B:%d M:%d) " 5788 "FCFI(B:%d M:%d)\n", 5789 phba->sli4_hba.max_cfg_param.xri_base, 5790 phba->sli4_hba.max_cfg_param.max_xri, 5791 phba->sli4_hba.max_cfg_param.vpi_base, 5792 phba->sli4_hba.max_cfg_param.max_vpi, 5793 phba->sli4_hba.max_cfg_param.vfi_base, 5794 phba->sli4_hba.max_cfg_param.max_vfi, 5795 phba->sli4_hba.max_cfg_param.rpi_base, 5796 phba->sli4_hba.max_cfg_param.max_rpi, 5797 phba->sli4_hba.max_cfg_param.fcfi_base, 5798 phba->sli4_hba.max_cfg_param.max_fcfi); 5799 } 5800 mempool_free(pmb, phba->mbox_mem_pool); 5801 5802 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5803 if (phba->cfg_hba_queue_depth > 5804 (phba->sli4_hba.max_cfg_param.max_xri - 5805 lpfc_sli4_get_els_iocb_cnt(phba))) 5806 phba->cfg_hba_queue_depth = 5807 phba->sli4_hba.max_cfg_param.max_xri - 5808 lpfc_sli4_get_els_iocb_cnt(phba); 5809 return rc; 5810} 5811 5812/** 5813 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 5814 * @phba: pointer to lpfc hba data structure. 5815 * 5816 * This routine is invoked to setup the port-side endian order when 5817 * the port if_type is 0. This routine has no function for other 5818 * if_types. 5819 * 5820 * Return codes 5821 * 0 - successful 5822 * -ENOMEM - No availble memory 5823 * -EIO - The mailbox failed to complete successfully. 5824 **/ 5825static int 5826lpfc_setup_endian_order(struct lpfc_hba *phba) 5827{ 5828 LPFC_MBOXQ_t *mboxq; 5829 uint32_t if_type, rc = 0; 5830 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 5831 HOST_ENDIAN_HIGH_WORD1}; 5832 5833 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 5834 switch (if_type) { 5835 case LPFC_SLI_INTF_IF_TYPE_0: 5836 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5837 GFP_KERNEL); 5838 if (!mboxq) { 5839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5840 "0492 Unable to allocate memory for " 5841 "issuing SLI_CONFIG_SPECIAL mailbox " 5842 "command\n"); 5843 return -ENOMEM; 5844 } 5845 5846 /* 5847 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 5848 * two words to contain special data values and no other data. 5849 */ 5850 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 5851 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 5852 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5853 if (rc != MBX_SUCCESS) { 5854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5855 "0493 SLI_CONFIG_SPECIAL mailbox " 5856 "failed with status x%x\n", 5857 rc); 5858 rc = -EIO; 5859 } 5860 mempool_free(mboxq, phba->mbox_mem_pool); 5861 break; 5862 case LPFC_SLI_INTF_IF_TYPE_2: 5863 case LPFC_SLI_INTF_IF_TYPE_1: 5864 default: 5865 break; 5866 } 5867 return rc; 5868} 5869 5870/** 5871 * lpfc_sli4_queue_create - Create all the SLI4 queues 5872 * @phba: pointer to lpfc hba data structure. 5873 * 5874 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 5875 * operation. For each SLI4 queue type, the parameters such as queue entry 5876 * count (queue depth) shall be taken from the module parameter. For now, 5877 * we just use some constant number as place holder. 5878 * 5879 * Return codes 5880 * 0 - successful 5881 * -ENOMEM - No availble memory 5882 * -EIO - The mailbox failed to complete successfully. 
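 *
 * Example (illustrative sketch, not taken from this file): allocation is
 * intentionally separate from hardware setup, so a typical SLI4 bring-up
 * path calls the two routines in order:
 *
 *     rc = lpfc_sli4_queue_create(phba);
 *     if (!rc)
 *             rc = lpfc_sli4_queue_setup(phba);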
5883 **/ 5884static int 5885lpfc_sli4_queue_create(struct lpfc_hba *phba) 5886{ 5887 struct lpfc_queue *qdesc; 5888 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5889 int cfg_fcp_wq_count; 5890 int cfg_fcp_eq_count; 5891 5892 /* 5893 * Sanity check for confiugred queue parameters against the run-time 5894 * device parameters 5895 */ 5896 5897 /* Sanity check on FCP fast-path WQ parameters */ 5898 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 5899 if (cfg_fcp_wq_count > 5900 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 5901 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 5902 LPFC_SP_WQN_DEF; 5903 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 5904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5905 "2581 Not enough WQs (%d) from " 5906 "the pci function for supporting " 5907 "FCP WQs (%d)\n", 5908 phba->sli4_hba.max_cfg_param.max_wq, 5909 phba->cfg_fcp_wq_count); 5910 goto out_error; 5911 } 5912 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5913 "2582 Not enough WQs (%d) from the pci " 5914 "function for supporting the requested " 5915 "FCP WQs (%d), the actual FCP WQs can " 5916 "be supported: %d\n", 5917 phba->sli4_hba.max_cfg_param.max_wq, 5918 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 5919 } 5920 /* The actual number of FCP work queues adopted */ 5921 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 5922 5923 /* Sanity check on FCP fast-path EQ parameters */ 5924 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 5925 if (cfg_fcp_eq_count > 5926 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 5927 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 5928 LPFC_SP_EQN_DEF; 5929 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 5930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5931 "2574 Not enough EQs (%d) from the " 5932 "pci function for supporting FCP " 5933 "EQs (%d)\n", 5934 phba->sli4_hba.max_cfg_param.max_eq, 5935 phba->cfg_fcp_eq_count); 5936 goto out_error; 5937 } 5938 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5939 "2575 Not enough EQs (%d) from the pci " 5940 "function for supporting the requested " 5941 "FCP EQs (%d), the actual FCP EQs can " 5942 "be supported: %d\n", 5943 phba->sli4_hba.max_cfg_param.max_eq, 5944 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 5945 } 5946 /* It does not make sense to have more EQs than WQs */ 5947 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5948 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5949 "2593 The FCP EQ count(%d) cannot be greater " 5950 "than the FCP WQ count(%d), limiting the " 5951 "FCP EQ count to %d\n", cfg_fcp_eq_count, 5952 phba->cfg_fcp_wq_count, 5953 phba->cfg_fcp_wq_count); 5954 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5955 } 5956 /* The actual number of FCP event queues adopted */ 5957 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 5958 /* The overall number of event queues used */ 5959 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 5960 5961 /* 5962 * Create Event Queues (EQs) 5963 */ 5964 5965 /* Get EQ depth from module parameter, fake the default for now */ 5966 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 5967 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 5968 5969 /* Create slow path event queue */ 5970 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5971 phba->sli4_hba.eq_ecount); 5972 if (!qdesc) { 5973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5974 "0496 Failed allocate slow-path EQ\n"); 5975 goto out_error; 5976 } 5977 phba->sli4_hba.sp_eq = qdesc; 5978 5979 /* Create fast-path FCP Event Queue(s) */ 5980 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 5981 
phba->cfg_fcp_eq_count), GFP_KERNEL); 5982 if (!phba->sli4_hba.fp_eq) { 5983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5984 "2576 Failed allocate memory for fast-path " 5985 "EQ record array\n"); 5986 goto out_free_sp_eq; 5987 } 5988 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5989 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5990 phba->sli4_hba.eq_ecount); 5991 if (!qdesc) { 5992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5993 "0497 Failed allocate fast-path EQ\n"); 5994 goto out_free_fp_eq; 5995 } 5996 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5997 } 5998 5999 /* 6000 * Create Complete Queues (CQs) 6001 */ 6002 6003 /* Get CQ depth from module parameter, fake the default for now */ 6004 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 6005 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 6006 6007 /* Create slow-path Mailbox Command Complete Queue */ 6008 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6009 phba->sli4_hba.cq_ecount); 6010 if (!qdesc) { 6011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6012 "0500 Failed allocate slow-path mailbox CQ\n"); 6013 goto out_free_fp_eq; 6014 } 6015 phba->sli4_hba.mbx_cq = qdesc; 6016 6017 /* Create slow-path ELS Complete Queue */ 6018 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6019 phba->sli4_hba.cq_ecount); 6020 if (!qdesc) { 6021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6022 "0501 Failed allocate slow-path ELS CQ\n"); 6023 goto out_free_mbx_cq; 6024 } 6025 phba->sli4_hba.els_cq = qdesc; 6026 6027 6028 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 6029 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6030 phba->cfg_fcp_eq_count), GFP_KERNEL); 6031 if (!phba->sli4_hba.fcp_cq) { 6032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6033 "2577 Failed allocate memory for fast-path " 6034 "CQ record array\n"); 6035 goto out_free_els_cq; 6036 } 6037 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6038 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6039 phba->sli4_hba.cq_ecount); 6040 if (!qdesc) { 6041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6042 "0499 Failed allocate fast-path FCP " 6043 "CQ (%d)\n", fcp_cqidx); 6044 goto out_free_fcp_cq; 6045 } 6046 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6047 } 6048 6049 /* Create Mailbox Command Queue */ 6050 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6051 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6052 6053 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6054 phba->sli4_hba.mq_ecount); 6055 if (!qdesc) { 6056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6057 "0505 Failed allocate slow-path MQ\n"); 6058 goto out_free_fcp_cq; 6059 } 6060 phba->sli4_hba.mbx_wq = qdesc; 6061 6062 /* 6063 * Create all the Work Queues (WQs) 6064 */ 6065 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6066 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6067 6068 /* Create slow-path ELS Work Queue */ 6069 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6070 phba->sli4_hba.wq_ecount); 6071 if (!qdesc) { 6072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6073 "0504 Failed allocate slow-path ELS WQ\n"); 6074 goto out_free_mbx_wq; 6075 } 6076 phba->sli4_hba.els_wq = qdesc; 6077 6078 /* Create fast-path FCP Work Queue(s) */ 6079 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6080 phba->cfg_fcp_wq_count), GFP_KERNEL); 6081 if (!phba->sli4_hba.fcp_wq) { 6082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6083 "2578 Failed allocate memory for fast-path " 6084 "WQ record array\n"); 6085 goto 
out_free_els_wq; 6086 } 6087 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6088 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6089 phba->sli4_hba.wq_ecount); 6090 if (!qdesc) { 6091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6092 "0503 Failed allocate fast-path FCP " 6093 "WQ (%d)\n", fcp_wqidx); 6094 goto out_free_fcp_wq; 6095 } 6096 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 6097 } 6098 6099 /* 6100 * Create Receive Queue (RQ) 6101 */ 6102 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6103 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6104 6105 /* Create Receive Queue for header */ 6106 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6107 phba->sli4_hba.rq_ecount); 6108 if (!qdesc) { 6109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6110 "0506 Failed allocate receive HRQ\n"); 6111 goto out_free_fcp_wq; 6112 } 6113 phba->sli4_hba.hdr_rq = qdesc; 6114 6115 /* Create Receive Queue for data */ 6116 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6117 phba->sli4_hba.rq_ecount); 6118 if (!qdesc) { 6119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6120 "0507 Failed allocate receive DRQ\n"); 6121 goto out_free_hdr_rq; 6122 } 6123 phba->sli4_hba.dat_rq = qdesc; 6124 6125 return 0; 6126 6127out_free_hdr_rq: 6128 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6129 phba->sli4_hba.hdr_rq = NULL; 6130out_free_fcp_wq: 6131 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 6132 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 6133 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6134 } 6135 kfree(phba->sli4_hba.fcp_wq); 6136out_free_els_wq: 6137 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6138 phba->sli4_hba.els_wq = NULL; 6139out_free_mbx_wq: 6140 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6141 phba->sli4_hba.mbx_wq = NULL; 6142out_free_fcp_cq: 6143 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 6144 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 6145 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6146 } 6147 kfree(phba->sli4_hba.fcp_cq); 6148out_free_els_cq: 6149 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6150 phba->sli4_hba.els_cq = NULL; 6151out_free_mbx_cq: 6152 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6153 phba->sli4_hba.mbx_cq = NULL; 6154out_free_fp_eq: 6155 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 6156 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 6157 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6158 } 6159 kfree(phba->sli4_hba.fp_eq); 6160out_free_sp_eq: 6161 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6162 phba->sli4_hba.sp_eq = NULL; 6163out_error: 6164 return -ENOMEM; 6165} 6166 6167/** 6168 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 6169 * @phba: pointer to lpfc hba data structure. 6170 * 6171 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 6172 * operation. 6173 * 6174 * Return codes 6175 * 0 - successful 6176 * -ENOMEM - No availble memory 6177 * -EIO - The mailbox failed to complete successfully. 
6178 **/ 6179static void 6180lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6181{ 6182 int fcp_qidx; 6183 6184 /* Release mailbox command work queue */ 6185 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6186 phba->sli4_hba.mbx_wq = NULL; 6187 6188 /* Release ELS work queue */ 6189 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6190 phba->sli4_hba.els_wq = NULL; 6191 6192 /* Release FCP work queue */ 6193 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6194 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6195 kfree(phba->sli4_hba.fcp_wq); 6196 phba->sli4_hba.fcp_wq = NULL; 6197 6198 /* Release unsolicited receive queue */ 6199 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6200 phba->sli4_hba.hdr_rq = NULL; 6201 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6202 phba->sli4_hba.dat_rq = NULL; 6203 6204 /* Release ELS complete queue */ 6205 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6206 phba->sli4_hba.els_cq = NULL; 6207 6208 /* Release mailbox command complete queue */ 6209 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6210 phba->sli4_hba.mbx_cq = NULL; 6211 6212 /* Release FCP response complete queue */ 6213 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6214 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6215 kfree(phba->sli4_hba.fcp_cq); 6216 phba->sli4_hba.fcp_cq = NULL; 6217 6218 /* Release fast-path event queue */ 6219 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6220 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6221 kfree(phba->sli4_hba.fp_eq); 6222 phba->sli4_hba.fp_eq = NULL; 6223 6224 /* Release slow-path event queue */ 6225 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6226 phba->sli4_hba.sp_eq = NULL; 6227 6228 return; 6229} 6230 6231/** 6232 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 6233 * @phba: pointer to lpfc hba data structure. 6234 * 6235 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 6236 * operation. 6237 * 6238 * Return codes 6239 * 0 - successful 6240 * -ENOMEM - No availble memory 6241 * -EIO - The mailbox failed to complete successfully. 
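 *
 * Example (illustrative sketch, not taken from this file): teardown
 * mirrors this bring-up; the queues are first unset on the port and only
 * then is their memory released:
 *
 *     lpfc_sli4_queue_unset(phba);
 *     lpfc_sli4_queue_destroy(phba);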
6242 **/ 6243int 6244lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6245{ 6246 int rc = -ENOMEM; 6247 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6248 int fcp_cq_index = 0; 6249 6250 /* 6251 * Set up Event Queues (EQs) 6252 */ 6253 6254 /* Set up slow-path event queue */ 6255 if (!phba->sli4_hba.sp_eq) { 6256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6257 "0520 Slow-path EQ not allocated\n"); 6258 goto out_error; 6259 } 6260 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6261 LPFC_SP_DEF_IMAX); 6262 if (rc) { 6263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6264 "0521 Failed setup of slow-path EQ: " 6265 "rc = 0x%x\n", rc); 6266 goto out_error; 6267 } 6268 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6269 "2583 Slow-path EQ setup: queue-id=%d\n", 6270 phba->sli4_hba.sp_eq->queue_id); 6271 6272 /* Set up fast-path event queue */ 6273 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6274 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6276 "0522 Fast-path EQ (%d) not " 6277 "allocated\n", fcp_eqidx); 6278 goto out_destroy_fp_eq; 6279 } 6280 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6281 phba->cfg_fcp_imax); 6282 if (rc) { 6283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6284 "0523 Failed setup of fast-path EQ " 6285 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6286 goto out_destroy_fp_eq; 6287 } 6288 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6289 "2584 Fast-path EQ setup: " 6290 "queue[%d]-id=%d\n", fcp_eqidx, 6291 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6292 } 6293 6294 /* 6295 * Set up Complete Queues (CQs) 6296 */ 6297 6298 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6299 if (!phba->sli4_hba.mbx_cq) { 6300 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6301 "0528 Mailbox CQ not allocated\n"); 6302 goto out_destroy_fp_eq; 6303 } 6304 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6305 LPFC_MCQ, LPFC_MBOX); 6306 if (rc) { 6307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6308 "0529 Failed setup of slow-path mailbox CQ: " 6309 "rc = 0x%x\n", rc); 6310 goto out_destroy_fp_eq; 6311 } 6312 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6313 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6314 phba->sli4_hba.mbx_cq->queue_id, 6315 phba->sli4_hba.sp_eq->queue_id); 6316 6317 /* Set up slow-path ELS Complete Queue */ 6318 if (!phba->sli4_hba.els_cq) { 6319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6320 "0530 ELS CQ not allocated\n"); 6321 goto out_destroy_mbx_cq; 6322 } 6323 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6324 LPFC_WCQ, LPFC_ELS); 6325 if (rc) { 6326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6327 "0531 Failed setup of slow-path ELS CQ: " 6328 "rc = 0x%x\n", rc); 6329 goto out_destroy_mbx_cq; 6330 } 6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6332 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6333 phba->sli4_hba.els_cq->queue_id, 6334 phba->sli4_hba.sp_eq->queue_id); 6335 6336 /* Set up fast-path FCP Response Complete Queue */ 6337 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6338 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6340 "0526 Fast-path FCP CQ (%d) not " 6341 "allocated\n", fcp_cqidx); 6342 goto out_destroy_fcp_cq; 6343 } 6344 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6345 phba->sli4_hba.fp_eq[fcp_cqidx], 6346 LPFC_WCQ, LPFC_FCP); 6347 if (rc) { 6348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6349 "0527 Failed setup of fast-path FCP " 6350 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 6351 goto out_destroy_fcp_cq; 6352 } 6353 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6354 "2588 FCP CQ setup: cq[%d]-id=%d, " 6355 "parent eq[%d]-id=%d\n", 6356 fcp_cqidx, 6357 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6358 fcp_cqidx, 6359 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6360 } 6361 6362 /* 6363 * Set up all the Work Queues (WQs) 6364 */ 6365 6366 /* Set up Mailbox Command Queue */ 6367 if (!phba->sli4_hba.mbx_wq) { 6368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6369 "0538 Slow-path MQ not allocated\n"); 6370 goto out_destroy_fcp_cq; 6371 } 6372 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6373 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6374 if (rc) { 6375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6376 "0539 Failed setup of slow-path MQ: " 6377 "rc = 0x%x\n", rc); 6378 goto out_destroy_fcp_cq; 6379 } 6380 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6381 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6382 phba->sli4_hba.mbx_wq->queue_id, 6383 phba->sli4_hba.mbx_cq->queue_id); 6384 6385 /* Set up slow-path ELS Work Queue */ 6386 if (!phba->sli4_hba.els_wq) { 6387 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6388 "0536 Slow-path ELS WQ not allocated\n"); 6389 goto out_destroy_mbx_wq; 6390 } 6391 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6392 phba->sli4_hba.els_cq, LPFC_ELS); 6393 if (rc) { 6394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6395 "0537 Failed setup of slow-path ELS WQ: " 6396 "rc = 0x%x\n", rc); 6397 goto out_destroy_mbx_wq; 6398 } 6399 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6400 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6401 phba->sli4_hba.els_wq->queue_id, 6402 phba->sli4_hba.els_cq->queue_id); 6403 6404 /* Set up fast-path FCP Work Queue */ 6405 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6406 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6407 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6408 "0534 Fast-path FCP WQ (%d) not " 6409 "allocated\n", fcp_wqidx); 6410 goto out_destroy_fcp_wq; 6411 } 6412 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6413 phba->sli4_hba.fcp_cq[fcp_cq_index], 6414 LPFC_FCP); 6415 if (rc) { 6416 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6417 "0535 Failed setup of fast-path FCP " 6418 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6419 goto out_destroy_fcp_wq; 6420 } 6421 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6422 "2591 FCP WQ setup: wq[%d]-id=%d, " 6423 "parent cq[%d]-id=%d\n", 6424 fcp_wqidx, 6425 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6426 fcp_cq_index, 6427 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6428 /* Round robin FCP Work Queue's Completion Queue assignment */ 6429 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6430 } 6431 6432 /* 6433 * Create Receive Queue (RQ) 6434 */ 6435 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6437 "0540 Receive Queue not allocated\n"); 6438 goto out_destroy_fcp_wq; 6439 } 6440 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6441 phba->sli4_hba.els_cq, LPFC_USOL); 6442 if (rc) { 6443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6444 "0541 Failed setup of Receive Queue: " 6445 "rc = 0x%x\n", rc); 6446 goto out_destroy_fcp_wq; 6447 } 6448 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6449 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6450 "parent cq-id=%d\n", 6451 phba->sli4_hba.hdr_rq->queue_id, 6452 phba->sli4_hba.dat_rq->queue_id, 6453 phba->sli4_hba.els_cq->queue_id); 6454 return 0; 6455 6456out_destroy_fcp_wq: 6457 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--) 6458 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6459 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6460out_destroy_mbx_wq: 6461 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6462out_destroy_fcp_cq: 6463 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6464 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6465 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6466out_destroy_mbx_cq: 6467 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6468out_destroy_fp_eq: 6469 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6470 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6471 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6472out_error: 6473 return rc; 6474} 6475 6476/** 6477 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6478 * @phba: pointer to lpfc hba data structure. 6479 * 6480 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 6481 * operation. 6482 * 6483 * Return codes 6484 * 0 - successful 6485 * -ENOMEM - No availble memory 6486 * -EIO - The mailbox failed to complete successfully. 6487 **/ 6488void 6489lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6490{ 6491 int fcp_qidx; 6492 6493 /* Unset mailbox command work queue */ 6494 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6495 /* Unset ELS work queue */ 6496 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6497 /* Unset unsolicited receive queue */ 6498 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6499 /* Unset FCP work queue */ 6500 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6501 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6502 /* Unset mailbox command complete queue */ 6503 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6504 /* Unset ELS complete queue */ 6505 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6506 /* Unset FCP response complete queue */ 6507 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6508 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6509 /* Unset fast-path event queue */ 6510 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6511 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6512 /* Unset slow-path event queue */ 6513 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6514} 6515 6516/** 6517 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6518 * @phba: pointer to lpfc hba data structure. 6519 * 6520 * This routine is invoked to allocate and set up a pool of completion queue 6521 * events. The body of the completion queue event is a completion queue entry 6522 * CQE. For now, this pool is used for the interrupt service routine to queue 6523 * the following HBA completion queue events for the worker thread to process: 6524 * - Mailbox asynchronous events 6525 * - Receive queue completion unsolicited events 6526 * Later, this can be used for all the slow-path events. 
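 *
 * Example (illustrative sketch, using only routines defined below): the
 * interrupt handler takes an event from this pool, hands it to the worker
 * thread, and the worker returns it once the event has been processed:
 *
 *     cq_event = lpfc_sli4_cq_event_alloc(phba);
 *     ... queue cq_event on a work list for the worker thread ...
 *     lpfc_sli4_cq_event_release(phba, cq_event);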
6527 * 6528 * Return codes 6529 * 0 - successful 6530 * -ENOMEM - No availble memory 6531 **/ 6532static int 6533lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6534{ 6535 struct lpfc_cq_event *cq_event; 6536 int i; 6537 6538 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6539 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6540 if (!cq_event) 6541 goto out_pool_create_fail; 6542 list_add_tail(&cq_event->list, 6543 &phba->sli4_hba.sp_cqe_event_pool); 6544 } 6545 return 0; 6546 6547out_pool_create_fail: 6548 lpfc_sli4_cq_event_pool_destroy(phba); 6549 return -ENOMEM; 6550} 6551 6552/** 6553 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6554 * @phba: pointer to lpfc hba data structure. 6555 * 6556 * This routine is invoked to free the pool of completion queue events at 6557 * driver unload time. Note that, it is the responsibility of the driver 6558 * cleanup routine to free all the outstanding completion-queue events 6559 * allocated from this pool back into the pool before invoking this routine 6560 * to destroy the pool. 6561 **/ 6562static void 6563lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6564{ 6565 struct lpfc_cq_event *cq_event, *next_cq_event; 6566 6567 list_for_each_entry_safe(cq_event, next_cq_event, 6568 &phba->sli4_hba.sp_cqe_event_pool, list) { 6569 list_del(&cq_event->list); 6570 kfree(cq_event); 6571 } 6572} 6573 6574/** 6575 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6576 * @phba: pointer to lpfc hba data structure. 6577 * 6578 * This routine is the lock free version of the API invoked to allocate a 6579 * completion-queue event from the free pool. 6580 * 6581 * Return: Pointer to the newly allocated completion-queue event if successful 6582 * NULL otherwise. 6583 **/ 6584struct lpfc_cq_event * 6585__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6586{ 6587 struct lpfc_cq_event *cq_event = NULL; 6588 6589 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6590 struct lpfc_cq_event, list); 6591 return cq_event; 6592} 6593 6594/** 6595 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6596 * @phba: pointer to lpfc hba data structure. 6597 * 6598 * This routine is the lock version of the API invoked to allocate a 6599 * completion-queue event from the free pool. 6600 * 6601 * Return: Pointer to the newly allocated completion-queue event if successful 6602 * NULL otherwise. 6603 **/ 6604struct lpfc_cq_event * 6605lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6606{ 6607 struct lpfc_cq_event *cq_event; 6608 unsigned long iflags; 6609 6610 spin_lock_irqsave(&phba->hbalock, iflags); 6611 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6612 spin_unlock_irqrestore(&phba->hbalock, iflags); 6613 return cq_event; 6614} 6615 6616/** 6617 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6618 * @phba: pointer to lpfc hba data structure. 6619 * @cq_event: pointer to the completion queue event to be freed. 6620 * 6621 * This routine is the lock free version of the API invoked to release a 6622 * completion-queue event back into the free pool. 6623 **/ 6624void 6625__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6626 struct lpfc_cq_event *cq_event) 6627{ 6628 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6629} 6630 6631/** 6632 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6633 * @phba: pointer to lpfc hba data structure. 
* @cq_event: pointer to the completion queue event to be freed. 6635 * 6636 * This routine is the lock version of the API invoked to release a 6637 * completion-queue event back into the free pool. 6638 **/ 6639 void 6640 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6641 struct lpfc_cq_event *cq_event) 6642 { 6643 unsigned long iflags; 6644 spin_lock_irqsave(&phba->hbalock, iflags); 6645 __lpfc_sli4_cq_event_release(phba, cq_event); 6646 spin_unlock_irqrestore(&phba->hbalock, iflags); 6647} 6648 6649/** 6650 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 6651 * @phba: pointer to lpfc hba data structure. 6652 * 6653 * This routine is invoked to free all the pending completion-queue events 6654 * back into the free pool for device reset. 6655 **/ 6656 static void 6657 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 6658 { 6659 LIST_HEAD(cqelist); 6660 struct lpfc_cq_event *cqe; 6661 unsigned long iflags; 6662 6663 /* Retrieve all the pending WCQEs from pending WCQE lists */ 6664 spin_lock_irqsave(&phba->hbalock, iflags); 6665 /* Pending FCP XRI abort events */ 6666 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 6667 &cqelist); 6668 /* Pending ELS XRI abort events */ 6669 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 6670 &cqelist); 6671 /* Pending async events */ 6672 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 6673 &cqelist); 6674 spin_unlock_irqrestore(&phba->hbalock, iflags); 6675 6676 while (!list_empty(&cqelist)) { 6677 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 6678 lpfc_sli4_cq_event_release(phba, cqe); 6679 } 6680} 6681 6682/** 6683 * lpfc_pci_function_reset - Reset pci function. 6684 * @phba: pointer to lpfc hba data structure. 6685 * 6686 * This routine is invoked to request a PCI function reset. It destroys 6687 * all resources assigned to the PCI function which originates this request. 6688 * 6689 * Return codes 6690 * 0 - successful 6691 * -ENOMEM - No available memory 6692 * -EIO - The mailbox failed to complete successfully.
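 *
 * Example (hypothetical usage sketch, not from this file): error-recovery
 * paths request the reset and give up on the port if it does not complete:
 *
 *     if (lpfc_pci_function_reset(phba))
 *             ... treat the port as unusable and unwind ...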
6693 **/ 6694 int 6695 lpfc_pci_function_reset(struct lpfc_hba *phba) 6696 { 6697 LPFC_MBOXQ_t *mboxq; 6698 uint32_t rc = 0, if_type; 6699 uint32_t shdr_status, shdr_add_status; 6700 uint32_t rdy_chk, num_resets = 0, reset_again = 0; 6701 union lpfc_sli4_cfg_shdr *shdr; 6702 struct lpfc_register reg_data; 6703 6704 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6705 switch (if_type) { 6706 case LPFC_SLI_INTF_IF_TYPE_0: 6707 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6708 GFP_KERNEL); 6709 if (!mboxq) { 6710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6711 "0494 Unable to allocate memory for " 6712 "issuing SLI_FUNCTION_RESET mailbox " 6713 "command\n"); 6714 return -ENOMEM; 6715 } 6716 6717 /* Setup PCI function reset mailbox-ioctl command */ 6718 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6719 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 6720 LPFC_SLI4_MBX_EMBED); 6721 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6722 shdr = (union lpfc_sli4_cfg_shdr *) 6723 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6724 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6725 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6726 &shdr->response); 6727 if (rc != MBX_TIMEOUT) 6728 mempool_free(mboxq, phba->mbox_mem_pool); 6729 if (shdr_status || shdr_add_status || rc) { 6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6731 "0495 SLI_FUNCTION_RESET mailbox " 6732 "failed with status x%x add_status x%x," 6733 " mbx status x%x\n", 6734 shdr_status, shdr_add_status, rc); 6735 rc = -ENXIO; 6736 } 6737 break; 6738 case LPFC_SLI_INTF_IF_TYPE_2: 6739 for (num_resets = 0; 6740 num_resets < MAX_IF_TYPE_2_RESETS; 6741 num_resets++) { 6742 reg_data.word0 = 0; 6743 bf_set(lpfc_sliport_ctrl_end, &reg_data, 6744 LPFC_SLIPORT_LITTLE_ENDIAN); 6745 bf_set(lpfc_sliport_ctrl_ip, &reg_data, 6746 LPFC_SLIPORT_INIT_PORT); 6747 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 6748 CTRLregaddr); 6749 6750 /* 6751 * Poll the Port Status Register and wait for RDY for 6752 * up to 10 seconds. If the port doesn't respond, treat 6753 * it as an error. If the port responds with RN, start 6754 * the loop again. 6755 */ 6756 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { 6757 reg_data.word0 = 6758 readl(phba->sli4_hba.u.if_type2. 6759 STATUSregaddr); 6760 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 6761 break; 6762 if (bf_get(lpfc_sliport_status_rn, &reg_data)) { 6763 reset_again++; 6764 break; 6765 } 6766 msleep(10); 6767 } 6768 6769 /* 6770 * If the port responds to the init request with 6771 * reset needed, delay for a bit and restart the loop. 6772 */ 6773 if (reset_again) { 6774 msleep(10); 6775 reset_again = 0; 6776 continue; 6777 } 6778 6779 /* Detect any port errors. */ 6780 reg_data.word0 = readl(phba->sli4_hba.u.if_type2. 6781 STATUSregaddr); 6782 if ((bf_get(lpfc_sliport_status_err, &reg_data)) || 6783 (rdy_chk >= 1000)) { 6784 phba->work_status[0] = readl( 6785 phba->sli4_hba.u.if_type2.ERR1regaddr); 6786 phba->work_status[1] = readl( 6787 phba->sli4_hba.u.if_type2.ERR2regaddr); 6788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6789 "2890 Port Error Detected " 6790 "during Port Reset: " 6791 "port status reg 0x%x, " 6792 "error 1=0x%x, error 2=0x%x\n", 6793 reg_data.word0, 6794 phba->work_status[0], 6795 phba->work_status[1]); 6796 rc = -ENODEV; 6797 } 6798 6799 /* 6800 * Terminate the outer loop provided the Port indicated 6801 * ready within 10 seconds.
6802 */ 6803 if (rdy_chk < 1000) 6804 break; 6805 } 6806 break; 6807 case LPFC_SLI_INTF_IF_TYPE_1: 6808 default: 6809 break; 6810 } 6811 6812 /* Catch the not-ready port failure after a port reset. */ 6813 if (num_resets >= MAX_IF_TYPE_2_RESETS) 6814 rc = -ENODEV; 6815 6816 return rc; 6817} 6818 6819/** 6820 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 6821 * @phba: pointer to lpfc hba data structure. 6822 * @cnt: number of nop mailbox commands to send. 6823 * 6824 * This routine is invoked to send a number @cnt of NOP mailbox command and 6825 * wait for each command to complete. 6826 * 6827 * Return: the number of NOP mailbox command completed. 6828 **/ 6829static int 6830lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 6831{ 6832 LPFC_MBOXQ_t *mboxq; 6833 int length, cmdsent; 6834 uint32_t mbox_tmo; 6835 uint32_t rc = 0; 6836 uint32_t shdr_status, shdr_add_status; 6837 union lpfc_sli4_cfg_shdr *shdr; 6838 6839 if (cnt == 0) { 6840 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6841 "2518 Requested to send 0 NOP mailbox cmd\n"); 6842 return cnt; 6843 } 6844 6845 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6846 if (!mboxq) { 6847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6848 "2519 Unable to allocate memory for issuing " 6849 "NOP mailbox command\n"); 6850 return 0; 6851 } 6852 6853 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 6854 length = (sizeof(struct lpfc_mbx_nop) - 6855 sizeof(struct lpfc_sli4_cfg_mhdr)); 6856 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6857 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 6858 6859 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6860 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 6861 if (!phba->sli4_hba.intr_enable) 6862 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6863 else 6864 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 6865 if (rc == MBX_TIMEOUT) 6866 break; 6867 /* Check return status */ 6868 shdr = (union lpfc_sli4_cfg_shdr *) 6869 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6870 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6871 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6872 &shdr->response); 6873 if (shdr_status || shdr_add_status || rc) { 6874 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6875 "2520 NOP mailbox command failed " 6876 "status x%x add_status x%x mbx " 6877 "status x%x\n", shdr_status, 6878 shdr_add_status, rc); 6879 break; 6880 } 6881 } 6882 6883 if (rc != MBX_TIMEOUT) 6884 mempool_free(mboxq, phba->mbox_mem_pool); 6885 6886 return cmdsent; 6887} 6888 6889/** 6890 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 6891 * @phba: pointer to lpfc hba data structure. 6892 * 6893 * This routine is invoked to set up the PCI device memory space for device 6894 * with SLI-4 interface spec. 
6895 * 6896 * Return codes 6897 * 0 - successful 6898 * other values - error 6899 **/ 6900static int 6901lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6902{ 6903 struct pci_dev *pdev; 6904 unsigned long bar0map_len, bar1map_len, bar2map_len; 6905 int error = -ENODEV; 6906 uint32_t if_type; 6907 6908 /* Obtain PCI device reference */ 6909 if (!phba->pcidev) 6910 return error; 6911 else 6912 pdev = phba->pcidev; 6913 6914 /* Set the device DMA mask size */ 6915 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6916 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6918 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6919 return error; 6920 } 6921 } 6922 6923 /* 6924 * The BARs and register set definitions and offset locations are 6925 * dependent on the if_type. 6926 */ 6927 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 6928 &phba->sli4_hba.sli_intf.word0)) { 6929 return error; 6930 } 6931 6932 /* There is no SLI3 failback for SLI4 devices. */ 6933 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 6934 LPFC_SLI_INTF_VALID) { 6935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6936 "2894 SLI_INTF reg contents invalid " 6937 "sli_intf reg 0x%x\n", 6938 phba->sli4_hba.sli_intf.word0); 6939 return error; 6940 } 6941 6942 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6943 /* 6944 * Get the bus address of SLI4 device Bar regions and the 6945 * number of bytes required by each mapping. The mapping of the 6946 * particular PCI BARs regions is dependent on the type of 6947 * SLI4 device. 6948 */ 6949 if (pci_resource_start(pdev, 0)) { 6950 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6951 bar0map_len = pci_resource_len(pdev, 0); 6952 6953 /* 6954 * Map SLI4 PCI Config Space Register base to a kernel virtual 6955 * addr 6956 */ 6957 phba->sli4_hba.conf_regs_memmap_p = 6958 ioremap(phba->pci_bar0_map, bar0map_len); 6959 if (!phba->sli4_hba.conf_regs_memmap_p) { 6960 dev_printk(KERN_ERR, &pdev->dev, 6961 "ioremap failed for SLI4 PCI config " 6962 "registers.\n"); 6963 goto out; 6964 } 6965 /* Set up BAR0 PCI config space register memory map */ 6966 lpfc_sli4_bar0_register_memmap(phba, if_type); 6967 } else { 6968 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6969 bar0map_len = pci_resource_len(pdev, 1); 6970 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 6971 dev_printk(KERN_ERR, &pdev->dev, 6972 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 6973 goto out; 6974 } 6975 phba->sli4_hba.conf_regs_memmap_p = 6976 ioremap(phba->pci_bar0_map, bar0map_len); 6977 if (!phba->sli4_hba.conf_regs_memmap_p) { 6978 dev_printk(KERN_ERR, &pdev->dev, 6979 "ioremap failed for SLI4 PCI config " 6980 "registers.\n"); 6981 goto out; 6982 } 6983 lpfc_sli4_bar0_register_memmap(phba, if_type); 6984 } 6985 6986 if (pci_resource_start(pdev, 2)) { 6987 /* 6988 * Map SLI4 if type 0 HBA Control Register base to a kernel 6989 * virtual address and setup the registers. 
6990 */ 6991 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6992 bar1map_len = pci_resource_len(pdev, 2); 6993 phba->sli4_hba.ctrl_regs_memmap_p = 6994 ioremap(phba->pci_bar1_map, bar1map_len); 6995 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6996 dev_printk(KERN_ERR, &pdev->dev, 6997 "ioremap failed for SLI4 HBA control registers.\n"); 6998 goto out_iounmap_conf; 6999 } 7000 lpfc_sli4_bar1_register_memmap(phba); 7001 } 7002 7003 if (pci_resource_start(pdev, 4)) { 7004 /* 7005 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7006 * virtual address and setup the registers. 7007 */ 7008 phba->pci_bar2_map = pci_resource_start(pdev, 4); 7009 bar2map_len = pci_resource_len(pdev, 4); 7010 phba->sli4_hba.drbl_regs_memmap_p = 7011 ioremap(phba->pci_bar2_map, bar2map_len); 7012 if (!phba->sli4_hba.drbl_regs_memmap_p) { 7013 dev_printk(KERN_ERR, &pdev->dev, 7014 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7015 goto out_iounmap_ctrl; 7016 } 7017 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 7018 if (error) 7019 goto out_iounmap_all; 7020 } 7021 7022 return 0; 7023 7024out_iounmap_all: 7025 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7026out_iounmap_ctrl: 7027 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7028out_iounmap_conf: 7029 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7030out: 7031 return error; 7032} 7033 7034/** 7035 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 7036 * @phba: pointer to lpfc hba data structure. 7037 * 7038 * This routine is invoked to unset the PCI device memory space for device 7039 * with SLI-4 interface spec. 7040 **/ 7041static void 7042lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 7043{ 7044 struct pci_dev *pdev; 7045 7046 /* Obtain PCI device reference */ 7047 if (!phba->pcidev) 7048 return; 7049 else 7050 pdev = phba->pcidev; 7051 7052 /* Free coherent DMA memory allocated */ 7053 7054 /* Unmap I/O memory space */ 7055 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7056 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7057 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7058 7059 return; 7060} 7061 7062/** 7063 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 7064 * @phba: pointer to lpfc hba data structure. 7065 * 7066 * This routine is invoked to enable the MSI-X interrupt vectors to device 7067 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 7068 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 7069 * invoked, enables either all or nothing, depending on the current 7070 * availability of PCI vector resources. The device driver is responsible 7071 * for calling the individual request_irq() to register each MSI-X vector 7072 * with a interrupt handler, which is done in this function. Note that 7073 * later when device is unloading, the driver should always call free_irq() 7074 * on all MSI-X vectors it has done request_irq() on before calling 7075 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 7076 * will be left with MSI-X enabled and leaks its vectors. 
7077 * 7078 * Return codes 7079 * 0 - successful 7080 * other values - error 7081 **/ 7082static int 7083lpfc_sli_enable_msix(struct lpfc_hba *phba) 7084{ 7085 int rc, i; 7086 LPFC_MBOXQ_t *pmb; 7087 7088 /* Set up MSI-X multi-message vectors */ 7089 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7090 phba->msix_entries[i].entry = i; 7091 7092 /* Configure MSI-X capability structure */ 7093 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 7094 ARRAY_SIZE(phba->msix_entries)); 7095 if (rc) { 7096 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7097 "0420 PCI enable MSI-X failed (%d)\n", rc); 7098 goto msi_fail_out; 7099 } 7100 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7101 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7102 "0477 MSI-X entry[%d]: vector=x%x " 7103 "message=%d\n", i, 7104 phba->msix_entries[i].vector, 7105 phba->msix_entries[i].entry); 7106 /* 7107 * Assign MSI-X vectors to interrupt handlers 7108 */ 7109 7110 /* vector-0 is associated to slow-path handler */ 7111 rc = request_irq(phba->msix_entries[0].vector, 7112 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 7113 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7114 if (rc) { 7115 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7116 "0421 MSI-X slow-path request_irq failed " 7117 "(%d)\n", rc); 7118 goto msi_fail_out; 7119 } 7120 7121 /* vector-1 is associated to fast-path handler */ 7122 rc = request_irq(phba->msix_entries[1].vector, 7123 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 7124 LPFC_FP_DRIVER_HANDLER_NAME, phba); 7125 7126 if (rc) { 7127 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7128 "0429 MSI-X fast-path request_irq failed " 7129 "(%d)\n", rc); 7130 goto irq_fail_out; 7131 } 7132 7133 /* 7134 * Configure HBA MSI-X attention conditions to messages 7135 */ 7136 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7137 7138 if (!pmb) { 7139 rc = -ENOMEM; 7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7141 "0474 Unable to allocate memory for issuing " 7142 "MBOX_CONFIG_MSI command\n"); 7143 goto mem_fail_out; 7144 } 7145 rc = lpfc_config_msi(phba, pmb); 7146 if (rc) 7147 goto mbx_fail_out; 7148 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7149 if (rc != MBX_SUCCESS) { 7150 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 7151 "0351 Config MSI mailbox command failed, " 7152 "mbxCmd x%x, mbxStatus x%x\n", 7153 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 7154 goto mbx_fail_out; 7155 } 7156 7157 /* Free memory allocated for mailbox command */ 7158 mempool_free(pmb, phba->mbox_mem_pool); 7159 return rc; 7160 7161mbx_fail_out: 7162 /* Free memory allocated for mailbox command */ 7163 mempool_free(pmb, phba->mbox_mem_pool); 7164 7165mem_fail_out: 7166 /* free the irq already requested */ 7167 free_irq(phba->msix_entries[1].vector, phba); 7168 7169irq_fail_out: 7170 /* free the irq already requested */ 7171 free_irq(phba->msix_entries[0].vector, phba); 7172 7173msi_fail_out: 7174 /* Unconfigure MSI-X capability structure */ 7175 pci_disable_msix(phba->pcidev); 7176 return rc; 7177} 7178 7179/** 7180 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 7181 * @phba: pointer to lpfc hba data structure. 7182 * 7183 * This routine is invoked to release the MSI-X vectors and then disable the 7184 * MSI-X interrupt mode to device with SLI-3 interface spec. 
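 *
 * Example (illustrative sketch, not from this file): this is the
 * tear-down counterpart of lpfc_sli_enable_msix() above, so the two are
 * expected to be used as a matched pair:
 *
 *     rc = lpfc_sli_enable_msix(phba);
 *     ...
 *     lpfc_sli_disable_msix(phba);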
7185 **/ 7186static void 7187lpfc_sli_disable_msix(struct lpfc_hba *phba) 7188{ 7189 int i; 7190 7191 /* Free up MSI-X multi-message vectors */ 7192 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7193 free_irq(phba->msix_entries[i].vector, phba); 7194 /* Disable MSI-X */ 7195 pci_disable_msix(phba->pcidev); 7196 7197 return; 7198} 7199 7200/** 7201 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 7202 * @phba: pointer to lpfc hba data structure. 7203 * 7204 * This routine is invoked to enable the MSI interrupt mode to device with 7205 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 7206 * enable the MSI vector. The device driver is responsible for calling 7207 * request_irq() to register the MSI vector with an interrupt handler, which 7208 * is done in this function. 7209 * 7210 * Return codes 7211 * 0 - successful 7212 * other values - error 7213 */ 7214static int 7215lpfc_sli_enable_msi(struct lpfc_hba *phba) 7216{ 7217 int rc; 7218 7219 rc = pci_enable_msi(phba->pcidev); 7220 if (!rc) 7221 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7222 "0462 PCI enable MSI mode success.\n"); 7223 else { 7224 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7225 "0471 PCI enable MSI mode failed (%d)\n", rc); 7226 return rc; 7227 } 7228 7229 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 7230 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7231 if (rc) { 7232 pci_disable_msi(phba->pcidev); 7233 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7234 "0478 MSI request_irq failed (%d)\n", rc); 7235 } 7236 return rc; 7237} 7238 7239/** 7240 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 7241 * @phba: pointer to lpfc hba data structure. 7242 * 7243 * This routine is invoked to disable the MSI interrupt mode to device with 7244 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has 7245 * done request_irq() on before calling pci_disable_msi(). Failure to do so 7246 * results in a BUG_ON() and the device will be left with MSI enabled, leaking 7247 * its vector. 7248 */ 7249static void 7250lpfc_sli_disable_msi(struct lpfc_hba *phba) 7251{ 7252 free_irq(phba->pcidev->irq, phba); 7253 pci_disable_msi(phba->pcidev); 7254 return; 7255} 7256 7257/** 7258 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 7259 * @phba: pointer to lpfc hba data structure. 7260 * 7261 * This routine is invoked to enable the device interrupt and associate the 7262 * driver's interrupt handler(s) with the interrupt vector(s) of a device with 7263 * SLI-3 interface spec. Depending on the interrupt mode configured for the 7264 * driver, the driver will try to fall back from the configured interrupt mode 7265 * to an interrupt mode which is supported by the platform, kernel, and device, 7266 * in the order of: 7267 * MSI-X -> MSI -> IRQ.
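 *
 * Summary of the logic below: cfg_mode 2 requests MSI-X first, cfg_mode 1
 * starts at MSI, and INTx is the final fallback for any cfg_mode. The
 * returned value is 2, 1 or 0 for MSI-X, MSI or INTx respectively, or
 * LPFC_INTR_ERROR if no interrupt mode could be enabled; the probe path
 * steps down one mode (cfg_mode = --intr_mode) and retries when the
 * active-interrupt test fails.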
7268 * 7269 * Return codes 7270 * 0 - successful 7271 * other values - error 7272 **/ 7273static uint32_t 7274lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 7275{ 7276 uint32_t intr_mode = LPFC_INTR_ERROR; 7277 int retval; 7278 7279 if (cfg_mode == 2) { 7280 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 7281 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 7282 if (!retval) { 7283 /* Now, try to enable MSI-X interrupt mode */ 7284 retval = lpfc_sli_enable_msix(phba); 7285 if (!retval) { 7286 /* Indicate initialization to MSI-X mode */ 7287 phba->intr_type = MSIX; 7288 intr_mode = 2; 7289 } 7290 } 7291 } 7292 7293 /* Fallback to MSI if MSI-X initialization failed */ 7294 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7295 retval = lpfc_sli_enable_msi(phba); 7296 if (!retval) { 7297 /* Indicate initialization to MSI mode */ 7298 phba->intr_type = MSI; 7299 intr_mode = 1; 7300 } 7301 } 7302 7303 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7304 if (phba->intr_type == NONE) { 7305 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 7306 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7307 if (!retval) { 7308 /* Indicate initialization to INTx mode */ 7309 phba->intr_type = INTx; 7310 intr_mode = 0; 7311 } 7312 } 7313 return intr_mode; 7314} 7315 7316/** 7317 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 7318 * @phba: pointer to lpfc hba data structure. 7319 * 7320 * This routine is invoked to disable device interrupt and disassociate the 7321 * driver's interrupt handler(s) from interrupt vector(s) to device with 7322 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 7323 * release the interrupt vector(s) for the message signaled interrupt. 7324 **/ 7325static void 7326lpfc_sli_disable_intr(struct lpfc_hba *phba) 7327{ 7328 /* Disable the currently initialized interrupt mode */ 7329 if (phba->intr_type == MSIX) 7330 lpfc_sli_disable_msix(phba); 7331 else if (phba->intr_type == MSI) 7332 lpfc_sli_disable_msi(phba); 7333 else if (phba->intr_type == INTx) 7334 free_irq(phba->pcidev->irq, phba); 7335 7336 /* Reset interrupt management states */ 7337 phba->intr_type = NONE; 7338 phba->sli.slistat.sli_intr = 0; 7339 7340 return; 7341} 7342 7343/** 7344 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 7345 * @phba: pointer to lpfc hba data structure. 7346 * 7347 * This routine is invoked to enable the MSI-X interrupt vectors to device 7348 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 7349 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 7350 * enables either all or nothing, depending on the current availability of 7351 * PCI vector resources. The device driver is responsible for calling the 7352 * individual request_irq() to register each MSI-X vector with a interrupt 7353 * handler, which is done in this function. Note that later when device is 7354 * unloading, the driver should always call free_irq() on all MSI-X vectors 7355 * it has done request_irq() on before calling pci_disable_msix(). Failure 7356 * to do so results in a BUG_ON() and a device will be left with MSI-X 7357 * enabled and leaks its vectors. 
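 *
 * Note on the retry below: pci_enable_msix() may return a positive count of
 * the vectors it could still allocate; when more than one vector remains
 * available, this routine retries the allocation with that reduced count
 * instead of failing outright.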
7358 * 7359 * Return codes 7360 * 0 - successful 7361 * other values - error 7362 **/ 7363static int 7364lpfc_sli4_enable_msix(struct lpfc_hba *phba) 7365{ 7366 int vectors, rc, index; 7367 7368 /* Set up MSI-X multi-message vectors */ 7369 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7370 phba->sli4_hba.msix_entries[index].entry = index; 7371 7372 /* Configure MSI-X capability structure */ 7373 vectors = phba->sli4_hba.cfg_eqn; 7374enable_msix_vectors: 7375 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 7376 vectors); 7377 if (rc > 1) { 7378 vectors = rc; 7379 goto enable_msix_vectors; 7380 } else if (rc) { 7381 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7382 "0484 PCI enable MSI-X failed (%d)\n", rc); 7383 goto msi_fail_out; 7384 } 7385 7386 /* Log MSI-X vector assignment */ 7387 for (index = 0; index < vectors; index++) 7388 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7389 "0489 MSI-X entry[%d]: vector=x%x " 7390 "message=%d\n", index, 7391 phba->sli4_hba.msix_entries[index].vector, 7392 phba->sli4_hba.msix_entries[index].entry); 7393 /* 7394 * Assign MSI-X vectors to interrupt handlers 7395 */ 7396 7397 /* The first vector must be associated with the slow-path handler for the MQ */ 7398 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7399 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 7400 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7401 if (rc) { 7402 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7403 "0485 MSI-X slow-path request_irq failed " 7404 "(%d)\n", rc); 7405 goto msi_fail_out; 7406 } 7407 7408 /* The rest of the vector(s) are associated with fast-path handler(s) */ 7409 for (index = 1; index < vectors; index++) { 7410 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 7411 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 7412 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 7413 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 7414 LPFC_FP_DRIVER_HANDLER_NAME, 7415 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7416 if (rc) { 7417 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7418 "0486 MSI-X fast-path (%d) " 7419 "request_irq failed (%d)\n", index, rc); 7420 goto cfg_fail_out; 7421 } 7422 } 7423 phba->sli4_hba.msix_vec_nr = vectors; 7424 7425 return rc; 7426 7427cfg_fail_out: 7428 /* free the fast-path irqs already requested */ 7429 for (--index; index >= 1; index--) 7430 free_irq(phba->sli4_hba.msix_entries[index].vector, 7431 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7432 7433 /* free the slow-path irq already requested */ 7434 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7435 7436msi_fail_out: 7437 /* Unconfigure MSI-X capability structure */ 7438 pci_disable_msix(phba->pcidev); 7439 return rc; 7440} 7441 7442/** 7443 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 7444 * @phba: pointer to lpfc hba data structure. 7445 * 7446 * This routine is invoked to release the MSI-X vectors and then disable the 7447 * MSI-X interrupt mode to device with SLI-4 interface spec.
7448 **/ 7449static void 7450lpfc_sli4_disable_msix(struct lpfc_hba *phba) 7451{ 7452 int index; 7453 7454 /* Free up MSI-X multi-message vectors */ 7455 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7456 7457 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++) 7458 free_irq(phba->sli4_hba.msix_entries[index].vector, 7459 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7460 7461 /* Disable MSI-X */ 7462 pci_disable_msix(phba->pcidev); 7463 7464 return; 7465} 7466 7467/** 7468 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 7469 * @phba: pointer to lpfc hba data structure. 7470 * 7471 * This routine is invoked to enable the MSI interrupt mode to device with 7472 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 7473 * to enable the MSI vector. The device driver is responsible for calling 7474 * the request_irq() to register MSI vector with a interrupt the handler, 7475 * which is done in this function. 7476 * 7477 * Return codes 7478 * 0 - successful 7479 * other values - error 7480 **/ 7481static int 7482lpfc_sli4_enable_msi(struct lpfc_hba *phba) 7483{ 7484 int rc, index; 7485 7486 rc = pci_enable_msi(phba->pcidev); 7487 if (!rc) 7488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7489 "0487 PCI enable MSI mode success.\n"); 7490 else { 7491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7492 "0488 PCI enable MSI mode failed (%d)\n", rc); 7493 return rc; 7494 } 7495 7496 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7497 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7498 if (rc) { 7499 pci_disable_msi(phba->pcidev); 7500 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7501 "0490 MSI request_irq failed (%d)\n", rc); 7502 return rc; 7503 } 7504 7505 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 7506 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7507 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7508 } 7509 7510 return 0; 7511} 7512 7513/** 7514 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 7515 * @phba: pointer to lpfc hba data structure. 7516 * 7517 * This routine is invoked to disable the MSI interrupt mode to device with 7518 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has 7519 * done request_irq() on before calling pci_disable_msi(). Failure to do so 7520 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 7521 * its vector. 7522 **/ 7523static void 7524lpfc_sli4_disable_msi(struct lpfc_hba *phba) 7525{ 7526 free_irq(phba->pcidev->irq, phba); 7527 pci_disable_msi(phba->pcidev); 7528 return; 7529} 7530 7531/** 7532 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 7533 * @phba: pointer to lpfc hba data structure. 7534 * 7535 * This routine is invoked to enable device interrupt and associate driver's 7536 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 7537 * interface spec. Depends on the interrupt mode configured to the driver, 7538 * the driver will try to fallback from the configured interrupt mode to an 7539 * interrupt mode which is supported by the platform, kernel, and device in 7540 * the order of: 7541 * MSI-X -> MSI -> IRQ. 
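 *
 * When the driver falls back to MSI or INTx, a single vector services the
 * whole function; the MSI and INTx paths below therefore still initialize
 * every fcp_eq_hdl entry (idx and phba back-pointer) for the configured
 * fast-path event queues.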
7542 * 7543 * Return codes 7544 * 0 - successful 7545 * other values - error 7546 **/ 7547static uint32_t 7548lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 7549{ 7550 uint32_t intr_mode = LPFC_INTR_ERROR; 7551 int retval, index; 7552 7553 if (cfg_mode == 2) { 7554 /* Preparation before conf_msi mbox cmd */ 7555 retval = 0; 7556 if (!retval) { 7557 /* Now, try to enable MSI-X interrupt mode */ 7558 retval = lpfc_sli4_enable_msix(phba); 7559 if (!retval) { 7560 /* Indicate initialization to MSI-X mode */ 7561 phba->intr_type = MSIX; 7562 intr_mode = 2; 7563 } 7564 } 7565 } 7566 7567 /* Fallback to MSI if MSI-X initialization failed */ 7568 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7569 retval = lpfc_sli4_enable_msi(phba); 7570 if (!retval) { 7571 /* Indicate initialization to MSI mode */ 7572 phba->intr_type = MSI; 7573 intr_mode = 1; 7574 } 7575 } 7576 7577 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7578 if (phba->intr_type == NONE) { 7579 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7580 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7581 if (!retval) { 7582 /* Indicate initialization to INTx mode */ 7583 phba->intr_type = INTx; 7584 intr_mode = 0; 7585 for (index = 0; index < phba->cfg_fcp_eq_count; 7586 index++) { 7587 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7588 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7589 } 7590 } 7591 } 7592 return intr_mode; 7593} 7594 7595/** 7596 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 7597 * @phba: pointer to lpfc hba data structure. 7598 * 7599 * This routine is invoked to disable device interrupt and disassociate 7600 * the driver's interrupt handler(s) from interrupt vector(s) to device 7601 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 7602 * will release the interrupt vector(s) for the message signaled interrupt. 7603 **/ 7604static void 7605lpfc_sli4_disable_intr(struct lpfc_hba *phba) 7606{ 7607 /* Disable the currently initialized interrupt mode */ 7608 if (phba->intr_type == MSIX) 7609 lpfc_sli4_disable_msix(phba); 7610 else if (phba->intr_type == MSI) 7611 lpfc_sli4_disable_msi(phba); 7612 else if (phba->intr_type == INTx) 7613 free_irq(phba->pcidev->irq, phba); 7614 7615 /* Reset interrupt management states */ 7616 phba->intr_type = NONE; 7617 phba->sli.slistat.sli_intr = 0; 7618 7619 return; 7620} 7621 7622/** 7623 * lpfc_unset_hba - Unset SLI3 hba device initialization 7624 * @phba: pointer to lpfc hba data structure. 7625 * 7626 * This routine is invoked to unset the HBA device initialization steps to 7627 * a device with SLI-3 interface spec. 7628 **/ 7629static void 7630lpfc_unset_hba(struct lpfc_hba *phba) 7631{ 7632 struct lpfc_vport *vport = phba->pport; 7633 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7634 7635 spin_lock_irq(shost->host_lock); 7636 vport->load_flag |= FC_UNLOADING; 7637 spin_unlock_irq(shost->host_lock); 7638 7639 lpfc_stop_hba_timers(phba); 7640 7641 phba->pport->work_port_events = 0; 7642 7643 lpfc_sli_hba_down(phba); 7644 7645 lpfc_sli_brdrestart(phba); 7646 7647 lpfc_sli_disable_intr(phba); 7648 7649 return; 7650} 7651 7652/** 7653 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 7654 * @phba: pointer to lpfc hba data structure. 7655 * 7656 * This routine is invoked to unset the HBA device initialization steps to 7657 * a device with SLI-4 interface spec. 
7658 **/ 7659static void 7660lpfc_sli4_unset_hba(struct lpfc_hba *phba) 7661{ 7662 struct lpfc_vport *vport = phba->pport; 7663 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7664 7665 spin_lock_irq(shost->host_lock); 7666 vport->load_flag |= FC_UNLOADING; 7667 spin_unlock_irq(shost->host_lock); 7668 7669 phba->pport->work_port_events = 0; 7670 7671 /* Stop the SLI4 device port */ 7672 lpfc_stop_port(phba); 7673 7674 lpfc_sli4_disable_intr(phba); 7675 7676 /* Reset SLI4 HBA FCoE function */ 7677 lpfc_pci_function_reset(phba); 7678 7679 return; 7680} 7681 7682/** 7683 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 7684 * @phba: Pointer to HBA context object. 7685 * 7686 * This function is called in the SLI4 code path to wait for completion 7687 * of device's XRIs exchange busy. It will check the XRI exchange busy 7688 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 7689 * that, it will check the XRI exchange busy on outstanding FCP and ELS 7690 * I/Os every 30 seconds, log error message, and wait forever. Only when 7691 * all XRI exchange busy complete, the driver unload shall proceed with 7692 * invoking the function reset ioctl mailbox command to the CNA and the 7693 * the rest of the driver unload resource release. 7694 **/ 7695static void 7696lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 7697{ 7698 int wait_time = 0; 7699 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 7700 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7701 7702 while (!fcp_xri_cmpl || !els_xri_cmpl) { 7703 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 7704 if (!fcp_xri_cmpl) 7705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7706 "2877 FCP XRI exchange busy " 7707 "wait time: %d seconds.\n", 7708 wait_time/1000); 7709 if (!els_xri_cmpl) 7710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7711 "2878 ELS XRI exchange busy " 7712 "wait time: %d seconds.\n", 7713 wait_time/1000); 7714 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 7715 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 7716 } else { 7717 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 7718 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 7719 } 7720 fcp_xri_cmpl = 7721 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 7722 els_xri_cmpl = 7723 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7724 } 7725} 7726 7727/** 7728 * lpfc_sli4_hba_unset - Unset the fcoe hba 7729 * @phba: Pointer to HBA context object. 7730 * 7731 * This function is called in the SLI4 code path to reset the HBA's FCoE 7732 * function. The caller is not required to hold any lock. This routine 7733 * issues PCI function reset mailbox command to reset the FCoE function. 7734 * At the end of the function, it calls lpfc_hba_down_post function to 7735 * free any pending commands. 7736 **/ 7737static void 7738lpfc_sli4_hba_unset(struct lpfc_hba *phba) 7739{ 7740 int wait_cnt = 0; 7741 LPFC_MBOXQ_t *mboxq; 7742 7743 lpfc_stop_hba_timers(phba); 7744 phba->sli4_hba.intr_enable = 0; 7745 7746 /* 7747 * Gracefully wait out the potential current outstanding asynchronous 7748 * mailbox command. 
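 * The wait below is bounded: the mailbox-active flag is polled in 10ms
 * steps for up to LPFC_ACTIVE_MBOX_WAIT_CNT iterations; on timeout the
 * active mailbox command is completed by hand with MBX_NOT_FINISHED
 * status before unload continues.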
7749 */ 7750 7751 /* First, block any pending async mailbox command from being posted */ 7752 spin_lock_irq(&phba->hbalock); 7753 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7754 spin_unlock_irq(&phba->hbalock); 7755 /* Now, try to wait it out if we can */ 7756 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7757 msleep(10); 7758 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 7759 break; 7760 } 7761 /* Forcefully release the outstanding mailbox command if timed out */ 7762 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7763 spin_lock_irq(&phba->hbalock); 7764 mboxq = phba->sli.mbox_active; 7765 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7766 __lpfc_mbox_cmpl_put(phba, mboxq); 7767 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7768 phba->sli.mbox_active = NULL; 7769 spin_unlock_irq(&phba->hbalock); 7770 } 7771 7772 /* Abort all iocbs associated with the hba */ 7773 lpfc_sli_hba_iocb_abort(phba); 7774 7775 /* Wait for completion of device XRI exchange busy */ 7776 lpfc_sli4_xri_exchange_busy_wait(phba); 7777 7778 /* Disable PCI subsystem interrupt */ 7779 lpfc_sli4_disable_intr(phba); 7780 7781 /* Stopping the kthread will trigger work_done one more time */ 7782 kthread_stop(phba->worker_thread); 7783 7784 /* Reset SLI4 HBA FCoE function */ 7785 lpfc_pci_function_reset(phba); 7786 7787 /* Stop the SLI4 device port */ 7788 phba->pport->work_port_events = 0; 7789} 7790 7791/** 7792 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 7793 * @phba: Pointer to HBA context object. 7794 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 7795 * 7796 * This function is called in the SLI4 code path to read the port's 7797 * sli4 capabilities. 7798 * 7799 * This function may be called from any context that can block-wait 7800 * for the completion. The expectation is that this routine is called 7801 * typically from probe_one or from the online routine.
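 *
 * Return codes
 * 0 - successful
 * 1 - failed to retrieve the port's SLI4 parameters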
7802 **/ 7803int 7804lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7805{ 7806 int rc; 7807 struct lpfc_mqe *mqe; 7808 struct lpfc_pc_sli4_params *sli4_params; 7809 uint32_t mbox_tmo; 7810 7811 rc = 0; 7812 mqe = &mboxq->u.mqe; 7813 7814 /* Read the port's SLI4 Parameters port capabilities */ 7815 lpfc_pc_sli4_params(mboxq); 7816 if (!phba->sli4_hba.intr_enable) 7817 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7818 else { 7819 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); 7820 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7821 } 7822 7823 if (unlikely(rc)) 7824 return 1; 7825 7826 sli4_params = &phba->sli4_hba.pc_sli4_params; 7827 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 7828 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 7829 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 7830 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 7831 &mqe->un.sli4_params); 7832 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 7833 &mqe->un.sli4_params); 7834 sli4_params->proto_types = mqe->un.sli4_params.word3; 7835 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 7836 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 7837 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 7838 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 7839 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 7840 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 7841 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 7842 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 7843 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 7844 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 7845 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 7846 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 7847 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 7848 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 7849 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 7850 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 7851 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 7852 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 7853 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 7854 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 7855 return rc; 7856} 7857 7858/** 7859 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 7860 * @phba: Pointer to HBA context object. 7861 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 7862 * 7863 * This function is called in the SLI4 code path to read the port's 7864 * sli4 capabilities. 7865 * 7866 * This function may be be called from any context that can block-wait 7867 * for the completion. The expectation is that this routine is called 7868 * typically from probe_one or from the online routine. 
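 *
 * Return codes
 * 0 - successful
 * non-zero - the GET_SLI4_PARAMETERS mailbox command failed; the mailbox
 * return code is passed back to the caller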
7869 **/ 7870int 7871lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7872{ 7873 int rc; 7874 struct lpfc_mqe *mqe = &mboxq->u.mqe; 7875 struct lpfc_pc_sli4_params *sli4_params; 7876 int length; 7877 struct lpfc_sli4_parameters *mbx_sli4_parameters; 7878 7879 /* Read the port's SLI4 Config Parameters */ 7880 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 7881 sizeof(struct lpfc_sli4_cfg_mhdr)); 7882 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7883 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 7884 length, LPFC_SLI4_MBX_EMBED); 7885 if (!phba->sli4_hba.intr_enable) 7886 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7887 else 7888 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, 7889 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); 7890 if (unlikely(rc)) 7891 return rc; 7892 sli4_params = &phba->sli4_hba.pc_sli4_params; 7893 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 7894 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 7895 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 7896 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 7897 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 7898 mbx_sli4_parameters); 7899 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 7900 mbx_sli4_parameters); 7901 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 7902 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 7903 else 7904 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 7905 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 7906 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 7907 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 7908 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 7909 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 7910 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 7911 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 7912 mbx_sli4_parameters); 7913 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 7914 mbx_sli4_parameters); 7915 return 0; 7916} 7917 7918/** 7919 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7920 * @pdev: pointer to PCI device 7921 * @pid: pointer to PCI device identifier 7922 * 7923 * This routine is to be called to attach a device with SLI-3 interface spec 7924 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7925 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 7926 * information of the device and driver to see if the driver state that it can 7927 * support this kind of device. If the match is successful, the driver core 7928 * invokes this routine. If this routine determines it can claim the HBA, it 7929 * does all the initialization that it needs to do to handle the HBA properly. 
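 *
 * On any setup failure the routine unwinds what it has already done in
 * reverse order through the out_* labels at the end of the function
 * (sysfs attributes, SCSI host, driver resources, iocb list, PCI memory
 * map, PCI device) before freeing the HBA structure.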
7930 * 7931 * Return code 7932 * 0 - driver can claim the device 7933 * negative value - driver can not claim the device 7934 **/ 7935static int __devinit 7936lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 7937{ 7938 struct lpfc_hba *phba; 7939 struct lpfc_vport *vport = NULL; 7940 struct Scsi_Host *shost = NULL; 7941 int error; 7942 uint32_t cfg_mode, intr_mode; 7943 7944 /* Allocate memory for HBA structure */ 7945 phba = lpfc_hba_alloc(pdev); 7946 if (!phba) 7947 return -ENOMEM; 7948 7949 /* Perform generic PCI device enabling operation */ 7950 error = lpfc_enable_pci_dev(phba); 7951 if (error) { 7952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7953 "1401 Failed to enable pci device.\n"); 7954 goto out_free_phba; 7955 } 7956 7957 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 7958 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 7959 if (error) 7960 goto out_disable_pci_dev; 7961 7962 /* Set up SLI-3 specific device PCI memory space */ 7963 error = lpfc_sli_pci_mem_setup(phba); 7964 if (error) { 7965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7966 "1402 Failed to set up pci memory space.\n"); 7967 goto out_disable_pci_dev; 7968 } 7969 7970 /* Set up phase-1 common device driver resources */ 7971 error = lpfc_setup_driver_resource_phase1(phba); 7972 if (error) { 7973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7974 "1403 Failed to set up driver resource.\n"); 7975 goto out_unset_pci_mem_s3; 7976 } 7977 7978 /* Set up SLI-3 specific device driver resources */ 7979 error = lpfc_sli_driver_resource_setup(phba); 7980 if (error) { 7981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7982 "1404 Failed to set up driver resource.\n"); 7983 goto out_unset_pci_mem_s3; 7984 } 7985 7986 /* Initialize and populate the iocb list per host */ 7987 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 7988 if (error) { 7989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7990 "1405 Failed to initialize iocb list.\n"); 7991 goto out_unset_driver_resource_s3; 7992 } 7993 7994 /* Set up common device driver resources */ 7995 error = lpfc_setup_driver_resource_phase2(phba); 7996 if (error) { 7997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7998 "1406 Failed to set up driver resource.\n"); 7999 goto out_free_iocb_list; 8000 } 8001 8002 /* Create SCSI host to the physical port */ 8003 error = lpfc_create_shost(phba); 8004 if (error) { 8005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8006 "1407 Failed to create scsi host.\n"); 8007 goto out_unset_driver_resource; 8008 } 8009 8010 /* Configure sysfs attributes */ 8011 vport = phba->pport; 8012 error = lpfc_alloc_sysfs_attr(vport); 8013 if (error) { 8014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8015 "1476 Failed to allocate sysfs attr\n"); 8016 goto out_destroy_shost; 8017 } 8018 8019 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8020 /* Now, trying to enable interrupt and bring up the device */ 8021 cfg_mode = phba->cfg_use_msi; 8022 while (true) { 8023 /* Put device to a known state before enabling interrupt */ 8024 lpfc_stop_port(phba); 8025 /* Configure and enable interrupt */ 8026 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 8027 if (intr_mode == LPFC_INTR_ERROR) { 8028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8029 "0431 Failed to enable interrupt.\n"); 8030 error = -ENODEV; 8031 goto out_free_sysfs_attr; 8032 } 8033 /* SLI-3 HBA setup */ 8034 if (lpfc_sli_hba_setup(phba)) { 8035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8036 "1477 Failed to set up hba\n"); 8037 error = -ENODEV; 8038 goto 
out_remove_device; 8039 } 8040 8041 /* Wait 50ms for the interrupts of previous mailbox commands */ 8042 msleep(50); 8043 /* Check active interrupts on message signaled interrupts */ 8044 if (intr_mode == 0 || 8045 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 8046 /* Log the current active interrupt mode */ 8047 phba->intr_mode = intr_mode; 8048 lpfc_log_intr_mode(phba, intr_mode); 8049 break; 8050 } else { 8051 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8052 "0447 Configure interrupt mode (%d) " 8053 "failed active interrupt test.\n", 8054 intr_mode); 8055 /* Disable the current interrupt mode */ 8056 lpfc_sli_disable_intr(phba); 8057 /* Try next level of interrupt mode */ 8058 cfg_mode = --intr_mode; 8059 } 8060 } 8061 8062 /* Perform post initialization setup */ 8063 lpfc_post_init_setup(phba); 8064 8065 /* Check if there are static vports to be created. */ 8066 lpfc_create_static_vport(phba); 8067 8068 return 0; 8069 8070out_remove_device: 8071 lpfc_unset_hba(phba); 8072out_free_sysfs_attr: 8073 lpfc_free_sysfs_attr(vport); 8074out_destroy_shost: 8075 lpfc_destroy_shost(phba); 8076out_unset_driver_resource: 8077 lpfc_unset_driver_resource_phase2(phba); 8078out_free_iocb_list: 8079 lpfc_free_iocb_list(phba); 8080out_unset_driver_resource_s3: 8081 lpfc_sli_driver_resource_unset(phba); 8082out_unset_pci_mem_s3: 8083 lpfc_sli_pci_mem_unset(phba); 8084out_disable_pci_dev: 8085 lpfc_disable_pci_dev(phba); 8086 if (shost) 8087 scsi_host_put(shost); 8088out_free_phba: 8089 lpfc_hba_free(phba); 8090 return error; 8091} 8092 8093/** 8094 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 8095 * @pdev: pointer to PCI device 8096 * 8097 * This routine is to be called to detach a device with SLI-3 interface 8098 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 8099 * removed from the PCI bus, it performs all the necessary cleanup for the HBA 8100 * device to be removed from the PCI subsystem properly. 8101 **/ 8102static void __devexit 8103lpfc_pci_remove_one_s3(struct pci_dev *pdev) 8104{ 8105 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8106 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8107 struct lpfc_vport **vports; 8108 struct lpfc_hba *phba = vport->phba; 8109 int i; 8110 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 8111 8112 spin_lock_irq(&phba->hbalock); 8113 vport->load_flag |= FC_UNLOADING; 8114 spin_unlock_irq(&phba->hbalock); 8115 8116 lpfc_free_sysfs_attr(vport); 8117 8118 /* Release all the vports against this physical port */ 8119 vports = lpfc_create_vport_work_array(phba); 8120 if (vports != NULL) 8121 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8122 fc_vport_terminate(vports[i]->fc_vport); 8123 lpfc_destroy_vport_work_array(phba, vports); 8124 8125 /* Remove FC host and then SCSI host with the physical port */ 8126 fc_remove_host(shost); 8127 scsi_remove_host(shost); 8128 lpfc_cleanup(vport); 8129 8130 /* 8131 * Bring down the SLI Layer. This step disables all interrupts, 8132 * clears the rings, discards all mailbox commands, and resets 8133 * the HBA.
8134 */ 8135 8136 /* HBA interrupt will be disabled after this call */ 8137 lpfc_sli_hba_down(phba); 8138 /* Stop kthread signal shall trigger work_done one more time */ 8139 kthread_stop(phba->worker_thread); 8140 /* Final cleanup of txcmplq and reset the HBA */ 8141 lpfc_sli_brdrestart(phba); 8142 8143 lpfc_stop_hba_timers(phba); 8144 spin_lock_irq(&phba->hbalock); 8145 list_del_init(&vport->listentry); 8146 spin_unlock_irq(&phba->hbalock); 8147 8148 lpfc_debugfs_terminate(vport); 8149 8150 /* Disable interrupt */ 8151 lpfc_sli_disable_intr(phba); 8152 8153 pci_set_drvdata(pdev, NULL); 8154 scsi_host_put(shost); 8155 8156 /* 8157 * Call scsi_free before mem_free since scsi bufs are released to their 8158 * corresponding pools here. 8159 */ 8160 lpfc_scsi_free(phba); 8161 lpfc_mem_free_all(phba); 8162 8163 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 8164 phba->hbqslimp.virt, phba->hbqslimp.phys); 8165 8166 /* Free resources associated with SLI2 interface */ 8167 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 8168 phba->slim2p.virt, phba->slim2p.phys); 8169 8170 /* unmap adapter SLIM and Control Registers */ 8171 iounmap(phba->ctrl_regs_memmap_p); 8172 iounmap(phba->slim_memmap_p); 8173 8174 lpfc_hba_free(phba); 8175 8176 pci_release_selected_regions(pdev, bars); 8177 pci_disable_device(pdev); 8178} 8179 8180/** 8181 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 8182 * @pdev: pointer to PCI device 8183 * @msg: power management message 8184 * 8185 * This routine is to be called from the kernel's PCI subsystem to support 8186 * system Power Management (PM) to device with SLI-3 interface spec. When 8187 * PM invokes this method, it quiesces the device by stopping the driver's 8188 * worker thread for the device, turning off device's interrupt and DMA, 8189 * and bring the device offline. Note that as the driver implements the 8190 * minimum PM requirements to a power-aware driver's PM support for the 8191 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8192 * to the suspend() method call will be treated as SUSPEND and the driver will 8193 * fully reinitialize its device during resume() method call, the driver will 8194 * set device to PCI_D3hot state in PCI config space instead of setting it 8195 * according to the @msg provided by the PM. 8196 * 8197 * Return code 8198 * 0 - driver suspended the device 8199 * Error otherwise 8200 **/ 8201static int 8202lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 8203{ 8204 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8205 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8206 8207 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8208 "0473 PCI device Power Management suspend.\n"); 8209 8210 /* Bring down the device */ 8211 lpfc_offline_prep(phba); 8212 lpfc_offline(phba); 8213 kthread_stop(phba->worker_thread); 8214 8215 /* Disable interrupt from device */ 8216 lpfc_sli_disable_intr(phba); 8217 8218 /* Save device state to PCI config space */ 8219 pci_save_state(pdev); 8220 pci_set_power_state(pdev, PCI_D3hot); 8221 8222 return 0; 8223} 8224 8225/** 8226 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 8227 * @pdev: pointer to PCI device 8228 * 8229 * This routine is to be called from the kernel's PCI subsystem to support 8230 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 8231 * invokes this method, it restores the device's PCI config space state and 8232 * fully reinitializes the device and brings it online. Note that as the 8233 * driver implements the minimum PM requirements to a power-aware driver's 8234 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 8235 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 8236 * driver will fully reinitialize its device during resume() method call, 8237 * the device will be set to PCI_D0 directly in PCI config space before 8238 * restoring the state. 8239 * 8240 * Return code 8241 * 0 - driver suspended the device 8242 * Error otherwise 8243 **/ 8244static int 8245lpfc_pci_resume_one_s3(struct pci_dev *pdev) 8246{ 8247 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8248 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8249 uint32_t intr_mode; 8250 int error; 8251 8252 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8253 "0452 PCI device Power Management resume.\n"); 8254 8255 /* Restore device state from PCI config space */ 8256 pci_set_power_state(pdev, PCI_D0); 8257 pci_restore_state(pdev); 8258 8259 /* 8260 * As the new kernel behavior of pci_restore_state() API call clears 8261 * device saved_state flag, need to save the restored state again. 8262 */ 8263 pci_save_state(pdev); 8264 8265 if (pdev->is_busmaster) 8266 pci_set_master(pdev); 8267 8268 /* Startup the kernel thread for this host adapter. */ 8269 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8270 "lpfc_worker_%d", phba->brd_no); 8271 if (IS_ERR(phba->worker_thread)) { 8272 error = PTR_ERR(phba->worker_thread); 8273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8274 "0434 PM resume failed to start worker " 8275 "thread: error=x%x.\n", error); 8276 return error; 8277 } 8278 8279 /* Configure and enable interrupt */ 8280 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8281 if (intr_mode == LPFC_INTR_ERROR) { 8282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8283 "0430 PM resume Failed to enable interrupt\n"); 8284 return -EIO; 8285 } else 8286 phba->intr_mode = intr_mode; 8287 8288 /* Restart HBA and bring it online */ 8289 lpfc_sli_brdrestart(phba); 8290 lpfc_online(phba); 8291 8292 /* Log the current active interrupt mode */ 8293 lpfc_log_intr_mode(phba, phba->intr_mode); 8294 8295 return 0; 8296} 8297 8298/** 8299 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 8300 * @phba: pointer to lpfc hba data structure. 8301 * 8302 * This routine is called to prepare the SLI3 device for PCI slot recover. It 8303 * aborts all the outstanding SCSI I/Os to the pci device. 8304 **/ 8305static void 8306lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 8307{ 8308 struct lpfc_sli *psli = &phba->sli; 8309 struct lpfc_sli_ring *pring; 8310 8311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8312 "2723 PCI channel I/O abort preparing for recovery\n"); 8313 8314 /* 8315 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 8316 * and let the SCSI mid-layer to retry them to recover. 8317 */ 8318 pring = &psli->ring[psli->fcp_ring]; 8319 lpfc_sli_abort_iocb_ring(phba, pring); 8320} 8321 8322/** 8323 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 8324 * @phba: pointer to lpfc hba data structure. 8325 * 8326 * This routine is called to prepare the SLI3 device for PCI slot reset. It 8327 * disables the device interrupt and pci device, and aborts the internal FCP 8328 * pending I/Os. 
8329 **/ 8330static void 8331lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 8332{ 8333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8334 "2710 PCI channel disable preparing for reset\n"); 8335 8336 /* Block any management I/Os to the device */ 8337 lpfc_block_mgmt_io(phba); 8338 8339 /* Block all SCSI devices' I/Os on the host */ 8340 lpfc_scsi_dev_block(phba); 8341 8342 /* stop all timers */ 8343 lpfc_stop_hba_timers(phba); 8344 8345 /* Disable interrupt and pci device */ 8346 lpfc_sli_disable_intr(phba); 8347 pci_disable_device(phba->pcidev); 8348 8349 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8350 lpfc_sli_flush_fcp_rings(phba); 8351} 8352 8353/** 8354 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 8355 * @phba: pointer to lpfc hba data structure. 8356 * 8357 * This routine is called to prepare the SLI3 device for PCI slot permanently 8358 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8359 * pending I/Os. 8360 **/ 8361static void 8362lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8363{ 8364 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8365 "2711 PCI channel permanent disable for failure\n"); 8366 /* Block all SCSI devices' I/Os on the host */ 8367 lpfc_scsi_dev_block(phba); 8368 8369 /* stop all timers */ 8370 lpfc_stop_hba_timers(phba); 8371 8372 /* Clean up all driver's outstanding SCSI I/Os */ 8373 lpfc_sli_flush_fcp_rings(phba); 8374} 8375 8376/** 8377 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 8378 * @pdev: pointer to PCI device. 8379 * @state: the current PCI connection state. 8380 * 8381 * This routine is called from the PCI subsystem for I/O error handling to 8382 * device with SLI-3 interface spec. This function is called by the PCI 8383 * subsystem after a PCI bus error affecting this device has been detected. 8384 * When this function is invoked, it will need to stop all the I/Os and 8385 * interrupt(s) to the device. Once that is done, it will return 8386 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 8387 * as desired. 8388 * 8389 * Return codes 8390 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 8391 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8392 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8393 **/ 8394static pci_ers_result_t 8395lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 8396{ 8397 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8398 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8399 8400 switch (state) { 8401 case pci_channel_io_normal: 8402 /* Non-fatal error, prepare for recovery */ 8403 lpfc_sli_prep_dev_for_recover(phba); 8404 return PCI_ERS_RESULT_CAN_RECOVER; 8405 case pci_channel_io_frozen: 8406 /* Fatal error, prepare for slot reset */ 8407 lpfc_sli_prep_dev_for_reset(phba); 8408 return PCI_ERS_RESULT_NEED_RESET; 8409 case pci_channel_io_perm_failure: 8410 /* Permanent failure, prepare for device down */ 8411 lpfc_sli_prep_dev_for_perm_failure(phba); 8412 return PCI_ERS_RESULT_DISCONNECT; 8413 default: 8414 /* Unknown state, prepare and request slot reset */ 8415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8416 "0472 Unknown PCI error state: x%x\n", state); 8417 lpfc_sli_prep_dev_for_reset(phba); 8418 return PCI_ERS_RESULT_NEED_RESET; 8419 } 8420} 8421 8422/** 8423 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8424 * @pdev: pointer to PCI device. 
8425 * 8426 * This routine is called from the PCI subsystem for error handling to 8427 * device with SLI-3 interface spec. This is called after PCI bus has been 8428 * reset to restart the PCI card from scratch, as if from a cold-boot. 8429 * During the PCI subsystem error recovery, after driver returns 8430 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8431 * recovery and then call this routine before calling the .resume method 8432 * to recover the device. This function will initialize the HBA device, 8433 * enable the interrupt, but it will just put the HBA to offline state 8434 * without passing any I/O traffic. 8435 * 8436 * Return codes 8437 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8438 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8439 */ 8440static pci_ers_result_t 8441lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8442{ 8443 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8444 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8445 struct lpfc_sli *psli = &phba->sli; 8446 uint32_t intr_mode; 8447 8448 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8449 if (pci_enable_device_mem(pdev)) { 8450 printk(KERN_ERR "lpfc: Cannot re-enable " 8451 "PCI device after reset.\n"); 8452 return PCI_ERS_RESULT_DISCONNECT; 8453 } 8454 8455 pci_restore_state(pdev); 8456 8457 /* 8458 * As the new kernel behavior of pci_restore_state() API call clears 8459 * device saved_state flag, need to save the restored state again. 8460 */ 8461 pci_save_state(pdev); 8462 8463 if (pdev->is_busmaster) 8464 pci_set_master(pdev); 8465 8466 spin_lock_irq(&phba->hbalock); 8467 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8468 spin_unlock_irq(&phba->hbalock); 8469 8470 /* Configure and enable interrupt */ 8471 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8472 if (intr_mode == LPFC_INTR_ERROR) { 8473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8474 "0427 Cannot re-enable interrupt after " 8475 "slot reset.\n"); 8476 return PCI_ERS_RESULT_DISCONNECT; 8477 } else 8478 phba->intr_mode = intr_mode; 8479 8480 /* Take device offline, it will perform cleanup */ 8481 lpfc_offline_prep(phba); 8482 lpfc_offline(phba); 8483 lpfc_sli_brdrestart(phba); 8484 8485 /* Log the current active interrupt mode */ 8486 lpfc_log_intr_mode(phba, phba->intr_mode); 8487 8488 return PCI_ERS_RESULT_RECOVERED; 8489} 8490 8491/** 8492 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 8493 * @pdev: pointer to PCI device 8494 * 8495 * This routine is called from the PCI subsystem for error handling to device 8496 * with SLI-3 interface spec. It is called when kernel error recovery tells 8497 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8498 * error recovery. After this call, traffic can start to flow from this device 8499 * again. 8500 */ 8501static void 8502lpfc_io_resume_s3(struct pci_dev *pdev) 8503{ 8504 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8505 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8506 8507 /* Bring device online, it will be no-op for non-fatal error resume */ 8508 lpfc_online(phba); 8509 8510 /* Clean up Advanced Error Reporting (AER) if needed */ 8511 if (phba->hba_flag & HBA_AER_ENABLED) 8512 pci_cleanup_aer_uncorrect_error_status(pdev); 8513} 8514 8515/** 8516 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 8517 * @phba: pointer to lpfc hba data structure. 
8518 * 8519 * returns the number of ELS/CT IOCBs to reserve 8520 **/ 8521int 8522lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8523{ 8524 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8525 8526 if (phba->sli_rev == LPFC_SLI_REV4) { 8527 if (max_xri <= 100) 8528 return 10; 8529 else if (max_xri <= 256) 8530 return 25; 8531 else if (max_xri <= 512) 8532 return 50; 8533 else if (max_xri <= 1024) 8534 return 100; 8535 else 8536 return 150; 8537 } else 8538 return 0; 8539} 8540 8541/** 8542 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8543 * @pdev: pointer to PCI device 8544 * @pid: pointer to PCI device identifier 8545 * 8546 * This routine is called from the kernel's PCI subsystem to device with 8547 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8548 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8549 * information of the device and driver to see if the driver state that it 8550 * can support this kind of device. If the match is successful, the driver 8551 * core invokes this routine. If this routine determines it can claim the HBA, 8552 * it does all the initialization that it needs to do to handle the HBA 8553 * properly. 8554 * 8555 * Return code 8556 * 0 - driver can claim the device 8557 * negative value - driver can not claim the device 8558 **/ 8559static int __devinit 8560lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 8561{ 8562 struct lpfc_hba *phba; 8563 struct lpfc_vport *vport = NULL; 8564 struct Scsi_Host *shost = NULL; 8565 int error; 8566 uint32_t cfg_mode, intr_mode; 8567 int mcnt; 8568 8569 /* Allocate memory for HBA structure */ 8570 phba = lpfc_hba_alloc(pdev); 8571 if (!phba) 8572 return -ENOMEM; 8573 8574 /* Perform generic PCI device enabling operation */ 8575 error = lpfc_enable_pci_dev(phba); 8576 if (error) { 8577 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8578 "1409 Failed to enable pci device.\n"); 8579 goto out_free_phba; 8580 } 8581 8582 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 8583 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 8584 if (error) 8585 goto out_disable_pci_dev; 8586 8587 /* Set up SLI-4 specific device PCI memory space */ 8588 error = lpfc_sli4_pci_mem_setup(phba); 8589 if (error) { 8590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8591 "1410 Failed to set up pci memory space.\n"); 8592 goto out_disable_pci_dev; 8593 } 8594 8595 /* Set up phase-1 common device driver resources */ 8596 error = lpfc_setup_driver_resource_phase1(phba); 8597 if (error) { 8598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8599 "1411 Failed to set up driver resource.\n"); 8600 goto out_unset_pci_mem_s4; 8601 } 8602 8603 /* Set up SLI-4 Specific device driver resources */ 8604 error = lpfc_sli4_driver_resource_setup(phba); 8605 if (error) { 8606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8607 "1412 Failed to set up driver resource.\n"); 8608 goto out_unset_pci_mem_s4; 8609 } 8610 8611 /* Initialize and populate the iocb list per host */ 8612 8613 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8614 "2821 initialize iocb list %d.\n", 8615 phba->cfg_iocb_cnt*1024); 8616 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 8617 8618 if (error) { 8619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8620 "1413 Failed to initialize iocb list.\n"); 8621 goto out_unset_driver_resource_s4; 8622 } 8623 8624 INIT_LIST_HEAD(&phba->active_rrq_list); 8625 8626 /* Set up common device driver resources */ 8627 error = 
lpfc_setup_driver_resource_phase2(phba); 8628 if (error) { 8629 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8630 "1414 Failed to set up driver resource.\n"); 8631 goto out_free_iocb_list; 8632 } 8633 8634 /* Create SCSI host to the physical port */ 8635 error = lpfc_create_shost(phba); 8636 if (error) { 8637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8638 "1415 Failed to create scsi host.\n"); 8639 goto out_unset_driver_resource; 8640 } 8641 8642 /* Configure sysfs attributes */ 8643 vport = phba->pport; 8644 error = lpfc_alloc_sysfs_attr(vport); 8645 if (error) { 8646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8647 "1416 Failed to allocate sysfs attr\n"); 8648 goto out_destroy_shost; 8649 } 8650 8651 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8652 /* Now, trying to enable interrupt and bring up the device */ 8653 cfg_mode = phba->cfg_use_msi; 8654 while (true) { 8655 /* Put device to a known state before enabling interrupt */ 8656 lpfc_stop_port(phba); 8657 /* Configure and enable interrupt */ 8658 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 8659 if (intr_mode == LPFC_INTR_ERROR) { 8660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8661 "0426 Failed to enable interrupt.\n"); 8662 error = -ENODEV; 8663 goto out_free_sysfs_attr; 8664 } 8665 /* Default to single FCP EQ for non-MSI-X */ 8666 if (phba->intr_type != MSIX) 8667 phba->cfg_fcp_eq_count = 1; 8668 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 8669 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 8670 /* Set up SLI-4 HBA */ 8671 if (lpfc_sli4_hba_setup(phba)) { 8672 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8673 "1421 Failed to set up hba\n"); 8674 error = -ENODEV; 8675 goto out_disable_intr; 8676 } 8677 8678 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 8679 if (intr_mode != 0) 8680 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 8681 LPFC_ACT_INTR_CNT); 8682 8683 /* Check active interrupts received only for MSI/MSI-X */ 8684 if (intr_mode == 0 || 8685 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 8686 /* Log the current active interrupt mode */ 8687 phba->intr_mode = intr_mode; 8688 lpfc_log_intr_mode(phba, intr_mode); 8689 break; 8690 } 8691 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8692 "0451 Configure interrupt mode (%d) " 8693 "failed active interrupt test.\n", 8694 intr_mode); 8695 /* Unset the previous SLI-4 HBA setup. */ 8696 /* 8697 * TODO: Is this operation compatible with IF TYPE 2 8698 * devices? All port state is deleted and cleared. 8699 */ 8700 lpfc_sli4_unset_hba(phba); 8701 /* Try next level of interrupt mode */ 8702 cfg_mode = --intr_mode; 8703 } 8704 8705 /* Perform post initialization setup */ 8706 lpfc_post_init_setup(phba); 8707 8708 /* Check if there are static vports to be created. 
*/ 8709 lpfc_create_static_vport(phba); 8710 8711 return 0; 8712 8713out_disable_intr: 8714 lpfc_sli4_disable_intr(phba); 8715out_free_sysfs_attr: 8716 lpfc_free_sysfs_attr(vport); 8717out_destroy_shost: 8718 lpfc_destroy_shost(phba); 8719out_unset_driver_resource: 8720 lpfc_unset_driver_resource_phase2(phba); 8721out_free_iocb_list: 8722 lpfc_free_iocb_list(phba); 8723out_unset_driver_resource_s4: 8724 lpfc_sli4_driver_resource_unset(phba); 8725out_unset_pci_mem_s4: 8726 lpfc_sli4_pci_mem_unset(phba); 8727out_disable_pci_dev: 8728 lpfc_disable_pci_dev(phba); 8729 if (shost) 8730 scsi_host_put(shost); 8731out_free_phba: 8732 lpfc_hba_free(phba); 8733 return error; 8734} 8735 8736/** 8737 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 8738 * @pdev: pointer to PCI device 8739 * 8740 * This routine is called from the kernel's PCI subsystem to device with 8741 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8742 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8743 * device to be removed from the PCI subsystem properly. 8744 **/ 8745static void __devexit 8746lpfc_pci_remove_one_s4(struct pci_dev *pdev) 8747{ 8748 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8749 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8750 struct lpfc_vport **vports; 8751 struct lpfc_hba *phba = vport->phba; 8752 int i; 8753 8754 /* Mark the device unloading flag */ 8755 spin_lock_irq(&phba->hbalock); 8756 vport->load_flag |= FC_UNLOADING; 8757 spin_unlock_irq(&phba->hbalock); 8758 8759 /* Free the HBA sysfs attributes */ 8760 lpfc_free_sysfs_attr(vport); 8761 8762 /* Release all the vports against this physical port */ 8763 vports = lpfc_create_vport_work_array(phba); 8764 if (vports != NULL) 8765 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8766 fc_vport_terminate(vports[i]->fc_vport); 8767 lpfc_destroy_vport_work_array(phba, vports); 8768 8769 /* Remove FC host and then SCSI host with the physical port */ 8770 fc_remove_host(shost); 8771 scsi_remove_host(shost); 8772 8773 /* Perform cleanup on the physical port */ 8774 lpfc_cleanup(vport); 8775 8776 /* 8777 * Bring down the SLI Layer. This step disables all interrupts, 8778 * clears the rings, discards all mailbox commands, and resets 8779 * the HBA FCoE function. 8780 */ 8781 lpfc_debugfs_terminate(vport); 8782 lpfc_sli4_hba_unset(phba); 8783 8784 spin_lock_irq(&phba->hbalock); 8785 list_del_init(&vport->listentry); 8786 spin_unlock_irq(&phba->hbalock); 8787 8788 /* Perform scsi free before driver resource_unset since scsi 8789 * buffers are released to their corresponding pools here. 8790 */ 8791 lpfc_scsi_free(phba); 8792 lpfc_sli4_driver_resource_unset(phba); 8793 8794 /* Unmap adapter Control and Doorbell registers */ 8795 lpfc_sli4_pci_mem_unset(phba); 8796 8797 /* Release PCI resources and disable device's PCI function */ 8798 scsi_host_put(shost); 8799 lpfc_disable_pci_dev(phba); 8800 8801 /* Finally, free the driver's device data structure */ 8802 lpfc_hba_free(phba); 8803 8804 return; 8805} 8806 8807/** 8808 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 8809 * @pdev: pointer to PCI device 8810 * @msg: power management message 8811 * 8812 * This routine is called from the kernel's PCI subsystem to support system 8813 * Power Management (PM) to device with SLI-4 interface spec. 
When PM invokes 8814 * this method, it quiesces the device by stopping the driver's worker 8815 * thread for the device, turning off the device's interrupt and DMA, and bringing 8816 * the device offline. Note that as the driver implements the minimum PM 8817 * requirements to a power-aware driver's PM support for suspend/resume -- all 8818 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 8819 * method call will be treated as SUSPEND and the driver will fully 8820 * reinitialize its device during resume() method call, the driver will set 8821 * device to PCI_D3hot state in PCI config space instead of setting it 8822 * according to the @msg provided by the PM. 8823 * 8824 * Return code 8825 * 	0 - driver suspended the device 8826 * 	Error otherwise 8827 **/ 8828static int 8829lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 8830{ 8831	struct Scsi_Host *shost = pci_get_drvdata(pdev); 8832	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8833 8834	lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8835			"2843 PCI device Power Management suspend.\n"); 8836 8837	/* Bring down the device */ 8838	lpfc_offline_prep(phba); 8839	lpfc_offline(phba); 8840	kthread_stop(phba->worker_thread); 8841 8842	/* Disable interrupt from device */ 8843	lpfc_sli4_disable_intr(phba); 8844 8845	/* Save device state to PCI config space */ 8846	pci_save_state(pdev); 8847	pci_set_power_state(pdev, PCI_D3hot); 8848 8849	return 0; 8850} 8851 8852/** 8853 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 8854 * @pdev: pointer to PCI device 8855 * 8856 * This routine is called from the kernel's PCI subsystem to support system 8857 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 8858 * this method, it restores the device's PCI config space state and fully 8859 * reinitializes the device and brings it online. Note that as the driver 8860 * implements the minimum PM requirements to a power-aware driver's PM for 8861 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8862 * to the suspend() method call will be treated as SUSPEND and the driver 8863 * will fully reinitialize its device during resume() method call, the device 8864 * will be set to PCI_D0 directly in PCI config space before restoring the 8865 * state. 8866 * 8867 * Return code 8868 * 	0 - driver resumed the device 8869 * 	Error otherwise 8870 **/ 8871static int 8872lpfc_pci_resume_one_s4(struct pci_dev *pdev) 8873{ 8874	struct Scsi_Host *shost = pci_get_drvdata(pdev); 8875	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8876	uint32_t intr_mode; 8877	int error; 8878 8879	lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8880			"0292 PCI device Power Management resume.\n"); 8881 8882	/* Restore device state from PCI config space */ 8883	pci_set_power_state(pdev, PCI_D0); 8884	pci_restore_state(pdev); 8885 8886	/* 8887	 * As the new kernel behavior of pci_restore_state() API call clears 8888	 * device saved_state flag, need to save the restored state again. 8889	 */ 8890	pci_save_state(pdev); 8891 8892	if (pdev->is_busmaster) 8893		pci_set_master(pdev); 8894 8895	 /* Startup the kernel thread for this host adapter.
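 * (this thread was stopped by lpfc_pci_suspend_one_s4 and must be running again before the HBA is brought back online)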
 */ 8896	phba->worker_thread = kthread_run(lpfc_do_work, phba, 8897					"lpfc_worker_%d", phba->brd_no); 8898	if (IS_ERR(phba->worker_thread)) { 8899		error = PTR_ERR(phba->worker_thread); 8900		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8901				"0293 PM resume failed to start worker " 8902				"thread: error=x%x.\n", error); 8903		return error; 8904	} 8905 8906	/* Configure and enable interrupt */ 8907	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8908	if (intr_mode == LPFC_INTR_ERROR) { 8909		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8910				"0294 PM resume Failed to enable interrupt\n"); 8911		return -EIO; 8912	} else 8913		phba->intr_mode = intr_mode; 8914 8915	/* Restart HBA and bring it online */ 8916	lpfc_sli_brdrestart(phba); 8917	lpfc_online(phba); 8918 8919	/* Log the current active interrupt mode */ 8920	lpfc_log_intr_mode(phba, phba->intr_mode); 8921 8922	return 0; 8923} 8924 8925/** 8926 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 8927 * @phba: pointer to lpfc hba data structure. 8928 * 8929 * This routine is called to prepare the SLI4 device for PCI slot recover. It 8930 * aborts all the outstanding SCSI I/Os to the pci device. 8931 **/ 8932static void 8933lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 8934{ 8935	struct lpfc_sli *psli = &phba->sli; 8936	struct lpfc_sli_ring *pring; 8937 8938	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8939			"2828 PCI channel I/O abort preparing for recovery\n"); 8940	/* 8941	 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq 8942	 * and let the SCSI mid-layer retry them to recover. 8943	 */ 8944	pring = &psli->ring[psli->fcp_ring]; 8945	lpfc_sli_abort_iocb_ring(phba, pring); 8946} 8947 8948/** 8949 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 8950 * @phba: pointer to lpfc hba data structure. 8951 * 8952 * This routine is called to prepare the SLI4 device for PCI slot reset. It 8953 * disables the device interrupt and pci device, and aborts the internal FCP 8954 * pending I/Os. 8955 **/ 8956static void 8957lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 8958{ 8959	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8960			"2826 PCI channel disable preparing for reset\n"); 8961 8962	/* Block any management I/Os to the device */ 8963	lpfc_block_mgmt_io(phba); 8964 8965	/* Block all SCSI devices' I/Os on the host */ 8966	lpfc_scsi_dev_block(phba); 8967 8968	/* stop all timers */ 8969	lpfc_stop_hba_timers(phba); 8970 8971	/* Disable interrupt and pci device */ 8972	lpfc_sli4_disable_intr(phba); 8973	pci_disable_device(phba->pcidev); 8974 8975	/* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8976	lpfc_sli_flush_fcp_rings(phba); 8977} 8978 8979/** 8980 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 8981 * @phba: pointer to lpfc hba data structure. 8982 * 8983 * This routine is called to prepare the SLI4 device for the PCI slot to be 8984 * permanently disabled. It blocks the SCSI transport layer traffic and flushes the FCP 8985 * pending I/Os.
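 * Unlike the slot-reset preparation above, the device interrupt and the PCI device itself are left untouched here; the device is expected to be disconnected by the PCI error recovery core afterwards.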
8986 **/ 8987static void 8988lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8989{ 8990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8991 "2827 PCI channel permanent disable for failure\n"); 8992 8993 /* Block all SCSI devices' I/Os on the host */ 8994 lpfc_scsi_dev_block(phba); 8995 8996 /* stop all timers */ 8997 lpfc_stop_hba_timers(phba); 8998 8999 /* Clean up all driver's outstanding SCSI I/Os */ 9000 lpfc_sli_flush_fcp_rings(phba); 9001} 9002 9003/** 9004 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 9005 * @pdev: pointer to PCI device. 9006 * @state: the current PCI connection state. 9007 * 9008 * This routine is called from the PCI subsystem for error handling to device 9009 * with SLI-4 interface spec. This function is called by the PCI subsystem 9010 * after a PCI bus error affecting this device has been detected. When this 9011 * function is invoked, it will need to stop all the I/Os and interrupt(s) 9012 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 9013 * for the PCI subsystem to perform proper recovery as desired. 9014 * 9015 * Return codes 9016 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9017 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9018 **/ 9019static pci_ers_result_t 9020lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 9021{ 9022 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9023 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9024 9025 switch (state) { 9026 case pci_channel_io_normal: 9027 /* Non-fatal error, prepare for recovery */ 9028 lpfc_sli4_prep_dev_for_recover(phba); 9029 return PCI_ERS_RESULT_CAN_RECOVER; 9030 case pci_channel_io_frozen: 9031 /* Fatal error, prepare for slot reset */ 9032 lpfc_sli4_prep_dev_for_reset(phba); 9033 return PCI_ERS_RESULT_NEED_RESET; 9034 case pci_channel_io_perm_failure: 9035 /* Permanent failure, prepare for device down */ 9036 lpfc_sli4_prep_dev_for_perm_failure(phba); 9037 return PCI_ERS_RESULT_DISCONNECT; 9038 default: 9039 /* Unknown state, prepare and request slot reset */ 9040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9041 "2825 Unknown PCI error state: x%x\n", state); 9042 lpfc_sli4_prep_dev_for_reset(phba); 9043 return PCI_ERS_RESULT_NEED_RESET; 9044 } 9045} 9046 9047/** 9048 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 9049 * @pdev: pointer to PCI device. 9050 * 9051 * This routine is called from the PCI subsystem for error handling to device 9052 * with SLI-4 interface spec. It is called after PCI bus has been reset to 9053 * restart the PCI card from scratch, as if from a cold-boot. During the 9054 * PCI subsystem error recovery, after the driver returns 9055 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 9056 * recovery and then call this routine before calling the .resume method to 9057 * recover the device. This function will initialize the HBA device, enable 9058 * the interrupt, but it will just put the HBA to offline state without 9059 * passing any I/O traffic. 
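 * The actual SLI4 function reset and the transition back online are deferred to the io resume phase (lpfc_io_resume_s4), which keys off the LPFC_SLI_ACTIVE flag cleared here.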
 9060 * 9061 * Return codes 9062 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered 9063 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9064 */ 9065static pci_ers_result_t 9066lpfc_io_slot_reset_s4(struct pci_dev *pdev) 9067{ 9068	struct Scsi_Host *shost = pci_get_drvdata(pdev); 9069	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9070	struct lpfc_sli *psli = &phba->sli; 9071	uint32_t intr_mode; 9072 9073	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 9074	if (pci_enable_device_mem(pdev)) { 9075		printk(KERN_ERR "lpfc: Cannot re-enable " 9076			"PCI device after reset.\n"); 9077		return PCI_ERS_RESULT_DISCONNECT; 9078	} 9079 9080	pci_restore_state(pdev); 9081	if (pdev->is_busmaster) 9082		pci_set_master(pdev); 9083 9084	spin_lock_irq(&phba->hbalock); 9085	psli->sli_flag &= ~LPFC_SLI_ACTIVE; 9086	spin_unlock_irq(&phba->hbalock); 9087 9088	/* Configure and enable interrupt */ 9089	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 9090	if (intr_mode == LPFC_INTR_ERROR) { 9091		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9092				"2824 Cannot re-enable interrupt after " 9093				"slot reset.\n"); 9094		return PCI_ERS_RESULT_DISCONNECT; 9095	} else 9096		phba->intr_mode = intr_mode; 9097 9098	/* Log the current active interrupt mode */ 9099	lpfc_log_intr_mode(phba, phba->intr_mode); 9100 9101	return PCI_ERS_RESULT_RECOVERED; 9102} 9103 9104/** 9105 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 9106 * @pdev: pointer to PCI device 9107 * 9108 * This routine is called from the PCI subsystem for error handling to device 9109 * with SLI-4 interface spec. It is called when kernel error recovery tells 9110 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 9111 * error recovery. After this call, traffic can start to flow from this device 9112 * again. 9113 **/ 9114static void 9115lpfc_io_resume_s4(struct pci_dev *pdev) 9116{ 9117	struct Scsi_Host *shost = pci_get_drvdata(pdev); 9118	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9119 9120	/* 9121	 * In case of slot reset, as function reset is performed through 9122	 * mailbox command which needs DMA to be enabled, this operation 9123	 * has to be moved to the io resume phase. Taking device offline 9124	 * will perform the necessary cleanup. 9125	 */ 9126	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 9127		/* Perform device reset */ 9128		lpfc_offline_prep(phba); 9129		lpfc_offline(phba); 9130		lpfc_sli_brdrestart(phba); 9131		/* Bring the device back online */ 9132		lpfc_online(phba); 9133	} 9134 9135	/* Clean up Advanced Error Reporting (AER) if needed */ 9136	if (phba->hba_flag & HBA_AER_ENABLED) 9137		pci_cleanup_aer_uncorrect_error_status(pdev); 9138} 9139 9140/** 9141 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 9142 * @pdev: pointer to PCI device 9143 * @pid: pointer to PCI device identifier 9144 * 9145 * This routine is to be registered to the kernel's PCI subsystem. When an 9146 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 9147 * at PCI device-specific information of the device and driver to see if the 9148 * driver can support this kind of device. If the match is 9149 * successful, the driver core invokes this routine. This routine dispatches 9150 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 9151 * do all the initialization that it needs to do to handle the HBA device 9152 * properly.
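 * The dispatch decision is based on the LPFC_SLI_INTF register read from PCI config space: when the register contents are valid and the SLI revision field reports SLI-4, the SLI-4 probe routine is used; otherwise the SLI-3 probe routine is used.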
9153 * 9154 * Return code 9155 * 0 - driver can claim the device 9156 * negative value - driver can not claim the device 9157 **/ 9158static int __devinit 9159lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 9160{ 9161 int rc; 9162 struct lpfc_sli_intf intf; 9163 9164 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 9165 return -ENODEV; 9166 9167 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 9168 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 9169 rc = lpfc_pci_probe_one_s4(pdev, pid); 9170 else 9171 rc = lpfc_pci_probe_one_s3(pdev, pid); 9172 9173 return rc; 9174} 9175 9176/** 9177 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 9178 * @pdev: pointer to PCI device 9179 * 9180 * This routine is to be registered to the kernel's PCI subsystem. When an 9181 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 9182 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 9183 * remove routine, which will perform all the necessary cleanup for the 9184 * device to be removed from the PCI subsystem properly. 9185 **/ 9186static void __devexit 9187lpfc_pci_remove_one(struct pci_dev *pdev) 9188{ 9189 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9190 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9191 9192 switch (phba->pci_dev_grp) { 9193 case LPFC_PCI_DEV_LP: 9194 lpfc_pci_remove_one_s3(pdev); 9195 break; 9196 case LPFC_PCI_DEV_OC: 9197 lpfc_pci_remove_one_s4(pdev); 9198 break; 9199 default: 9200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9201 "1424 Invalid PCI device group: 0x%x\n", 9202 phba->pci_dev_grp); 9203 break; 9204 } 9205 return; 9206} 9207 9208/** 9209 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 9210 * @pdev: pointer to PCI device 9211 * @msg: power management message 9212 * 9213 * This routine is to be registered to the kernel's PCI subsystem to support 9214 * system Power Management (PM). When PM invokes this method, it dispatches 9215 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 9216 * suspend the device. 9217 * 9218 * Return code 9219 * 0 - driver suspended the device 9220 * Error otherwise 9221 **/ 9222static int 9223lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 9224{ 9225 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9226 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9227 int rc = -ENODEV; 9228 9229 switch (phba->pci_dev_grp) { 9230 case LPFC_PCI_DEV_LP: 9231 rc = lpfc_pci_suspend_one_s3(pdev, msg); 9232 break; 9233 case LPFC_PCI_DEV_OC: 9234 rc = lpfc_pci_suspend_one_s4(pdev, msg); 9235 break; 9236 default: 9237 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9238 "1425 Invalid PCI device group: 0x%x\n", 9239 phba->pci_dev_grp); 9240 break; 9241 } 9242 return rc; 9243} 9244 9245/** 9246 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 9247 * @pdev: pointer to PCI device 9248 * 9249 * This routine is to be registered to the kernel's PCI subsystem to support 9250 * system Power Management (PM). When PM invokes this method, it dispatches 9251 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 9252 * resume the device. 
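 * The SLI-3 versus SLI-4 resume routine is selected by the PCI device group (pci_dev_grp) recorded in the HBA data structure at probe time.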
 9253 * 9254 * Return code 9255 * 	0 - driver resumed the device 9256 * 	Error otherwise 9257 **/ 9258static int 9259lpfc_pci_resume_one(struct pci_dev *pdev) 9260{ 9261	struct Scsi_Host *shost = pci_get_drvdata(pdev); 9262	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9263	int rc = -ENODEV; 9264 9265	switch (phba->pci_dev_grp) { 9266	case LPFC_PCI_DEV_LP: 9267		rc = lpfc_pci_resume_one_s3(pdev); 9268		break; 9269	case LPFC_PCI_DEV_OC: 9270		rc = lpfc_pci_resume_one_s4(pdev); 9271		break; 9272	default: 9273		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9274				"1426 Invalid PCI device group: 0x%x\n", 9275				phba->pci_dev_grp); 9276		break; 9277	} 9278	return rc; 9279} 9280 9281/** 9282 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 9283 * @pdev: pointer to PCI device. 9284 * @state: the current PCI connection state. 9285 * 9286 * This routine is registered to the PCI subsystem for error handling. This 9287 * function is called by the PCI subsystem after a PCI bus error affecting 9288 * this device has been detected. When this routine is invoked, it dispatches 9289 * the action to the proper SLI-3 or SLI-4 device error detected handling 9290 * routine, which will perform the proper error detected operation. 9291 * 9292 * Return codes 9293 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9294 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9295 **/ 9296static pci_ers_result_t 9297lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9298{ 9299	struct Scsi_Host *shost = pci_get_drvdata(pdev); 9300	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9301	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9302 9303	switch (phba->pci_dev_grp) { 9304	case LPFC_PCI_DEV_LP: 9305		rc = lpfc_io_error_detected_s3(pdev, state); 9306		break; 9307	case LPFC_PCI_DEV_OC: 9308		rc = lpfc_io_error_detected_s4(pdev, state); 9309		break; 9310	default: 9311		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9312				"1427 Invalid PCI device group: 0x%x\n", 9313				phba->pci_dev_grp); 9314		break; 9315	} 9316	return rc; 9317} 9318 9319/** 9320 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 9321 * @pdev: pointer to PCI device. 9322 * 9323 * This routine is registered to the PCI subsystem for error handling. This 9324 * function is called after PCI bus has been reset to restart the PCI card 9325 * from scratch, as if from a cold-boot. When this routine is invoked, it 9326 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 9327 * routine, which will perform the proper device reset.
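 * If the PCI device group recorded for the HBA is not recognized, an error is logged and PCI_ERS_RESULT_DISCONNECT is returned.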
9328 * 9329 * Return codes 9330 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9331 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9332 **/ 9333static pci_ers_result_t 9334lpfc_io_slot_reset(struct pci_dev *pdev) 9335{ 9336 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9337 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9338 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9339 9340 switch (phba->pci_dev_grp) { 9341 case LPFC_PCI_DEV_LP: 9342 rc = lpfc_io_slot_reset_s3(pdev); 9343 break; 9344 case LPFC_PCI_DEV_OC: 9345 rc = lpfc_io_slot_reset_s4(pdev); 9346 break; 9347 default: 9348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9349 "1428 Invalid PCI device group: 0x%x\n", 9350 phba->pci_dev_grp); 9351 break; 9352 } 9353 return rc; 9354} 9355 9356/** 9357 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 9358 * @pdev: pointer to PCI device 9359 * 9360 * This routine is registered to the PCI subsystem for error handling. It 9361 * is called when kernel error recovery tells the lpfc driver that it is 9362 * OK to resume normal PCI operation after PCI bus error recovery. When 9363 * this routine is invoked, it dispatches the action to the proper SLI-3 9364 * or SLI-4 device io_resume routine, which will resume the device operation. 9365 **/ 9366static void 9367lpfc_io_resume(struct pci_dev *pdev) 9368{ 9369 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9370 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9371 9372 switch (phba->pci_dev_grp) { 9373 case LPFC_PCI_DEV_LP: 9374 lpfc_io_resume_s3(pdev); 9375 break; 9376 case LPFC_PCI_DEV_OC: 9377 lpfc_io_resume_s4(pdev); 9378 break; 9379 default: 9380 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9381 "1429 Invalid PCI device group: 0x%x\n", 9382 phba->pci_dev_grp); 9383 break; 9384 } 9385 return; 9386} 9387 9388static struct pci_device_id lpfc_id_table[] = { 9389 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 9390 PCI_ANY_ID, PCI_ANY_ID, }, 9391 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 9392 PCI_ANY_ID, PCI_ANY_ID, }, 9393 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 9394 PCI_ANY_ID, PCI_ANY_ID, }, 9395 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 9396 PCI_ANY_ID, PCI_ANY_ID, }, 9397 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 9398 PCI_ANY_ID, PCI_ANY_ID, }, 9399 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 9400 PCI_ANY_ID, PCI_ANY_ID, }, 9401 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 9402 PCI_ANY_ID, PCI_ANY_ID, }, 9403 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 9404 PCI_ANY_ID, PCI_ANY_ID, }, 9405 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 9406 PCI_ANY_ID, PCI_ANY_ID, }, 9407 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 9408 PCI_ANY_ID, PCI_ANY_ID, }, 9409 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 9410 PCI_ANY_ID, PCI_ANY_ID, }, 9411 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 9412 PCI_ANY_ID, PCI_ANY_ID, }, 9413 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 9414 PCI_ANY_ID, PCI_ANY_ID, }, 9415 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 9416 PCI_ANY_ID, PCI_ANY_ID, }, 9417 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 9418 PCI_ANY_ID, PCI_ANY_ID, }, 9419 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 9420 PCI_ANY_ID, PCI_ANY_ID, }, 9421 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 9422 PCI_ANY_ID, PCI_ANY_ID, }, 9423 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 9424 PCI_ANY_ID, PCI_ANY_ID, }, 9425 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 9426 PCI_ANY_ID, PCI_ANY_ID, }, 9427 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 9428 
PCI_ANY_ID, PCI_ANY_ID, }, 9429 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 9430 PCI_ANY_ID, PCI_ANY_ID, }, 9431 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 9432 PCI_ANY_ID, PCI_ANY_ID, }, 9433 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 9434 PCI_ANY_ID, PCI_ANY_ID, }, 9435 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 9436 PCI_ANY_ID, PCI_ANY_ID, }, 9437 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 9438 PCI_ANY_ID, PCI_ANY_ID, }, 9439 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 9440 PCI_ANY_ID, PCI_ANY_ID, }, 9441 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 9442 PCI_ANY_ID, PCI_ANY_ID, }, 9443 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 9444 PCI_ANY_ID, PCI_ANY_ID, }, 9445 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 9446 PCI_ANY_ID, PCI_ANY_ID, }, 9447 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 9448 PCI_ANY_ID, PCI_ANY_ID, }, 9449 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 9450 PCI_ANY_ID, PCI_ANY_ID, }, 9451 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 9452 PCI_ANY_ID, PCI_ANY_ID, }, 9453 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 9454 PCI_ANY_ID, PCI_ANY_ID, }, 9455 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 9456 PCI_ANY_ID, PCI_ANY_ID, }, 9457 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 9458 PCI_ANY_ID, PCI_ANY_ID, }, 9459 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 9460 PCI_ANY_ID, PCI_ANY_ID, }, 9461 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 9462 PCI_ANY_ID, PCI_ANY_ID, }, 9463 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 9464 PCI_ANY_ID, PCI_ANY_ID, }, 9465 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 9466 PCI_ANY_ID, PCI_ANY_ID, }, 9467 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9468 PCI_ANY_ID, PCI_ANY_ID, }, 9469 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 9470 PCI_ANY_ID, PCI_ANY_ID, }, 9471 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, 9472 PCI_ANY_ID, PCI_ANY_ID, }, 9473 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 9474 PCI_ANY_ID, PCI_ANY_ID, }, 9475 { 0 } 9476}; 9477 9478MODULE_DEVICE_TABLE(pci, lpfc_id_table); 9479 9480static struct pci_error_handlers lpfc_err_handler = { 9481 .error_detected = lpfc_io_error_detected, 9482 .slot_reset = lpfc_io_slot_reset, 9483 .resume = lpfc_io_resume, 9484}; 9485 9486static struct pci_driver lpfc_driver = { 9487 .name = LPFC_DRIVER_NAME, 9488 .id_table = lpfc_id_table, 9489 .probe = lpfc_pci_probe_one, 9490 .remove = __devexit_p(lpfc_pci_remove_one), 9491 .suspend = lpfc_pci_suspend_one, 9492 .resume = lpfc_pci_resume_one, 9493 .err_handler = &lpfc_err_handler, 9494}; 9495 9496/** 9497 * lpfc_init - lpfc module initialization routine 9498 * 9499 * This routine is to be invoked when the lpfc module is loaded into the 9500 * kernel. The special kernel macro module_init() is used to indicate the 9501 * role of this routine to the kernel as lpfc module entry point. 
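 * NPIV support is optional: the vport create/delete transport callbacks and the vport transport template are only set up when lpfc_enable_npiv is set.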
9502 * 9503 * Return codes 9504 * 0 - successful 9505 * -ENOMEM - FC attach transport failed 9506 * all others - failed 9507 */ 9508static int __init 9509lpfc_init(void) 9510{ 9511 int error = 0; 9512 9513 printk(LPFC_MODULE_DESC "\n"); 9514 printk(LPFC_COPYRIGHT "\n"); 9515 9516 if (lpfc_enable_npiv) { 9517 lpfc_transport_functions.vport_create = lpfc_vport_create; 9518 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 9519 } 9520 lpfc_transport_template = 9521 fc_attach_transport(&lpfc_transport_functions); 9522 if (lpfc_transport_template == NULL) 9523 return -ENOMEM; 9524 if (lpfc_enable_npiv) { 9525 lpfc_vport_transport_template = 9526 fc_attach_transport(&lpfc_vport_transport_functions); 9527 if (lpfc_vport_transport_template == NULL) { 9528 fc_release_transport(lpfc_transport_template); 9529 return -ENOMEM; 9530 } 9531 } 9532 error = pci_register_driver(&lpfc_driver); 9533 if (error) { 9534 fc_release_transport(lpfc_transport_template); 9535 if (lpfc_enable_npiv) 9536 fc_release_transport(lpfc_vport_transport_template); 9537 } 9538 9539 return error; 9540} 9541 9542/** 9543 * lpfc_exit - lpfc module removal routine 9544 * 9545 * This routine is invoked when the lpfc module is removed from the kernel. 9546 * The special kernel macro module_exit() is used to indicate the role of 9547 * this routine to the kernel as lpfc module exit point. 9548 */ 9549static void __exit 9550lpfc_exit(void) 9551{ 9552 pci_unregister_driver(&lpfc_driver); 9553 fc_release_transport(lpfc_transport_template); 9554 if (lpfc_enable_npiv) 9555 fc_release_transport(lpfc_vport_transport_template); 9556 if (_dump_buf_data) { 9557 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 9558 "_dump_buf_data at 0x%p\n", 9559 (1L << _dump_buf_data_order), _dump_buf_data); 9560 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 9561 } 9562 9563 if (_dump_buf_dif) { 9564 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 9565 "_dump_buf_dif at 0x%p\n", 9566 (1L << _dump_buf_dif_order), _dump_buf_dif); 9567 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 9568 } 9569} 9570 9571module_init(lpfc_init); 9572module_exit(lpfc_exit); 9573MODULE_LICENSE("GPL"); 9574MODULE_DESCRIPTION(LPFC_MODULE_DESC); 9575MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 9576MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 9577
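/*
 * Summary of the PCI error recovery flow implemented above for SLI-4 HBAs:
 * lpfc_io_error_detected() -> lpfc_io_error_detected_s4() stops I/O according
 * to the channel state and returns CAN_RECOVER, NEED_RESET or DISCONNECT;
 * lpfc_io_slot_reset() -> lpfc_io_slot_reset_s4() re-enables the PCI device
 * and interrupts but leaves the HBA offline; lpfc_io_resume() ->
 * lpfc_io_resume_s4() restarts the HBA, brings it back online and, when AER
 * is enabled, clears the uncorrectable AER error status.
 */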