lpfc_init.c revision 5ac6b303834aa74855ecc3db98b4b1d9cad0de2f
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

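/*
 * Illustrative sketch (not part of the driver): the polled-mailbox pattern
 * used throughout lpfc_config_port_prep() above. A command buffer is taken
 * from the HBA's mailbox mempool, a command is built into it by one of the
 * lpfc mailbox helpers, issued synchronously with MBX_POLL, and the buffer
 * is returned to the pool on every exit path. The helper name and the guard
 * macro are hypothetical; everything the sketch calls appears in this file.
 */
#ifdef LPFC_EXAMPLE_SKETCHES
static int
lpfc_example_poll_read_rev(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	/* Build the READ_REV command into the mailbox buffer */
	lpfc_read_rev(phba, pmb);

	/* MBX_POLL blocks until the HBA completes (or fails) the command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

	/* In polled mode the caller keeps ownership; always free the buffer */
	mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif /* LPFC_EXAMPLE_SKETCHES */
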
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

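/*
 * Worked example (illustrative only): with the decode above, a word whose
 * fields are ver=5, rev=0, lev=2, dist=1 ('a') and num=3 is rendered as
 * "5.02a3", while dist=3 with num=0 drops the distribution suffix and yields
 * "5.02". The real bit layout of struct prog_id lives in lpfc_hw.h and is
 * not shown here, so the sketch below takes the already-extracted fields as
 * plain ints purely to make the formatting rule concrete; the helper name
 * is hypothetical.
 */
#ifdef LPFC_EXAMPLE_SKETCHES
static void
lpfc_example_decode_prog_id(char *buf, int ver, int rev, int lev,
			    int dist, int num)
{
	const char dist_char[] = "nabx";	/* same decode table as above */

	if (dist == 3 && num == 0)
		sprintf(buf, "%d.%d%d", ver, rev, lev);
	else
		sprintf(buf, "%d.%d%d%c%d", ver, rev, lev,
			dist < 4 ? dist_char[dist] : ' ', num);
}
#endif /* LPFC_EXAMPLE_SKETCHES */
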
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology,
		phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

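/*
 * Illustrative sketch (not part of the driver): how a management path might
 * use lpfc_hba_init_link() above as the delayed link-up mechanism described
 * in its kernel-doc, once the link has been held down by the
 * lpfc_suppress_link_up module parameter. The helper name and the guard
 * check are hypothetical. MBX_NOWAIT hands buffer ownership to the
 * completion handler, while MBX_POLL leaves it with lpfc_hba_init_link.
 */
#ifdef LPFC_EXAMPLE_SKETCHES
static int
lpfc_example_bring_up_link(struct lpfc_hba *phba)
{
	/* Nothing to do if link initialization was never suppressed */
	if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
		return 0;

	/* Issue INIT_LINK without blocking the caller */
	return lpfc_hba_init_link(phba, MBX_NOWAIT);
}
#endif /* LPFC_EXAMPLE_SKETCHES */
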
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for stopping the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

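/*
 * Illustrative sketch (not part of the driver): the iterate-all-vports idiom
 * used in lpfc_hba_down_prep() above and lpfc_hb_timeout_handler() below.
 * The work array is a NULL-terminated snapshot holding references to every
 * active vport, so the walker does not have to hold discovery locks while
 * visiting them; the destroy call drops the references. The helper name and
 * callback parameter are hypothetical.
 */
#ifdef LPFC_EXAMPLE_SKETCHES
static void
lpfc_example_for_each_vport(struct lpfc_hba *phba,
			    void (*fn)(struct lpfc_vport *))
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			fn(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
#endif /* LPFC_EXAMPLE_SKETCHES */
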
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

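/*
 * Illustrative sketch (not part of the driver): the per-SLI-revision jump
 * table pattern behind lpfc_hba_down_post() above. During setup the driver
 * binds function pointers in struct lpfc_hba to the _s3 or _s4 variant based
 * on the detected SLI revision, and wrappers like the one above then
 * dispatch blindly. The binding below is a hypothetical, simplified
 * stand-in; the real selection logic lives elsewhere in the driver.
 */
#ifdef LPFC_EXAMPLE_SKETCHES
static void
lpfc_example_bind_down_post(struct lpfc_hba *phba)
{
	if (phba->sli_rev <= LPFC_SLI_REV3)
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
	else
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
}
#endif /* LPFC_EXAMPLE_SKETCHES */
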
Any periodical operations will 903 * be performed in the timeout handler and the HBA timeout event bit shall 904 * be cleared by the worker thread after it has taken the event bitmap out. 905 **/ 906static void 907lpfc_hb_timeout(unsigned long ptr) 908{ 909 struct lpfc_hba *phba; 910 uint32_t tmo_posted; 911 unsigned long iflag; 912 913 phba = (struct lpfc_hba *)ptr; 914 915 /* Check for heart beat timeout conditions */ 916 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 917 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 918 if (!tmo_posted) 919 phba->pport->work_port_events |= WORKER_HB_TMO; 920 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 921 922 /* Tell the worker thread there is work to do */ 923 if (!tmo_posted) 924 lpfc_worker_wake_up(phba); 925 return; 926} 927 928/** 929 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 930 * @phba: pointer to lpfc hba data structure. 931 * @pmboxq: pointer to the driver internal queue element for mailbox command. 932 * 933 * This is the callback function to the lpfc heart-beat mailbox command. 934 * If configured, the lpfc driver issues the heart-beat mailbox command to 935 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 936 * heart-beat mailbox command is issued, the driver shall set up heart-beat 937 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 938 * heart-beat outstanding state. Once the mailbox command comes back and 939 * no error conditions detected, the heart-beat mailbox command timer is 940 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 941 * state is cleared for the next heart-beat. If the timer expired with the 942 * heart-beat outstanding state set, the driver will put the HBA offline. 943 **/ 944static void 945lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 946{ 947 unsigned long drvr_flag; 948 949 spin_lock_irqsave(&phba->hbalock, drvr_flag); 950 phba->hb_outstanding = 0; 951 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 952 953 /* Check and reset heart-beat timer is necessary */ 954 mempool_free(pmboxq, phba->mbox_mem_pool); 955 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 956 !(phba->link_state == LPFC_HBA_ERROR) && 957 !(phba->pport->load_flag & FC_UNLOADING)) 958 mod_timer(&phba->hb_tmofunc, 959 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 960 return; 961} 962 963/** 964 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 965 * @phba: pointer to lpfc hba data structure. 966 * 967 * This is the actual HBA-timer timeout handler to be invoked by the worker 968 * thread whenever the HBA timer fired and HBA-timeout event posted. This 969 * handler performs any periodic operations needed for the device. If such 970 * periodic event has already been attended to either in the interrupt handler 971 * or by processing slow-ring or fast-ring events within the HBA-timer 972 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 973 * the timer for the next timeout period. If lpfc heart-beat mailbox command 974 * is configured and there is no heart-beat mailbox command outstanding, a 975 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 976 * has been a heart-beat mailbox command outstanding, the HBA shall be put 977 * to offline. 
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer is set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding, the
 * HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If the heartbeat timeout is called with
			 * hb_outstanding set, we need to give the hb
			 * mailbox command a chance to complete or time out.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggers an error attention. This can
	 * cause I/Os to be dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers an error attention with
		 * HS_FFER6. This can cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after re-establishing the
		 * link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, so just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

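/*
 * Illustrative sketch (not part of the driver): the SLI3 register idiom that
 * recurs in the error paths above. Writing 0 to the Host Control register
 * masks every attention source, writing all-ones to the Host Attention
 * register acknowledges anything already latched, and each write is followed
 * by a readl() so the posted PCI write is flushed before the CPU proceeds.
 * The helper name is hypothetical.
 */
#ifdef LPFC_EXAMPLE_SKETCHES
static void
lpfc_example_quiesce_intr(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
}
#endif /* LPFC_EXAMPLE_SKETCHES */
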
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

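/*
 * Worked example (illustrative only): a minimal VPD image as the parser
 * above consumes it. Tag 0x82 (identifier string) is skipped over using its
 * little-endian 16-bit length, tag 0x90 (read-only VPD) is scanned for the
 * two-character keywords ("SN", "V1"..."V4"), each followed by a one-byte
 * field length, and tag 0x78 terminates the walk. The sample contents are
 * made up.
 */
#ifdef LPFC_EXAMPLE_SKETCHES
static const uint8_t lpfc_example_vpd[] = {
	0x82, 0x04, 0x00,		/* identifier string, length 4 */
	'H', 'B', 'A', '1',
	0x90, 0x0c, 0x00,		/* read-only VPD section, length 12 */
	'S', 'N', 0x03, 'A', 'B', 'C',	/* keyword "SN" -> SerialNumber */
	'V', '2', 0x03, 'L', 'P', 'e',	/* keyword "V2" -> ModelName */
	0x78				/* end tag */
};
#endif /* LPFC_EXAMPLE_SKETCHES */
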
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
"PCI-X2", 1760 "Fibre Channel Adapter"}; 1761 break; 1762 case PCI_DEVICE_ID_NEPTUNE: 1763 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 1764 break; 1765 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1766 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 1767 break; 1768 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1769 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 1770 break; 1771 case PCI_DEVICE_ID_BMID: 1772 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 1773 break; 1774 case PCI_DEVICE_ID_BSMB: 1775 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 1776 break; 1777 case PCI_DEVICE_ID_ZEPHYR: 1778 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1779 break; 1780 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1781 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1782 break; 1783 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1784 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1785 GE = 1; 1786 break; 1787 case PCI_DEVICE_ID_ZMID: 1788 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1789 break; 1790 case PCI_DEVICE_ID_ZSMB: 1791 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1792 break; 1793 case PCI_DEVICE_ID_LP101: 1794 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1795 break; 1796 case PCI_DEVICE_ID_LP10000S: 1797 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1798 break; 1799 case PCI_DEVICE_ID_LP11000S: 1800 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1801 break; 1802 case PCI_DEVICE_ID_LPE11000S: 1803 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1804 break; 1805 case PCI_DEVICE_ID_SAT: 1806 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1807 break; 1808 case PCI_DEVICE_ID_SAT_MID: 1809 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1810 break; 1811 case PCI_DEVICE_ID_SAT_SMB: 1812 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1813 break; 1814 case PCI_DEVICE_ID_SAT_DCSP: 1815 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1816 break; 1817 case PCI_DEVICE_ID_SAT_SCSP: 1818 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1819 break; 1820 case PCI_DEVICE_ID_SAT_S: 1821 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1822 break; 1823 case PCI_DEVICE_ID_HORNET: 1824 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1825 GE = 1; 1826 break; 1827 case PCI_DEVICE_ID_PROTEUS_VF: 1828 m = (typeof(m)){"LPev12000", "PCIe IOV", 1829 "Fibre Channel Adapter"}; 1830 break; 1831 case PCI_DEVICE_ID_PROTEUS_PF: 1832 m = (typeof(m)){"LPev12000", "PCIe IOV", 1833 "Fibre Channel Adapter"}; 1834 break; 1835 case PCI_DEVICE_ID_PROTEUS_S: 1836 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1837 "Fibre Channel Adapter"}; 1838 break; 1839 case PCI_DEVICE_ID_TIGERSHARK: 1840 oneConnect = 1; 1841 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1842 break; 1843 case PCI_DEVICE_ID_TOMCAT: 1844 oneConnect = 1; 1845 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1846 break; 1847 case PCI_DEVICE_ID_FALCON: 1848 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1849 "EmulexSecure Fibre"}; 1850 break; 1851 case PCI_DEVICE_ID_BALIUS: 1852 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1853 "Fibre Channel Adapter"}; 1854 break; 1855 default: 1856 m = (typeof(m)){"Unknown", "", ""}; 1857 break; 1858 } 1859 1860 if (mdp && mdp[0] == '\0') 1861 snprintf(mdp, 79,"%s", m.name); 1862 /* oneConnect hba requires special processing, they are all initiators 1863 * and we put the port number on the end 1864 */ 
1865	if (descp && descp[0] == '\0') { 1866		if (oneConnect) 1867			snprintf(descp, 255, 1868				"Emulex OneConnect %s, %s Initiator, Port %s", 1869				m.name, m.function, 1870				phba->Port); 1871		else 1872			snprintf(descp, 255, 1873				"Emulex %s %d%s %s %s", 1874				m.name, max_speed, (GE) ? "GE" : "Gb", 1875				m.bus, m.function); 1876	} 1877} 1878 1879/** 1880 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring 1881 * @phba: pointer to lpfc hba data structure. 1882 * @pring: pointer to an IOCB ring. 1883 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1884 * 1885 * This routine posts a given number of IOCBs with the associated DMA buffer 1886 * descriptors specified by the cnt argument to the given IOCB ring. 1887 * 1888 * Return codes 1889 * The number of IOCBs NOT able to be posted to the IOCB ring. 1890 **/ 1891int 1892lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1893{ 1894	IOCB_t *icmd; 1895	struct lpfc_iocbq *iocb; 1896	struct lpfc_dmabuf *mp1, *mp2; 1897 1898	cnt += pring->missbufcnt; 1899 1900	/* While there are buffers to post */ 1901	while (cnt > 0) { 1902		/* Allocate buffer for command iocb */ 1903		iocb = lpfc_sli_get_iocbq(phba); 1904		if (iocb == NULL) { 1905			pring->missbufcnt = cnt; 1906			return cnt; 1907		} 1908		icmd = &iocb->iocb; 1909 1910		/* 2 buffers can be posted per command */ 1911		/* Allocate buffer to post */ 1912		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1913		if (mp1) 1914			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1915		if (!mp1 || !mp1->virt) { 1916			kfree(mp1); 1917			lpfc_sli_release_iocbq(phba, iocb); 1918			pring->missbufcnt = cnt; 1919			return cnt; 1920		} 1921 1922		INIT_LIST_HEAD(&mp1->list); 1923		/* Allocate buffer to post */ 1924		if (cnt > 1) { 1925			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1926			if (mp2) 1927				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1928							    &mp2->phys); 1929			if (!mp2 || !mp2->virt) { 1930				kfree(mp2); 1931				lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1932				kfree(mp1); 1933				lpfc_sli_release_iocbq(phba, iocb); 1934				pring->missbufcnt = cnt; 1935				return cnt; 1936			} 1937 1938			INIT_LIST_HEAD(&mp2->list); 1939		} else { 1940			mp2 = NULL; 1941		} 1942 1943		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1944		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1945		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1946		icmd->ulpBdeCount = 1; 1947		cnt--; 1948		if (mp2) { 1949			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1950			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1951			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1952			cnt--; 1953			icmd->ulpBdeCount = 2; 1954		} 1955 1956		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1957		icmd->ulpLe = 1; 1958 1959		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 1960		    IOCB_ERROR) { 1961			lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1962			kfree(mp1); 1963			cnt++; 1964			if (mp2) { 1965				lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1966				kfree(mp2); 1967				cnt++; 1968			} 1969			lpfc_sli_release_iocbq(phba, iocb); 1970			pring->missbufcnt = cnt; 1971			return cnt; 1972		} 1973		lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1974		if (mp2) 1975			lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1976	} 1977	pring->missbufcnt = 0; 1978	return 0; 1979} 1980 1981/** 1982 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 1983 * @phba: pointer to lpfc hba data structure. 1984 * 1985 * This routine posts initial receive IOCB buffers to the ELS ring. 
The 1986 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 1987 * set to 64 IOCBs. 1988 * 1989 * Return codes 1990 * 0 - success (currently always success) 1991 **/ 1992static int 1993lpfc_post_rcv_buf(struct lpfc_hba *phba) 1994{ 1995	struct lpfc_sli *psli = &phba->sli; 1996 1997	/* Ring 0, ELS / CT buffers */ 1998	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 1999	/* Ring 2 - FCP no buffers needed */ 2000 2001	return 0; 2002} 2003 2004#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2005 2006/** 2007 * lpfc_sha_init - Set up initial array of hash table entries 2008 * @HashResultPointer: pointer to an array used as the hash table. 2009 * 2010 * This routine sets up the initial values in the array of hash table entries 2011 * for the LC HBAs. 2012 **/ 2013static void 2014lpfc_sha_init(uint32_t * HashResultPointer) 2015{ 2016	HashResultPointer[0] = 0x67452301; 2017	HashResultPointer[1] = 0xEFCDAB89; 2018	HashResultPointer[2] = 0x98BADCFE; 2019	HashResultPointer[3] = 0x10325476; 2020	HashResultPointer[4] = 0xC3D2E1F0; 2021} 2022 2023/** 2024 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2025 * @HashResultPointer: pointer to an initial/result hash table. 2026 * @HashWorkingPointer: pointer to a working hash table. 2027 * 2028 * This routine iterates an initial hash table pointed to by @HashResultPointer 2029 * with the values from the working hash table pointed to by @HashWorkingPointer. 2030 * The results are put back into the initial hash table and returned through 2031 * @HashResultPointer as the result hash table. 2032 **/ 2033static void 2034lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2035{ 2036	int t; 2037	uint32_t TEMP; 2038	uint32_t A, B, C, D, E; 2039	t = 16; 2040	do { 2041		HashWorkingPointer[t] = 2042		    S(1, 2043		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2044							     8] ^ 2045		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2046	} while (++t <= 79); 2047	t = 0; 2048	A = HashResultPointer[0]; 2049	B = HashResultPointer[1]; 2050	C = HashResultPointer[2]; 2051	D = HashResultPointer[3]; 2052	E = HashResultPointer[4]; 2053 2054	do { 2055		if (t < 20) { 2056			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2057		} else if (t < 40) { 2058			TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2059		} else if (t < 60) { 2060			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2061		} else { 2062			TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2063		} 2064		TEMP += S(5, A) + E + HashWorkingPointer[t]; 2065		E = D; 2066		D = C; 2067		C = S(30, B); 2068		B = A; 2069		A = TEMP; 2070	} while (++t <= 79); 2071 2072	HashResultPointer[0] += A; 2073	HashResultPointer[1] += B; 2074	HashResultPointer[2] += C; 2075	HashResultPointer[3] += D; 2076	HashResultPointer[4] += E; 2077 2078} 2079 2080/** 2081 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2082 * @RandomChallenge: pointer to the entry of host challenge random number array. 2083 * @HashWorking: pointer to the entry of the working hash array. 2084 * 2085 * This routine calculates the working hash array referred to by @HashWorking 2086 * from the challenge random numbers associated with the host, referred to by 2087 * @RandomChallenge. The result is put into the entry of the working hash 2088 * array and returned by reference through @HashWorking. 
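 *
 * Illustrative sketch only (the values below are hypothetical, not part
 * of the driver): the combination is a plain 32-bit XOR per entry, e.g.
 *
 *   uint32_t challenge = 0xDEADBEEF;
 *   uint32_t hash_word = 0x67452301;
 *   lpfc_challenge_key(&challenge, &hash_word);
 *   hash_word now holds 0xDEADBEEF ^ 0x67452301 == 0xB9E89DEE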
2089 **/ 2090static void 2091lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2092{ 2093 *HashWorking = (*RandomChallenge ^ *HashWorking); 2094} 2095 2096/** 2097 * lpfc_hba_init - Perform special handling for LC HBA initialization 2098 * @phba: pointer to lpfc hba data structure. 2099 * @hbainit: pointer to an array of unsigned 32-bit integers. 2100 * 2101 * This routine performs the special handling for LC HBA initialization. 2102 **/ 2103void 2104lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2105{ 2106 int t; 2107 uint32_t *HashWorking; 2108 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2109 2110 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2111 if (!HashWorking) 2112 return; 2113 2114 HashWorking[0] = HashWorking[78] = *pwwnn++; 2115 HashWorking[1] = HashWorking[79] = *pwwnn; 2116 2117 for (t = 0; t < 7; t++) 2118 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2119 2120 lpfc_sha_init(hbainit); 2121 lpfc_sha_iterate(hbainit, HashWorking); 2122 kfree(HashWorking); 2123} 2124 2125/** 2126 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2127 * @vport: pointer to a virtual N_Port data structure. 2128 * 2129 * This routine performs the necessary cleanups before deleting the @vport. 2130 * It invokes the discovery state machine to perform necessary state 2131 * transitions and to release the ndlps associated with the @vport. Note, 2132 * the physical port is treated as @vport 0. 2133 **/ 2134void 2135lpfc_cleanup(struct lpfc_vport *vport) 2136{ 2137 struct lpfc_hba *phba = vport->phba; 2138 struct lpfc_nodelist *ndlp, *next_ndlp; 2139 int i = 0; 2140 2141 if (phba->link_state > LPFC_LINK_DOWN) 2142 lpfc_port_link_failure(vport); 2143 2144 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2145 if (!NLP_CHK_NODE_ACT(ndlp)) { 2146 ndlp = lpfc_enable_node(vport, ndlp, 2147 NLP_STE_UNUSED_NODE); 2148 if (!ndlp) 2149 continue; 2150 spin_lock_irq(&phba->ndlp_lock); 2151 NLP_SET_FREE_REQ(ndlp); 2152 spin_unlock_irq(&phba->ndlp_lock); 2153 /* Trigger the release of the ndlp memory */ 2154 lpfc_nlp_put(ndlp); 2155 continue; 2156 } 2157 spin_lock_irq(&phba->ndlp_lock); 2158 if (NLP_CHK_FREE_REQ(ndlp)) { 2159 /* The ndlp should not be in memory free mode already */ 2160 spin_unlock_irq(&phba->ndlp_lock); 2161 continue; 2162 } else 2163 /* Indicate request for freeing ndlp memory */ 2164 NLP_SET_FREE_REQ(ndlp); 2165 spin_unlock_irq(&phba->ndlp_lock); 2166 2167 if (vport->port_type != LPFC_PHYSICAL_PORT && 2168 ndlp->nlp_DID == Fabric_DID) { 2169 /* Just free up ndlp with Fabric_DID for vports */ 2170 lpfc_nlp_put(ndlp); 2171 continue; 2172 } 2173 2174 if (ndlp->nlp_type & NLP_FABRIC) 2175 lpfc_disc_state_machine(vport, ndlp, NULL, 2176 NLP_EVT_DEVICE_RECOVERY); 2177 2178 lpfc_disc_state_machine(vport, ndlp, NULL, 2179 NLP_EVT_DEVICE_RM); 2180 2181 } 2182 2183 /* At this point, ALL ndlp's should be gone 2184 * because of the previous NLP_EVT_DEVICE_RM. 2185 * Lets wait for this to happen, if needed. 
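 * (The polling loop below checks every 10ms and gives up after roughly
 * 30 seconds: 3000 iterations of msleep(10).)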
2186	 */ 2187	while (!list_empty(&vport->fc_nodes)) { 2188		if (i++ > 3000) { 2189			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2190				"0233 Nodelist not empty\n"); 2191			list_for_each_entry_safe(ndlp, next_ndlp, 2192						&vport->fc_nodes, nlp_listp) { 2193				lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2194						LOG_NODE, 2195						"0282 did:x%x ndlp:x%p " 2196						"usgmap:x%x refcnt:%d\n", 2197						ndlp->nlp_DID, (void *)ndlp, 2198						ndlp->nlp_usg_map, 2199						atomic_read( 2200							&ndlp->kref.refcount)); 2201			} 2202			break; 2203		} 2204 2205		/* Wait for any activity on ndlps to settle */ 2206		msleep(10); 2207	} 2208} 2209 2210/** 2211 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2212 * @vport: pointer to a virtual N_Port data structure. 2213 * 2214 * This routine stops all the timers associated with a @vport. This function 2215 * is invoked before disabling or deleting a @vport. Note that the physical 2216 * port is treated as @vport 0. 2217 **/ 2218void 2219lpfc_stop_vport_timers(struct lpfc_vport *vport) 2220{ 2221	del_timer_sync(&vport->els_tmofunc); 2222	del_timer_sync(&vport->fc_fdmitmo); 2223	lpfc_can_disctmo(vport); 2224	return; 2225} 2226 2227/** 2228 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2229 * @phba: pointer to lpfc hba data structure. 2230 * 2231 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2232 * caller of this routine should already hold the host lock. 2233 **/ 2234void 2235__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2236{ 2237	/* Clear pending FCF rediscovery wait flag */ 2238	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2239 2240	/* Now, try to stop the timer */ 2241	del_timer(&phba->fcf.redisc_wait); 2242} 2243 2244/** 2245 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2246 * @phba: pointer to lpfc hba data structure. 2247 * 2248 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2249 * checks whether the FCF rediscovery wait timer is pending with the host 2250 * lock held before proceeding with disabling the timer and clearing the 2251 * wait timer pending flag. 2252 **/ 2253void 2254lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2255{ 2256	spin_lock_irq(&phba->hbalock); 2257	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2258		/* FCF rediscovery timer already fired or stopped */ 2259		spin_unlock_irq(&phba->hbalock); 2260		return; 2261	} 2262	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2263	/* Clear failover in progress flags */ 2264	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2265	spin_unlock_irq(&phba->hbalock); 2266} 2267 2268/** 2269 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2270 * @phba: pointer to lpfc hba data structure. 2271 * 2272 * This routine stops all the timers associated with a HBA. This function is 2273 * invoked before either putting a HBA offline or unloading the driver. 
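 *
 * For reference, a sketch of where this sits in the teardown flow (the
 * actual call sites are in the offline/unload paths):
 *
 *   lpfc_stop_port(phba);
 *       -> phba->lpfc_stop_port(phba)          SLI3 or SLI4 variant
 *          -> lpfc_stop_hba_timers(phba);      all HBA timers stopped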
2274 **/ 2275void 2276lpfc_stop_hba_timers(struct lpfc_hba *phba) 2277{ 2278	lpfc_stop_vport_timers(phba->pport); 2279	del_timer_sync(&phba->sli.mbox_tmo); 2280	del_timer_sync(&phba->fabric_block_timer); 2281	del_timer_sync(&phba->eratt_poll); 2282	del_timer_sync(&phba->hb_tmofunc); 2283	phba->hb_outstanding = 0; 2284 2285	switch (phba->pci_dev_grp) { 2286	case LPFC_PCI_DEV_LP: 2287		/* Stop any LightPulse device specific driver timers */ 2288		del_timer_sync(&phba->fcp_poll_timer); 2289		break; 2290	case LPFC_PCI_DEV_OC: 2291		/* Stop any OneConnect device specific driver timers */ 2292		lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2293		break; 2294	default: 2295		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2296				"0297 Invalid device group (x%x)\n", 2297				phba->pci_dev_grp); 2298		break; 2299	} 2300	return; 2301} 2302 2303/** 2304 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2305 * @phba: pointer to lpfc hba data structure. 2306 * 2307 * This routine marks a HBA's management interface as blocked. Once the HBA's 2308 * management interface is marked as blocked, all the user space access to 2309 * the HBA, whether from the sysfs interface or the libdfc interface, will 2310 * be blocked. The HBA is set to block the management interface when the 2311 * driver prepares the HBA interface for online or offline. 2312 **/ 2313static void 2314lpfc_block_mgmt_io(struct lpfc_hba * phba) 2315{ 2316	unsigned long iflag; 2317	uint8_t actcmd = MBX_HEARTBEAT; 2318	unsigned long timeout; 2319 2320 2321	spin_lock_irqsave(&phba->hbalock, iflag); 2322	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2323	if (phba->sli.mbox_active) 2324		actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2325	spin_unlock_irqrestore(&phba->hbalock, iflag); 2326	/* Determine how long we might wait for the active mailbox 2327	 * command to be gracefully completed by firmware. 2328	 */ 2329	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 2330				   jiffies; 2331	/* Wait for the outstanding mailbox command to complete */ 2332	while (phba->sli.mbox_active) { 2333		/* Check active mailbox complete status every 2ms */ 2334		msleep(2); 2335		if (time_after(jiffies, timeout)) { 2336			lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2337				"2813 Mgmt IO is Blocked %x " 2338				"- mbox cmd %x still active\n", 2339				phba->sli.sli_flag, actcmd); 2340			break; 2341		} 2342	} 2343} 2344 2345/** 2346 * lpfc_online - Initialize and bring a HBA online 2347 * @phba: pointer to lpfc hba data structure. 2348 * 2349 * This routine initializes the HBA and brings a HBA online. During this 2350 * process, the management interface is blocked to prevent user space access 2351 * to the HBA interfering with the driver initialization. 
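 *
 * A minimal caller sketch, mirroring the sequence used by
 * lpfc_reset_hba() later in this file:
 *
 *   lpfc_offline_prep(phba);
 *   lpfc_offline(phba);
 *   lpfc_sli_brdrestart(phba);
 *   lpfc_online(phba);               returns 0 on success, 1 on failure
 *   lpfc_unblock_mgmt_io(phba);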
2352 * 2353 * Return codes 2354 * 0 - successful 2355 * 1 - failed 2356 **/ 2357int 2358lpfc_online(struct lpfc_hba *phba) 2359{ 2360 struct lpfc_vport *vport; 2361 struct lpfc_vport **vports; 2362 int i; 2363 2364 if (!phba) 2365 return 0; 2366 vport = phba->pport; 2367 2368 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2369 return 0; 2370 2371 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2372 "0458 Bring Adapter online\n"); 2373 2374 lpfc_block_mgmt_io(phba); 2375 2376 if (!lpfc_sli_queue_setup(phba)) { 2377 lpfc_unblock_mgmt_io(phba); 2378 return 1; 2379 } 2380 2381 if (phba->sli_rev == LPFC_SLI_REV4) { 2382 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2383 lpfc_unblock_mgmt_io(phba); 2384 return 1; 2385 } 2386 } else { 2387 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2388 lpfc_unblock_mgmt_io(phba); 2389 return 1; 2390 } 2391 } 2392 2393 vports = lpfc_create_vport_work_array(phba); 2394 if (vports != NULL) 2395 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2396 struct Scsi_Host *shost; 2397 shost = lpfc_shost_from_vport(vports[i]); 2398 spin_lock_irq(shost->host_lock); 2399 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2400 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2401 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2402 if (phba->sli_rev == LPFC_SLI_REV4) 2403 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2404 spin_unlock_irq(shost->host_lock); 2405 } 2406 lpfc_destroy_vport_work_array(phba, vports); 2407 2408 lpfc_unblock_mgmt_io(phba); 2409 return 0; 2410} 2411 2412/** 2413 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2414 * @phba: pointer to lpfc hba data structure. 2415 * 2416 * This routine marks a HBA's management interface as not blocked. Once the 2417 * HBA's management interface is marked as not blocked, all the user space 2418 * access to the HBA, whether they are from sysfs interface or libdfc 2419 * interface will be allowed. The HBA is set to block the management interface 2420 * when the driver prepares the HBA interface for online or offline and then 2421 * set to unblock the management interface afterwards. 2422 **/ 2423void 2424lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2425{ 2426 unsigned long iflag; 2427 2428 spin_lock_irqsave(&phba->hbalock, iflag); 2429 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2430 spin_unlock_irqrestore(&phba->hbalock, iflag); 2431} 2432 2433/** 2434 * lpfc_offline_prep - Prepare a HBA to be brought offline 2435 * @phba: pointer to lpfc hba data structure. 2436 * 2437 * This routine is invoked to prepare a HBA to be brought offline. It performs 2438 * unregistration login to all the nodes on all vports and flushes the mailbox 2439 * queue to make it ready to be brought offline. 
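 *
 * The per-vport walk below uses the driver's standard work-array pattern
 * (sketch only):
 *
 *   vports = lpfc_create_vport_work_array(phba);
 *   if (vports != NULL)
 *           for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 *                   ...per-vport unreg_login cleanup...
 *   lpfc_destroy_vport_work_array(phba, vports);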
2440 **/ 2441void 2442lpfc_offline_prep(struct lpfc_hba * phba) 2443{ 2444 struct lpfc_vport *vport = phba->pport; 2445 struct lpfc_nodelist *ndlp, *next_ndlp; 2446 struct lpfc_vport **vports; 2447 struct Scsi_Host *shost; 2448 int i; 2449 2450 if (vport->fc_flag & FC_OFFLINE_MODE) 2451 return; 2452 2453 lpfc_block_mgmt_io(phba); 2454 2455 lpfc_linkdown(phba); 2456 2457 /* Issue an unreg_login to all nodes on all vports */ 2458 vports = lpfc_create_vport_work_array(phba); 2459 if (vports != NULL) { 2460 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2461 if (vports[i]->load_flag & FC_UNLOADING) 2462 continue; 2463 shost = lpfc_shost_from_vport(vports[i]); 2464 spin_lock_irq(shost->host_lock); 2465 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2466 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2467 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2468 spin_unlock_irq(shost->host_lock); 2469 2470 shost = lpfc_shost_from_vport(vports[i]); 2471 list_for_each_entry_safe(ndlp, next_ndlp, 2472 &vports[i]->fc_nodes, 2473 nlp_listp) { 2474 if (!NLP_CHK_NODE_ACT(ndlp)) 2475 continue; 2476 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2477 continue; 2478 if (ndlp->nlp_type & NLP_FABRIC) { 2479 lpfc_disc_state_machine(vports[i], ndlp, 2480 NULL, NLP_EVT_DEVICE_RECOVERY); 2481 lpfc_disc_state_machine(vports[i], ndlp, 2482 NULL, NLP_EVT_DEVICE_RM); 2483 } 2484 spin_lock_irq(shost->host_lock); 2485 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2486 spin_unlock_irq(shost->host_lock); 2487 lpfc_unreg_rpi(vports[i], ndlp); 2488 } 2489 } 2490 } 2491 lpfc_destroy_vport_work_array(phba, vports); 2492 2493 lpfc_sli_mbox_sys_shutdown(phba); 2494} 2495 2496/** 2497 * lpfc_offline - Bring a HBA offline 2498 * @phba: pointer to lpfc hba data structure. 2499 * 2500 * This routine actually brings a HBA offline. It stops all the timers 2501 * associated with the HBA, brings down the SLI layer, and eventually 2502 * marks the HBA as in offline state for the upper layer protocol. 2503 **/ 2504void 2505lpfc_offline(struct lpfc_hba *phba) 2506{ 2507 struct Scsi_Host *shost; 2508 struct lpfc_vport **vports; 2509 int i; 2510 2511 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2512 return; 2513 2514 /* stop port and all timers associated with this hba */ 2515 lpfc_stop_port(phba); 2516 vports = lpfc_create_vport_work_array(phba); 2517 if (vports != NULL) 2518 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2519 lpfc_stop_vport_timers(vports[i]); 2520 lpfc_destroy_vport_work_array(phba, vports); 2521 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2522 "0460 Bring Adapter offline\n"); 2523 /* Bring down the SLI Layer and cleanup. The HBA is offline 2524 now. */ 2525 lpfc_sli_hba_down(phba); 2526 spin_lock_irq(&phba->hbalock); 2527 phba->work_ha = 0; 2528 spin_unlock_irq(&phba->hbalock); 2529 vports = lpfc_create_vport_work_array(phba); 2530 if (vports != NULL) 2531 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2532 shost = lpfc_shost_from_vport(vports[i]); 2533 spin_lock_irq(shost->host_lock); 2534 vports[i]->work_port_events = 0; 2535 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2536 spin_unlock_irq(shost->host_lock); 2537 } 2538 lpfc_destroy_vport_work_array(phba, vports); 2539} 2540 2541/** 2542 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2543 * @phba: pointer to lpfc hba data structure. 2544 * 2545 * This routine is to free all the SCSI buffers and IOCBs from the driver 2546 * list back to kernel. 
It is called from lpfc_pci_remove_one to free 2547 * the internal resources before the device is removed from the system. 2548 * 2549 * Return codes 2550 * 0 - successful (for now, it always returns 0) 2551 **/ 2552static int 2553lpfc_scsi_free(struct lpfc_hba *phba) 2554{ 2555 struct lpfc_scsi_buf *sb, *sb_next; 2556 struct lpfc_iocbq *io, *io_next; 2557 2558 spin_lock_irq(&phba->hbalock); 2559 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2560 spin_lock(&phba->scsi_buf_list_lock); 2561 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2562 list_del(&sb->list); 2563 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2564 sb->dma_handle); 2565 kfree(sb); 2566 phba->total_scsi_bufs--; 2567 } 2568 spin_unlock(&phba->scsi_buf_list_lock); 2569 2570 /* Release all the lpfc_iocbq entries maintained by this host. */ 2571 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2572 list_del(&io->list); 2573 kfree(io); 2574 phba->total_iocbq_bufs--; 2575 } 2576 spin_unlock_irq(&phba->hbalock); 2577 return 0; 2578} 2579 2580/** 2581 * lpfc_create_port - Create an FC port 2582 * @phba: pointer to lpfc hba data structure. 2583 * @instance: a unique integer ID to this FC port. 2584 * @dev: pointer to the device data structure. 2585 * 2586 * This routine creates a FC port for the upper layer protocol. The FC port 2587 * can be created on top of either a physical port or a virtual port provided 2588 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2589 * and associates the FC port created before adding the shost into the SCSI 2590 * layer. 2591 * 2592 * Return codes 2593 * @vport - pointer to the virtual N_Port data structure. 2594 * NULL - port create failed. 2595 **/ 2596struct lpfc_vport * 2597lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2598{ 2599 struct lpfc_vport *vport; 2600 struct Scsi_Host *shost; 2601 int error = 0; 2602 2603 if (dev != &phba->pcidev->dev) 2604 shost = scsi_host_alloc(&lpfc_vport_template, 2605 sizeof(struct lpfc_vport)); 2606 else 2607 shost = scsi_host_alloc(&lpfc_template, 2608 sizeof(struct lpfc_vport)); 2609 if (!shost) 2610 goto out; 2611 2612 vport = (struct lpfc_vport *) shost->hostdata; 2613 vport->phba = phba; 2614 vport->load_flag |= FC_LOADING; 2615 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2616 vport->fc_rscn_flush = 0; 2617 2618 lpfc_get_vport_cfgparam(vport); 2619 shost->unique_id = instance; 2620 shost->max_id = LPFC_MAX_TARGET; 2621 shost->max_lun = vport->cfg_max_luns; 2622 shost->this_id = -1; 2623 shost->max_cmd_len = 16; 2624 if (phba->sli_rev == LPFC_SLI_REV4) { 2625 shost->dma_boundary = 2626 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2627 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2628 } 2629 2630 /* 2631 * Set initial can_queue value since 0 is no longer supported and 2632 * scsi_add_host will fail. This will be adjusted later based on the 2633 * max xri value determined in hba setup. 2634 */ 2635 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2636 if (dev != &phba->pcidev->dev) { 2637 shost->transportt = lpfc_vport_transport_template; 2638 vport->port_type = LPFC_NPIV_PORT; 2639 } else { 2640 shost->transportt = lpfc_transport_template; 2641 vport->port_type = LPFC_PHYSICAL_PORT; 2642 } 2643 2644 /* Initialize all internally managed lists. 
*/ 2645 INIT_LIST_HEAD(&vport->fc_nodes); 2646 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2647 spin_lock_init(&vport->work_port_lock); 2648 2649 init_timer(&vport->fc_disctmo); 2650 vport->fc_disctmo.function = lpfc_disc_timeout; 2651 vport->fc_disctmo.data = (unsigned long)vport; 2652 2653 init_timer(&vport->fc_fdmitmo); 2654 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2655 vport->fc_fdmitmo.data = (unsigned long)vport; 2656 2657 init_timer(&vport->els_tmofunc); 2658 vport->els_tmofunc.function = lpfc_els_timeout; 2659 vport->els_tmofunc.data = (unsigned long)vport; 2660 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2661 if (error) 2662 goto out_put_shost; 2663 2664 spin_lock_irq(&phba->hbalock); 2665 list_add_tail(&vport->listentry, &phba->port_list); 2666 spin_unlock_irq(&phba->hbalock); 2667 return vport; 2668 2669out_put_shost: 2670 scsi_host_put(shost); 2671out: 2672 return NULL; 2673} 2674 2675/** 2676 * destroy_port - destroy an FC port 2677 * @vport: pointer to an lpfc virtual N_Port data structure. 2678 * 2679 * This routine destroys a FC port from the upper layer protocol. All the 2680 * resources associated with the port are released. 2681 **/ 2682void 2683destroy_port(struct lpfc_vport *vport) 2684{ 2685 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2686 struct lpfc_hba *phba = vport->phba; 2687 2688 lpfc_debugfs_terminate(vport); 2689 fc_remove_host(shost); 2690 scsi_remove_host(shost); 2691 2692 spin_lock_irq(&phba->hbalock); 2693 list_del_init(&vport->listentry); 2694 spin_unlock_irq(&phba->hbalock); 2695 2696 lpfc_cleanup(vport); 2697 return; 2698} 2699 2700/** 2701 * lpfc_get_instance - Get a unique integer ID 2702 * 2703 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2704 * uses the kernel idr facility to perform the task. 2705 * 2706 * Return codes: 2707 * instance - a unique integer ID allocated as the new instance. 2708 * -1 - lpfc get instance failed. 2709 **/ 2710int 2711lpfc_get_instance(void) 2712{ 2713 int instance = 0; 2714 2715 /* Assign an unused number */ 2716 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2717 return -1; 2718 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2719 return -1; 2720 return instance; 2721} 2722 2723/** 2724 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2725 * @shost: pointer to SCSI host data structure. 2726 * @time: elapsed time of the scan in jiffies. 2727 * 2728 * This routine is called by the SCSI layer with a SCSI host to determine 2729 * whether the scan host is finished. 2730 * 2731 * Note: there is no scan_start function as adapter initialization will have 2732 * asynchronously kicked off the link initialization. 2733 * 2734 * Return codes 2735 * 0 - SCSI host scan is not over yet. 2736 * 1 - SCSI host scan is over. 2737 **/ 2738int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2739{ 2740 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2741 struct lpfc_hba *phba = vport->phba; 2742 int stat = 0; 2743 2744 spin_lock_irq(shost->host_lock); 2745 2746 if (vport->load_flag & FC_UNLOADING) { 2747 stat = 1; 2748 goto finished; 2749 } 2750 if (time >= 30 * HZ) { 2751 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2752 "0461 Scanning longer than 30 " 2753 "seconds. Continuing initialization\n"); 2754 stat = 1; 2755 goto finished; 2756 } 2757 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2758 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2759 "0465 Link down longer than 15 " 2760 "seconds. 
Continuing initialization\n"); 2761		stat = 1; 2762		goto finished; 2763	} 2764 2765	if (vport->port_state != LPFC_VPORT_READY) 2766		goto finished; 2767	if (vport->num_disc_nodes || vport->fc_prli_sent) 2768		goto finished; 2769	if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2770		goto finished; 2771	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2772		goto finished; 2773 2774	stat = 1; 2775 2776finished: 2777	spin_unlock_irq(shost->host_lock); 2778	return stat; 2779} 2780 2781/** 2782 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2783 * @shost: pointer to SCSI host data structure. 2784 * 2785 * This routine initializes the SCSI host attributes on a given FC port. The 2786 * SCSI host can be either on top of a physical port or a virtual port. 2787 **/ 2788void lpfc_host_attrib_init(struct Scsi_Host *shost) 2789{ 2790	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2791	struct lpfc_hba   *phba = vport->phba; 2792	/* 2793	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup(). 2794	 */ 2795 2796	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2797	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2798	fc_host_supported_classes(shost) = FC_COS_CLASS3; 2799 2800	memset(fc_host_supported_fc4s(shost), 0, 2801	       sizeof(fc_host_supported_fc4s(shost))); 2802	fc_host_supported_fc4s(shost)[2] = 1; 2803	fc_host_supported_fc4s(shost)[7] = 1; 2804 2805	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2806				 sizeof fc_host_symbolic_name(shost)); 2807 2808	fc_host_supported_speeds(shost) = 0; 2809	if (phba->lmt & LMT_10Gb) 2810		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2811	if (phba->lmt & LMT_8Gb) 2812		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2813	if (phba->lmt & LMT_4Gb) 2814		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2815	if (phba->lmt & LMT_2Gb) 2816		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2817	if (phba->lmt & LMT_1Gb) 2818		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2819 2820	fc_host_maxframe_size(shost) = 2821		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2822		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2823 2824	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2825 2826	/* This value is also unchanging */ 2827	memset(fc_host_active_fc4s(shost), 0, 2828	       sizeof(fc_host_active_fc4s(shost))); 2829	fc_host_active_fc4s(shost)[2] = 1; 2830	fc_host_active_fc4s(shost)[7] = 1; 2831 2832	fc_host_max_npiv_vports(shost) = phba->max_vpi; 2833	spin_lock_irq(shost->host_lock); 2834	vport->load_flag &= ~FC_LOADING; 2835	spin_unlock_irq(shost->host_lock); 2836} 2837 2838/** 2839 * lpfc_stop_port_s3 - Stop SLI3 device port 2840 * @phba: pointer to lpfc hba data structure. 2841 * 2842 * This routine is invoked to stop an SLI3 device port. It stops the device 2843 * from generating interrupts and stops the device driver's timers for the 2844 * device. 2845 **/ 2846static void 2847lpfc_stop_port_s3(struct lpfc_hba *phba) 2848{ 2849	/* Clear all interrupt enable conditions */ 2850	writel(0, phba->HCregaddr); 2851	readl(phba->HCregaddr); /* flush */ 2852	/* Clear all pending interrupts */ 2853	writel(0xffffffff, phba->HAregaddr); 2854	readl(phba->HAregaddr); /* flush */ 2855 2856	/* Reset some HBA SLI setup states */ 2857	lpfc_stop_hba_timers(phba); 2858	phba->pport->work_port_events = 0; 2859} 2860 2861/** 2862 * lpfc_stop_port_s4 - Stop SLI4 device port 2863 * @phba: pointer to lpfc hba data structure. 
2864 * 2865 * This routine is invoked to stop an SLI4 device port. It stops the device 2866 * from generating interrupts and stops the device driver's timers for the 2867 * device. 2868 **/ 2869static void 2870lpfc_stop_port_s4(struct lpfc_hba *phba) 2871{ 2872	/* Reset some HBA SLI4 setup states */ 2873	lpfc_stop_hba_timers(phba); 2874	phba->pport->work_port_events = 0; 2875	phba->sli4_hba.intr_enable = 0; 2876} 2877 2878/** 2879 * lpfc_stop_port - Wrapper function for stopping hba port 2880 * @phba: Pointer to HBA context object. 2881 * 2882 * This routine wraps the actual SLI3 or SLI4 hba stop port routine through 2883 * the API jump table function pointer in the lpfc_hba struct. 2884 **/ 2885void 2886lpfc_stop_port(struct lpfc_hba *phba) 2887{ 2888	phba->lpfc_stop_port(phba); 2889} 2890 2891/** 2892 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2893 * @phba: Pointer to hba for which this call is being executed. 2894 * 2895 * This routine starts the timer waiting for the FCF rediscovery to complete. 2896 **/ 2897void 2898lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 2899{ 2900	unsigned long fcf_redisc_wait_tmo = 2901		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 2902	/* Start fcf rediscovery wait period timer */ 2903	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 2904	spin_lock_irq(&phba->hbalock); 2905	/* Allow action to new fcf asynchronous event */ 2906	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2907	/* Mark the FCF rediscovery pending state */ 2908	phba->fcf.fcf_flag |= FCF_REDISC_PEND; 2909	spin_unlock_irq(&phba->hbalock); 2910} 2911 2912/** 2913 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 2914 * @ptr: unsigned long holding the pointer to the lpfc hba data structure. 2915 * 2916 * This routine is invoked when the wait for FCF table rediscovery has 2917 * timed out. If new FCF record(s) have been discovered during the 2918 * wait period, a new FCF event shall be added to the FCOE async event 2919 * list, and the worker thread shall be woken up to process it from the 2920 * worker thread context. 2921 **/ 2922void 2923lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 2924{ 2925	struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 2926 2927	/* Don't send FCF rediscovery event if timer cancelled */ 2928	spin_lock_irq(&phba->hbalock); 2929	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2930		spin_unlock_irq(&phba->hbalock); 2931		return; 2932	} 2933	/* Clear FCF rediscovery timer pending flag */ 2934	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2935	/* FCF rediscovery event to worker thread */ 2936	phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2937	spin_unlock_irq(&phba->hbalock); 2938	lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2939			"2776 FCF rediscover wait timer expired, post " 2940			"a worker thread event for FCF table scan\n"); 2941	/* wake up worker thread */ 2942	lpfc_worker_wake_up(phba); 2943} 2944 2945/** 2946 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support 2947 * @phba: pointer to lpfc hba data structure. 2948 * 2949 * This function uses the QUERY_FW_CFG mailbox command to determine if the 2950 * firmware loaded supports FCoE. A return of zero indicates that the mailbox 2951 * was successful and the firmware supports FCoE. Any other return indicates 2952 * an error. It is assumed that this function will be called before interrupts 2953 * are enabled. 
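 *
 * Expected caller pattern during SLI4 bring-up (the exact call site is
 * an assumption, shown for illustration only):
 *
 *   rc = lpfc_sli4_fw_cfg_check(phba);
 *   if (rc)
 *           return rc;       abort setup: firmware lacks FCoE support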
2954 **/ 2955static int 2956lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) 2957{ 2958	int rc = 0; 2959	LPFC_MBOXQ_t *mboxq; 2960	struct lpfc_mbx_query_fw_cfg *query_fw_cfg; 2961	uint32_t length; 2962	uint32_t shdr_status, shdr_add_status; 2963 2964	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2965	if (!mboxq) { 2966		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2967				"2621 Failed to allocate mbox for " 2968				"query firmware config cmd\n"); 2969		return -ENOMEM; 2970	} 2971	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; 2972	length = (sizeof(struct lpfc_mbx_query_fw_cfg) - 2973		  sizeof(struct lpfc_sli4_cfg_mhdr)); 2974	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 2975			 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 2976			 length, LPFC_SLI4_MBX_EMBED); 2977	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2978	/* The IOCTL status is embedded in the mailbox subheader. */ 2979	shdr_status = bf_get(lpfc_mbox_hdr_status, 2980			     &query_fw_cfg->header.cfg_shdr.response); 2981	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 2982				 &query_fw_cfg->header.cfg_shdr.response); 2983	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 2984		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2985				"2622 Query Firmware Config failed " 2986				"mbx status x%x, status x%x add_status x%x\n", 2987				rc, shdr_status, shdr_add_status); /* Free the mailbox before returning the error */ if (rc != MBX_TIMEOUT) mempool_free(mboxq, phba->mbox_mem_pool); 2988		return -EINVAL; 2989	} 2990	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { 2991		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2992			"2623 FCoE Function not supported by firmware. " 2993			"Function mode = %08x\n", 2994			query_fw_cfg->function_mode); /* Free the mailbox before returning the error */ mempool_free(mboxq, phba->mbox_mem_pool); 2995		return -EINVAL; 2996	} 2997	if (rc != MBX_TIMEOUT) 2998		mempool_free(mboxq, phba->mbox_mem_pool); 2999	return 0; 3000} 3001 3002/** 3003 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3004 * @phba: pointer to lpfc hba data structure. 3005 * @acqe_link: pointer to the async link completion queue entry. 3006 * 3007 * This routine is to parse the SLI4 link-attention link fault code and 3008 * translate it into the base driver's read link attention mailbox command 3009 * status. 3010 * 3011 * Return: Link-attention status in terms of base driver's coding. 3012 **/ 3013static uint16_t 3014lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3015			   struct lpfc_acqe_link *acqe_link) 3016{ 3017	uint16_t latt_fault; 3018 3019	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3020	case LPFC_ASYNC_LINK_FAULT_NONE: 3021	case LPFC_ASYNC_LINK_FAULT_LOCAL: 3022	case LPFC_ASYNC_LINK_FAULT_REMOTE: 3023		latt_fault = 0; 3024		break; 3025	default: 3026		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3027				"0398 Invalid link fault code: x%x\n", 3028				bf_get(lpfc_acqe_link_fault, acqe_link)); 3029		latt_fault = MBXERR_ERROR; 3030		break; 3031	} 3032	return latt_fault; 3033} 3034 3035/** 3036 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3037 * @phba: pointer to lpfc hba data structure. 3038 * @acqe_link: pointer to the async link completion queue entry. 3039 * 3040 * This routine is to parse the SLI4 link attention type and translate it 3041 * into the base driver's link attention type coding. 3042 * 3043 * Return: Link attention type in terms of base driver's coding. 
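 *
 * Mapping implemented by the switch below:
 *
 *   LPFC_ASYNC_LINK_STATUS_DOWN          -> AT_LINK_DOWN
 *   LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN  -> AT_LINK_DOWN
 *   LPFC_ASYNC_LINK_STATUS_UP            -> AT_RESERVED (wait for logical up)
 *   LPFC_ASYNC_LINK_STATUS_LOGICAL_UP    -> AT_LINK_UP
 *   anything else                        -> AT_RESERVED (and log an error)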
3044 **/ 3045static uint8_t 3046lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3047 struct lpfc_acqe_link *acqe_link) 3048{ 3049 uint8_t att_type; 3050 3051 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3052 case LPFC_ASYNC_LINK_STATUS_DOWN: 3053 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3054 att_type = AT_LINK_DOWN; 3055 break; 3056 case LPFC_ASYNC_LINK_STATUS_UP: 3057 /* Ignore physical link up events - wait for logical link up */ 3058 att_type = AT_RESERVED; 3059 break; 3060 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3061 att_type = AT_LINK_UP; 3062 break; 3063 default: 3064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3065 "0399 Invalid link attention type: x%x\n", 3066 bf_get(lpfc_acqe_link_status, acqe_link)); 3067 att_type = AT_RESERVED; 3068 break; 3069 } 3070 return att_type; 3071} 3072 3073/** 3074 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3075 * @phba: pointer to lpfc hba data structure. 3076 * @acqe_link: pointer to the async link completion queue entry. 3077 * 3078 * This routine is to parse the SLI4 link-attention link speed and translate 3079 * it into the base driver's link-attention link speed coding. 3080 * 3081 * Return: Link-attention link speed in terms of base driver's coding. 3082 **/ 3083static uint8_t 3084lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3085 struct lpfc_acqe_link *acqe_link) 3086{ 3087 uint8_t link_speed; 3088 3089 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3090 case LPFC_ASYNC_LINK_SPEED_ZERO: 3091 link_speed = LA_UNKNW_LINK; 3092 break; 3093 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3094 link_speed = LA_UNKNW_LINK; 3095 break; 3096 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3097 link_speed = LA_UNKNW_LINK; 3098 break; 3099 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3100 link_speed = LA_1GHZ_LINK; 3101 break; 3102 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3103 link_speed = LA_10GHZ_LINK; 3104 break; 3105 default: 3106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3107 "0483 Invalid link-attention link speed: x%x\n", 3108 bf_get(lpfc_acqe_link_speed, acqe_link)); 3109 link_speed = LA_UNKNW_LINK; 3110 break; 3111 } 3112 return link_speed; 3113} 3114 3115/** 3116 * lpfc_sli4_async_link_evt - Process the asynchronous link event 3117 * @phba: pointer to lpfc hba data structure. 3118 * @acqe_link: pointer to the async link completion queue entry. 3119 * 3120 * This routine is to handle the SLI4 asynchronous link event. 
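 *
 * Flow sketch: the event is translated with the parse helpers above and
 * fed into the existing READ_LA completion path as a pseudo mailbox:
 *
 *   att_type      = lpfc_sli4_parse_latt_type(phba, acqe_link);
 *   mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
 *   la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
 *   lpfc_mbx_cmpl_read_la(phba, pmb);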
3121 **/ 3122static void 3123lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3124			 struct lpfc_acqe_link *acqe_link) 3125{ 3126	struct lpfc_dmabuf *mp; 3127	LPFC_MBOXQ_t *pmb; 3128	MAILBOX_t *mb; 3129	READ_LA_VAR *la; 3130	uint8_t att_type; 3131 3132	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3133	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 3134		return; 3135	phba->fcoe_eventtag = acqe_link->event_tag; 3136	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3137	if (!pmb) { 3138		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3139				"0395 The mboxq allocation failed\n"); 3140		return; 3141	} 3142	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3143	if (!mp) { 3144		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3145				"0396 The lpfc_dmabuf allocation failed\n"); 3146		goto out_free_pmb; 3147	} 3148	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3149	if (!mp->virt) { 3150		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3151				"0397 The mbuf allocation failed\n"); 3152		goto out_free_dmabuf; 3153	} 3154 3155	/* Cleanup any outstanding ELS commands */ 3156	lpfc_els_flush_all_cmd(phba); 3157 3158	/* Block ELS IOCBs until we have processed the link event */ 3159	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3160 3161	/* Update link event statistics */ 3162	phba->sli.slistat.link_event++; 3163 3164	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ 3165	lpfc_read_la(phba, pmb, mp); 3166	pmb->vport = phba->pport; 3167 3168	/* Parse and translate status field */ 3169	mb = &pmb->u.mb; 3170	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3171 3172	/* Parse and translate link attention fields */ 3173	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 3174	la->eventTag = acqe_link->event_tag; 3175	la->attType = att_type; 3176	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); 3177 3178	/* Fake the following irrelevant fields */ 3179	la->topology = TOPOLOGY_PT_PT; 3180	la->granted_AL_PA = 0; 3181	la->il = 0; 3182	la->pb = 0; 3183	la->fa = 0; 3184	la->mm = 0; 3185 3186	/* Keep the link status for extra SLI4 state machine reference */ 3187	phba->sli4_hba.link_state.speed = 3188				bf_get(lpfc_acqe_link_speed, acqe_link); 3189	phba->sli4_hba.link_state.duplex = 3190				bf_get(lpfc_acqe_link_duplex, acqe_link); 3191	phba->sli4_hba.link_state.status = 3192				bf_get(lpfc_acqe_link_status, acqe_link); 3193	phba->sli4_hba.link_state.physical = 3194				bf_get(lpfc_acqe_link_physical, acqe_link); 3195	phba->sli4_hba.link_state.fault = 3196				bf_get(lpfc_acqe_link_fault, acqe_link); 3197	phba->sli4_hba.link_state.logical_speed = 3198				bf_get(lpfc_acqe_qos_link_speed, acqe_link); 3199 3200	/* Invoke the lpfc_handle_latt mailbox command callback function */ 3201	lpfc_mbx_cmpl_read_la(phba, pmb); 3202 3203	return; 3204 3205out_free_dmabuf: 3206	kfree(mp); 3207out_free_pmb: 3208	mempool_free(pmb, phba->mbox_mem_pool); 3209} 3210 3211/** 3212 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3213 * @vport: pointer to vport data structure. 3214 * 3215 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3216 * response to a CVL event. 3217 * 3218 * Return the pointer to the ndlp with the vport if successful, otherwise 3219 * return NULL. 
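 *
 * Caller sketch (this mirrors the CVL case in lpfc_sli4_async_fcoe_evt()
 * below):
 *
 *   ndlp = lpfc_sli4_perform_vport_cvl(vport);
 *   if (!ndlp)
 *           break;           no instantiated vlink, nothing to recover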
3220 **/ 3221static struct lpfc_nodelist * 3222lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3223{ 3224	struct lpfc_nodelist *ndlp; 3225	struct Scsi_Host *shost; 3226	struct lpfc_hba *phba; 3227 3228	if (!vport) 3229		return NULL; 3230	phba = vport->phba; 3231	if (!phba) 3232		return NULL; 3233	ndlp = lpfc_findnode_did(vport, Fabric_DID); 3234	if (!ndlp) { 3235		/* Cannot find existing Fabric ndlp, so allocate a new one */ 3236		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3237		if (!ndlp) 3238			return NULL; 3239		lpfc_nlp_init(vport, ndlp, Fabric_DID); 3240		/* Set the node type */ 3241		ndlp->nlp_type |= NLP_FABRIC; 3242		/* Put ndlp onto node list */ 3243		lpfc_enqueue_node(vport, ndlp); 3244	} else if (!NLP_CHK_NODE_ACT(ndlp)) { 3245		/* re-setup ndlp without removing from node list */ 3246		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3247		if (!ndlp) 3248			return NULL; 3249	} 3250	if (phba->pport->port_state < LPFC_FLOGI) 3251		return NULL; 3252	/* If the virtual link is not yet instantiated, ignore the CVL */ 3253	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)) 3254		return NULL; 3255	shost = lpfc_shost_from_vport(vport); 3256	if (!shost) 3257		return NULL; 3258	lpfc_linkdown_port(vport); 3259	lpfc_cleanup_pending_mbox(vport); 3260	spin_lock_irq(shost->host_lock); 3261	vport->fc_flag |= FC_VPORT_CVL_RCVD; 3262	spin_unlock_irq(shost->host_lock); 3263 3264	return ndlp; 3265} 3266 3267/** 3268 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3269 * @phba: pointer to lpfc hba data structure. 3270 * 3271 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3272 * response to a FCF dead event. 3273 **/ 3274static void 3275lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3276{ 3277	struct lpfc_vport **vports; 3278	int i; 3279 3280	vports = lpfc_create_vport_work_array(phba); 3281	if (vports) 3282		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3283			lpfc_sli4_perform_vport_cvl(vports[i]); 3284	lpfc_destroy_vport_work_array(phba, vports); 3285} 3286 3287/** 3288 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3289 * @phba: pointer to lpfc hba data structure. 3290 * @acqe_fcoe: pointer to the async fcoe completion queue entry. 3291 * 3292 * This routine is to handle the SLI4 asynchronous fcoe event. 
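 *
 * Event types handled by the switch below (summary):
 *
 *   NEW_FCF / FCF_PARAM_MOD -> update roundrobin bmask and/or rescan FCF table
 *   FCF_TABLE_FULL          -> log only
 *   FCF_DEAD                -> fast failover via FCF table rediscover
 *   CVL                     -> per-vport clear-virtual-link recovery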
3293 **/ 3294static void 3295lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, 3296 struct lpfc_acqe_fcoe *acqe_fcoe) 3297{ 3298 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3299 int rc; 3300 struct lpfc_vport *vport; 3301 struct lpfc_nodelist *ndlp; 3302 struct Scsi_Host *shost; 3303 int active_vlink_present; 3304 struct lpfc_vport **vports; 3305 int i; 3306 3307 phba->fc_eventTag = acqe_fcoe->event_tag; 3308 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3309 switch (event_type) { 3310 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3311 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3312 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3313 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3314 LOG_DISCOVERY, 3315 "2546 New FCF found event: " 3316 "evt_tag:x%x, fcf_index:x%x\n", 3317 acqe_fcoe->event_tag, 3318 acqe_fcoe->index); 3319 else 3320 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3321 LOG_DISCOVERY, 3322 "2788 FCF parameter modified event: " 3323 "evt_tag:x%x, fcf_index:x%x\n", 3324 acqe_fcoe->event_tag, 3325 acqe_fcoe->index); 3326 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3327 /* 3328 * During period of FCF discovery, read the FCF 3329 * table record indexed by the event to update 3330 * FCF round robin failover eligible FCF bmask. 3331 */ 3332 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3333 LOG_DISCOVERY, 3334 "2779 Read new FCF record with " 3335 "fcf_index:x%x for updating FCF " 3336 "round robin failover bmask\n", 3337 acqe_fcoe->index); 3338 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3339 } 3340 3341 /* If the FCF discovery is in progress, do nothing. */ 3342 spin_lock_irq(&phba->hbalock); 3343 if (phba->hba_flag & FCF_DISC_INPROGRESS) { 3344 spin_unlock_irq(&phba->hbalock); 3345 break; 3346 } 3347 /* If fast FCF failover rescan event is pending, do nothing */ 3348 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3349 spin_unlock_irq(&phba->hbalock); 3350 break; 3351 } 3352 3353 /* If the FCF has been in discovered state, do nothing. */ 3354 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3355 spin_unlock_irq(&phba->hbalock); 3356 break; 3357 } 3358 spin_unlock_irq(&phba->hbalock); 3359 3360 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3361 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3362 "2770 Start FCF table scan due to new FCF " 3363 "event: evt_tag:x%x, fcf_index:x%x\n", 3364 acqe_fcoe->event_tag, acqe_fcoe->index); 3365 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3366 LPFC_FCOE_FCF_GET_FIRST); 3367 if (rc) 3368 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3369 "2547 Issue FCF scan read FCF mailbox " 3370 "command failed 0x%x\n", rc); 3371 break; 3372 3373 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3374 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3375 "2548 FCF Table full count 0x%x tag 0x%x\n", 3376 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 3377 acqe_fcoe->event_tag); 3378 break; 3379 3380 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3381 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3382 "2549 FCF disconnected from network index 0x%x" 3383 " tag 0x%x\n", acqe_fcoe->index, 3384 acqe_fcoe->event_tag); 3385 /* 3386 * If we are in the middle of FCF failover process, clear 3387 * the corresponding FCF bit in the roundrobin bitmap. 
3388		 */ 3389		spin_lock_irq(&phba->hbalock); 3390		if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3391			spin_unlock_irq(&phba->hbalock); 3392			/* Update FLOGI FCF failover eligible FCF bmask */ 3393			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); 3394			break; 3395		} 3396		spin_unlock_irq(&phba->hbalock); 3397 3398		/* If the event is not for currently used fcf do nothing */ 3399		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3400			break; 3401 3402		/* 3403		 * Otherwise, request the port to rediscover the entire FCF 3404		 * table for a fast recovery from the case that the current FCF 3405		 * is no longer valid, as we are not in the middle of the FCF 3406		 * failover process already. 3407		 */ 3408		spin_lock_irq(&phba->hbalock); 3409		/* Mark the fast failover process in progress */ 3410		phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3411		spin_unlock_irq(&phba->hbalock); 3412 3413		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3414				"2771 Start FCF fast failover process due to " 3415				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3416				"\n", acqe_fcoe->event_tag, acqe_fcoe->index); 3417		rc = lpfc_sli4_redisc_fcf_table(phba); 3418		if (rc) { 3419			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3420					LOG_DISCOVERY, 3421					"2772 Issue FCF rediscover mailbox " 3422					"command failed, fail through to FCF " 3423					"dead event\n"); 3424			spin_lock_irq(&phba->hbalock); 3425			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3426			spin_unlock_irq(&phba->hbalock); 3427			/* 3428			 * Last resort will fail over by treating this 3429			 * as a link down to FCF registration. 3430			 */ 3431			lpfc_sli4_fcf_dead_failthrough(phba); 3432		} else { 3433			/* Reset FCF roundrobin bmask for new discovery */ 3434			memset(phba->fcf.fcf_rr_bmask, 0, 3435			       sizeof(*phba->fcf.fcf_rr_bmask)); 3436			/* 3437			 * Handling fast FCF failover to a DEAD FCF event is 3438			 * considered equivalent to receiving CVL to all vports. 3439			 */ 3440			lpfc_sli4_perform_all_vport_cvl(phba); 3441		} 3442		break; 3443	case LPFC_FCOE_EVENT_TYPE_CVL: 3444		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3445			"2718 Clear Virtual Link Received for VPI 0x%x" 3446			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3447		vport = lpfc_find_vport_by_vpid(phba, 3448				acqe_fcoe->index - phba->vpi_base); 3449		ndlp = lpfc_sli4_perform_vport_cvl(vport); 3450		if (!ndlp) 3451			break; 3452		active_vlink_present = 0; 3453 3454		vports = lpfc_create_vport_work_array(phba); 3455		if (vports) { 3456			for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3457					i++) { 3458				if ((!(vports[i]->fc_flag & 3459					FC_VPORT_CVL_RCVD)) && 3460					(vports[i]->port_state > LPFC_FDISC)) { 3461					active_vlink_present = 1; 3462					break; 3463				} 3464			} 3465			lpfc_destroy_vport_work_array(phba, vports); 3466		} 3467 3468		if (active_vlink_present) { 3469			/* 3470			 * If there are other active VLinks present, 3471			 * re-instantiate the Vlink using FDISC. 3472			 */ 3473			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3474			shost = lpfc_shost_from_vport(vport); 3475			spin_lock_irq(shost->host_lock); 3476			ndlp->nlp_flag |= NLP_DELAY_TMO; 3477			spin_unlock_irq(shost->host_lock); 3478			ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3479			vport->port_state = LPFC_FDISC; 3480		} else { 3481			/* 3482			 * Otherwise, we request port to rediscover 3483			 * the entire FCF table for a fast recovery 3484			 * from the possible case that the current FCF 3485			 * is no longer valid, if we are not already 3486			 * in the FCF failover process. 
3487			 */ 3488			spin_lock_irq(&phba->hbalock); 3489			if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3490				spin_unlock_irq(&phba->hbalock); 3491				break; 3492			} 3493			/* Mark the fast failover process in progress */ 3494			phba->fcf.fcf_flag |= FCF_ACVL_DISC; 3495			spin_unlock_irq(&phba->hbalock); 3496			lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3497					LOG_DISCOVERY, 3498					"2773 Start FCF fast failover due " 3499					"to CVL event: evt_tag:x%x\n", 3500					acqe_fcoe->event_tag); 3501			rc = lpfc_sli4_redisc_fcf_table(phba); 3502			if (rc) { 3503				lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3504						LOG_DISCOVERY, 3505						"2774 Issue FCF rediscover " 3506						"mailbox command failed, fail " 3507						"through to CVL event\n"); 3508				spin_lock_irq(&phba->hbalock); 3509				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 3510				spin_unlock_irq(&phba->hbalock); 3511				/* 3512				 * Last resort will be to retry on the 3513				 * currently registered FCF entry. 3514				 */ 3515				lpfc_retry_pport_discovery(phba); 3516			} else 3517				/* 3518				 * Reset FCF roundrobin bmask for new 3519				 * discovery. 3520				 */ 3521				memset(phba->fcf.fcf_rr_bmask, 0, 3522				       sizeof(*phba->fcf.fcf_rr_bmask)); 3523		} 3524		break; 3525	default: 3526		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3527			"0288 Unknown FCoE event type 0x%x event tag " 3528			"0x%x\n", event_type, acqe_fcoe->event_tag); 3529		break; 3530	} 3531} 3532 3533/** 3534 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 3535 * @phba: pointer to lpfc hba data structure. 3536 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 3537 * 3538 * This routine is to handle the SLI4 asynchronous dcbx event. 3539 **/ 3540static void 3541lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3542			 struct lpfc_acqe_dcbx *acqe_dcbx) 3543{ 3544	phba->fc_eventTag = acqe_dcbx->event_tag; 3545	lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3546			"0290 The SLI4 DCBX asynchronous event is not " 3547			"handled yet\n"); 3548} 3549 3550/** 3551 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 3552 * @phba: pointer to lpfc hba data structure. 3553 * @acqe_grp5: pointer to the async grp5 completion queue entry. 3554 * 3555 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 3556 * is an asynchronous notification of a logical link speed change. The Port 3557 * reports the logical link speed in units of 10Mbps. 3558 **/ 3559static void 3560lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 3561			 struct lpfc_acqe_grp5 *acqe_grp5) 3562{ 3563	uint16_t prev_ll_spd; 3564 3565	phba->fc_eventTag = acqe_grp5->event_tag; 3566	phba->fcoe_eventtag = acqe_grp5->event_tag; 3567	prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 3568	phba->sli4_hba.link_state.logical_speed = 3569		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); 3570	lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3571			"2789 GRP5 Async Event: Updating logical link speed " 3572			"from %dMbps to %dMbps\n", (prev_ll_spd * 10), 3573			(phba->sli4_hba.link_state.logical_speed*10)); 3574} 3575 3576/** 3577 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events 3578 * @phba: pointer to lpfc hba data structure. 3579 * 3580 * This routine is invoked by the worker thread to process all the pending 3581 * SLI4 asynchronous events. 
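 *
 * Drain pattern used below (sketch): each event is removed under hbalock,
 * dispatched without the lock held, then returned to the free pool:
 *
 *   spin_lock_irq(&phba->hbalock);
 *   list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
 *                    cq_event, struct lpfc_cq_event, list);
 *   spin_unlock_irq(&phba->hbalock);
 *   ...dispatch on bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)...
 *   lpfc_sli4_cq_event_release(phba, cq_event);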
3582 **/ 3583void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 3584{ 3585 struct lpfc_cq_event *cq_event; 3586 3587 /* First, declare the async event has been handled */ 3588 spin_lock_irq(&phba->hbalock); 3589 phba->hba_flag &= ~ASYNC_EVENT; 3590 spin_unlock_irq(&phba->hbalock); 3591 /* Now, handle all the async events */ 3592 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 3593 /* Get the first event from the head of the event queue */ 3594 spin_lock_irq(&phba->hbalock); 3595 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 3596 cq_event, struct lpfc_cq_event, list); 3597 spin_unlock_irq(&phba->hbalock); 3598 /* Process the asynchronous event */ 3599 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 3600 case LPFC_TRAILER_CODE_LINK: 3601 lpfc_sli4_async_link_evt(phba, 3602 &cq_event->cqe.acqe_link); 3603 break; 3604 case LPFC_TRAILER_CODE_FCOE: 3605 lpfc_sli4_async_fcoe_evt(phba, 3606 &cq_event->cqe.acqe_fcoe); 3607 break; 3608 case LPFC_TRAILER_CODE_DCBX: 3609 lpfc_sli4_async_dcbx_evt(phba, 3610 &cq_event->cqe.acqe_dcbx); 3611 break; 3612 case LPFC_TRAILER_CODE_GRP5: 3613 lpfc_sli4_async_grp5_evt(phba, 3614 &cq_event->cqe.acqe_grp5); 3615 break; 3616 default: 3617 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3618 "1804 Invalid asynchrous event code: " 3619 "x%x\n", bf_get(lpfc_trailer_code, 3620 &cq_event->cqe.mcqe_cmpl)); 3621 break; 3622 } 3623 /* Free the completion event processed to the free pool */ 3624 lpfc_sli4_cq_event_release(phba, cq_event); 3625 } 3626} 3627 3628/** 3629 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 3630 * @phba: pointer to lpfc hba data structure. 3631 * 3632 * This routine is invoked by the worker thread to process FCF table 3633 * rediscovery pending completion event. 3634 **/ 3635void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 3636{ 3637 int rc; 3638 3639 spin_lock_irq(&phba->hbalock); 3640 /* Clear FCF rediscovery timeout event */ 3641 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 3642 /* Clear driver fast failover FCF record flag */ 3643 phba->fcf.failover_rec.flag = 0; 3644 /* Set state for FCF fast failover */ 3645 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 3646 spin_unlock_irq(&phba->hbalock); 3647 3648 /* Scan FCF table from the first entry to re-discover SAN */ 3649 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3650 "2777 Start FCF table scan after FCF " 3651 "rediscovery quiescent period over\n"); 3652 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3653 if (rc) 3654 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3655 "2747 Issue FCF scan read FCF mailbox " 3656 "command failed 0x%x\n", rc); 3657} 3658 3659/** 3660 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3661 * @phba: pointer to lpfc hba data structure. 3662 * @dev_grp: The HBA PCI-Device group number. 3663 * 3664 * This routine is invoked to set up the per HBA PCI-Device group function 3665 * API jump table entries. 
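 *
 * Caller sketch (the exact probe-time call site and the error label are
 * assumptions, shown for illustration only):
 *
 *   rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *   if (rc)
 *           goto out_free_hba;      one of the API jump tables failed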
3666 * 3667 * Return: 0 if success, otherwise -ENODEV 3668 **/ 3669int 3670lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3671{ 3672 int rc; 3673 3674 /* Set up lpfc PCI-device group */ 3675 phba->pci_dev_grp = dev_grp; 3676 3677 /* The LPFC_PCI_DEV_OC uses SLI4 */ 3678 if (dev_grp == LPFC_PCI_DEV_OC) 3679 phba->sli_rev = LPFC_SLI_REV4; 3680 3681 /* Set up device INIT API function jump table */ 3682 rc = lpfc_init_api_table_setup(phba, dev_grp); 3683 if (rc) 3684 return -ENODEV; 3685 /* Set up SCSI API function jump table */ 3686 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 3687 if (rc) 3688 return -ENODEV; 3689 /* Set up SLI API function jump table */ 3690 rc = lpfc_sli_api_table_setup(phba, dev_grp); 3691 if (rc) 3692 return -ENODEV; 3693 /* Set up MBOX API function jump table */ 3694 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 3695 if (rc) 3696 return -ENODEV; 3697 3698 return 0; 3699} 3700 3701/** 3702 * lpfc_log_intr_mode - Log the active interrupt mode 3703 * @phba: pointer to lpfc hba data structure. 3704 * @intr_mode: active interrupt mode adopted. 3705 * 3706 * This routine is invoked to log the currently used active interrupt mode 3707 * to the device. 3708 **/ 3709static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3710{ 3711 switch (intr_mode) { 3712 case 0: 3713 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3714 "0470 Enabled INTx interrupt mode.\n"); 3715 break; 3716 case 1: 3717 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3718 "0481 Enabled MSI interrupt mode.\n"); 3719 break; 3720 case 2: 3721 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3722 "0480 Enabled MSI-X interrupt mode.\n"); 3723 break; 3724 default: 3725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3726 "0482 Illegal interrupt mode.\n"); 3727 break; 3728 } 3729 return; 3730} 3731 3732/** 3733 * lpfc_enable_pci_dev - Enable a generic PCI device. 3734 * @phba: pointer to lpfc hba data structure. 3735 * 3736 * This routine is invoked to enable the PCI device that is common to all 3737 * PCI devices. 3738 * 3739 * Return codes 3740 * 0 - successful 3741 * other values - error 3742 **/ 3743static int 3744lpfc_enable_pci_dev(struct lpfc_hba *phba) 3745{ 3746 struct pci_dev *pdev; 3747 int bars; 3748 3749 /* Obtain PCI device reference */ 3750 if (!phba->pcidev) 3751 goto out_error; 3752 else 3753 pdev = phba->pcidev; 3754 /* Select PCI BARs */ 3755 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3756 /* Enable PCI device */ 3757 if (pci_enable_device_mem(pdev)) 3758 goto out_error; 3759 /* Request PCI resource for the device */ 3760 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3761 goto out_disable_device; 3762 /* Set up device as PCI master and save state for EEH */ 3763 pci_set_master(pdev); 3764 pci_try_set_mwi(pdev); 3765 pci_save_state(pdev); 3766 3767 return 0; 3768 3769out_disable_device: 3770 pci_disable_device(pdev); 3771out_error: 3772 return -ENODEV; 3773} 3774 3775/** 3776 * lpfc_disable_pci_dev - Disable a generic PCI device. 3777 * @phba: pointer to lpfc hba data structure. 3778 * 3779 * This routine is invoked to disable the PCI device that is common to all 3780 * PCI devices.
3781 **/ 3782static void 3783lpfc_disable_pci_dev(struct lpfc_hba *phba) 3784{ 3785 struct pci_dev *pdev; 3786 int bars; 3787 3788 /* Obtain PCI device reference */ 3789 if (!phba->pcidev) 3790 return; 3791 else 3792 pdev = phba->pcidev; 3793 /* Select PCI BARs */ 3794 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3795 /* Release PCI resource and disable PCI device */ 3796 pci_release_selected_regions(pdev, bars); 3797 pci_disable_device(pdev); 3798 /* Null out PCI private reference to driver */ 3799 pci_set_drvdata(pdev, NULL); 3800 3801 return; 3802} 3803 3804/** 3805 * lpfc_reset_hba - Reset a hba 3806 * @phba: pointer to lpfc hba data structure. 3807 * 3808 * This routine is invoked to reset a hba device. It brings the HBA 3809 * offline, performs a board restart, and then brings the board back 3810 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 3811 * any outstanding mailbox commands. 3812 **/ 3813void 3814lpfc_reset_hba(struct lpfc_hba *phba) 3815{ 3816 /* If resets are disabled then set error state and return. */ 3817 if (!phba->cfg_enable_hba_reset) { 3818 phba->link_state = LPFC_HBA_ERROR; 3819 return; 3820 } 3821 lpfc_offline_prep(phba); 3822 lpfc_offline(phba); 3823 lpfc_sli_brdrestart(phba); 3824 lpfc_online(phba); 3825 lpfc_unblock_mgmt_io(phba); 3826} 3827 3828/** 3829 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3830 * @phba: pointer to lpfc hba data structure. 3831 * 3832 * This routine is invoked to set up the driver internal resources specific to 3833 * support the SLI-3 HBA device it is attached to. 3834 * 3835 * Return codes 3836 * 0 - successful 3837 * other values - error 3838 **/ 3839static int 3840lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3841{ 3842 struct lpfc_sli *psli; 3843 3844 /* 3845 * Initialize timers used by driver 3846 */ 3847 3848 /* Heartbeat timer */ 3849 init_timer(&phba->hb_tmofunc); 3850 phba->hb_tmofunc.function = lpfc_hb_timeout; 3851 phba->hb_tmofunc.data = (unsigned long)phba; 3852 3853 psli = &phba->sli; 3854 /* MBOX heartbeat timer */ 3855 init_timer(&psli->mbox_tmo); 3856 psli->mbox_tmo.function = lpfc_mbox_timeout; 3857 psli->mbox_tmo.data = (unsigned long) phba; 3858 /* FCP polling mode timer */ 3859 init_timer(&phba->fcp_poll_timer); 3860 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3861 phba->fcp_poll_timer.data = (unsigned long) phba; 3862 /* Fabric block timer */ 3863 init_timer(&phba->fabric_block_timer); 3864 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3865 phba->fabric_block_timer.data = (unsigned long) phba; 3866 /* EA polling mode timer */ 3867 init_timer(&phba->eratt_poll); 3868 phba->eratt_poll.function = lpfc_poll_eratt; 3869 phba->eratt_poll.data = (unsigned long) phba; 3870 3871 /* Host attention work mask setup */ 3872 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3873 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3874 3875 /* Get all the module params for configuring this host */ 3876 lpfc_get_cfgparam(phba); 3877 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 3878 phba->menlo_flag |= HBA_MENLO_SUPPORT; 3879 /* check for menlo minimum sg count */ 3880 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 3881 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 3882 } 3883 3884 /* 3885 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size 3886 * used to create the sg_dma_buf_pool must be dynamically calculated. 3887 * 2 segments are added since the IOCB needs a command and response bde.
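 * A worked example (the segment count here is an assumed module-parameter
 * setting, chosen only for illustration): with cfg_sg_seg_cnt = 64,
 *
 *	sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)
 *			  + (64 + 2) * sizeof(struct ulp_bde64)
 *
 * i.e. one BDE per data segment plus the command and response BDEs.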
3888 */ 3889 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 3890 sizeof(struct fcp_rsp) + 3891 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 3892 3893 if (phba->cfg_enable_bg) { 3894 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 3895 phba->cfg_sg_dma_buf_size += 3896 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 3897 } 3898 3899 /* Also reinitialize the host templates with new values. */ 3900 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3901 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3902 3903 phba->max_vpi = LPFC_MAX_VPI; 3904 /* This will be set to the correct value after config_port mbox */ 3905 phba->max_vports = 0; 3906 3907 /* 3908 * Initialize the SLI Layer to run with lpfc HBAs. 3909 */ 3910 lpfc_sli_setup(phba); 3911 lpfc_sli_queue_setup(phba); 3912 3913 /* Allocate device driver memory */ 3914 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 3915 return -ENOMEM; 3916 3917 return 0; 3918} 3919 3920/** 3921 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 3922 * @phba: pointer to lpfc hba data structure. 3923 * 3924 * This routine is invoked to unset the driver internal resources set up 3925 * specifically for supporting the SLI-3 HBA device it is attached to. 3926 **/ 3927static void 3928lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 3929{ 3930 /* Free device driver memory allocated */ 3931 lpfc_mem_free_all(phba); 3932 3933 return; 3934} 3935 3936/** 3937 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 3938 * @phba: pointer to lpfc hba data structure. 3939 * 3940 * This routine is invoked to set up the driver internal resources specific to 3941 * support the SLI-4 HBA device it is attached to. 3942 * 3943 * Return codes 3944 * 0 - successful 3945 * other values - error 3946 **/ 3947static int 3948lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 3949{ 3950 struct lpfc_sli *psli; 3951 LPFC_MBOXQ_t *mboxq; 3952 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 3953 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 3954 struct lpfc_mqe *mqe; 3955 int longs; 3956 3957 /* Before proceeding, wait for POST done and device ready */ 3958 rc = lpfc_sli4_post_status_check(phba); 3959 if (rc) 3960 return -ENODEV; 3961 3962 /* 3963 * Initialize timers used by driver 3964 */ 3965 3966 /* Heartbeat timer */ 3967 init_timer(&phba->hb_tmofunc); 3968 phba->hb_tmofunc.function = lpfc_hb_timeout; 3969 phba->hb_tmofunc.data = (unsigned long)phba; 3970 3971 psli = &phba->sli; 3972 /* MBOX heartbeat timer */ 3973 init_timer(&psli->mbox_tmo); 3974 psli->mbox_tmo.function = lpfc_mbox_timeout; 3975 psli->mbox_tmo.data = (unsigned long) phba; 3976 /* Fabric block timer */ 3977 init_timer(&phba->fabric_block_timer); 3978 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3979 phba->fabric_block_timer.data = (unsigned long) phba; 3980 /* EA polling mode timer */ 3981 init_timer(&phba->eratt_poll); 3982 phba->eratt_poll.function = lpfc_poll_eratt; 3983 phba->eratt_poll.data = (unsigned long) phba; 3984 /* FCF rediscover timer */ 3985 init_timer(&phba->fcf.redisc_wait); 3986 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 3987 phba->fcf.redisc_wait.data = (unsigned long)phba; 3988 3989 /* 3990 * We need to do a READ_CONFIG mailbox command here before 3991 * calling lpfc_get_cfgparam. For VFs this will report the 3992 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 3993 * All of the resources allocated 3994 * for this Port are tied to these values.
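 * Note: the READ_CONFIG is not issued at this point; it is performed
 * further down in this routine via lpfc_sli4_read_config(), once
 * lpfc_create_bootstrap_mbox() has set up a way to post mailbox
 * commands to the port.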
3995 */ 3996 /* Get all the module params for configuring this host */ 3997 lpfc_get_cfgparam(phba); 3998 phba->max_vpi = LPFC_MAX_VPI; 3999 /* This will be set to the correct value after the read_config mbox */ 4000 phba->max_vports = 0; 4001 4002 /* Program the default value of vlan_id and fc_map */ 4003 phba->valid_vlan = 0; 4004 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4005 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4006 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4007 4008 /* 4009 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size 4010 * used to create the sg_dma_buf_pool must be dynamically calculated. 4011 * 2 segments are added since the IOCB needs a command and response bde. 4012 * To ensure that the scsi sgl does not cross a 4k page boundary, the 4013 * sgl size must be a power of 2; the loop below rounds the required size up to the next power of 2, capped at max_buf_size. 4014 */ 4015 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4016 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); 4017 /* Feature Level 1 hardware is limited to 2 pages */ 4018 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) == 4019 LPFC_SLI_INTF_FEATURELEVEL1_1)) 4020 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4021 else 4022 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4023 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 4024 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 4025 dma_buf_size = dma_buf_size << 1) 4026 ; 4027 if (dma_buf_size == max_buf_size) 4028 phba->cfg_sg_seg_cnt = (dma_buf_size - 4029 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 4030 (2 * sizeof(struct sli4_sge))) / 4031 sizeof(struct sli4_sge); 4032 phba->cfg_sg_dma_buf_size = dma_buf_size; 4033 4034 /* Initialize buffer queue management fields */ 4035 hbq_count = lpfc_sli_hbq_count(); 4036 for (i = 0; i < hbq_count; ++i) 4037 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4038 INIT_LIST_HEAD(&phba->rb_pend_list); 4039 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 4040 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 4041 4042 /* 4043 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 4044 */ 4045 /* Initialize the Abort scsi buffer list used by driver */ 4046 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 4047 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 4048 /* This abort list is used by the worker thread */ 4049 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4050 4051 /* 4052 * Initialize driver internal slow-path work queues 4053 */ 4054 4055 /* Driver internal slow-path CQ Event pool */ 4056 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 4057 /* Response IOCB work queue list */ 4058 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 4059 /* Asynchronous event CQ Event work queue list */ 4060 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 4061 /* Fast-path XRI aborted CQ Event work queue list */ 4062 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 4063 /* Slow-path XRI aborted CQ Event work queue list */ 4064 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 4065 /* Receive queue CQ Event work queue list */ 4066 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4067 4068 /* Initialize the driver internal SLI layer lists.
*/ 4069 lpfc_sli_setup(phba); 4070 lpfc_sli_queue_setup(phba); 4071 4072 /* Allocate device driver memory */ 4073 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 4074 if (rc) 4075 return -ENOMEM; 4076 4077 /* Create the bootstrap mailbox command */ 4078 rc = lpfc_create_bootstrap_mbox(phba); 4079 if (unlikely(rc)) 4080 goto out_free_mem; 4081 4082 /* Set up the host's endian order with the device. */ 4083 rc = lpfc_setup_endian_order(phba); 4084 if (unlikely(rc)) 4085 goto out_free_bsmbx; 4086 4087 rc = lpfc_sli4_fw_cfg_check(phba); 4088 if (unlikely(rc)) 4089 goto out_free_bsmbx; 4090 4091 /* Set up the hba's configuration parameters. */ 4092 rc = lpfc_sli4_read_config(phba); 4093 if (unlikely(rc)) 4094 goto out_free_bsmbx; 4095 4096 /* Perform a function reset */ 4097 rc = lpfc_pci_function_reset(phba); 4098 if (unlikely(rc)) 4099 goto out_free_bsmbx; 4100 4101 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4102 GFP_KERNEL); 4103 if (!mboxq) { 4104 rc = -ENOMEM; 4105 goto out_free_bsmbx; 4106 } 4107 4108 /* Get the Supported Pages. It is always available. */ 4109 lpfc_supported_pages(mboxq); 4110 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4111 if (unlikely(rc)) { 4112 rc = -EIO; 4113 mempool_free(mboxq, phba->mbox_mem_pool); 4114 goto out_free_bsmbx; 4115 } 4116 4117 mqe = &mboxq->u.mqe; 4118 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4119 LPFC_MAX_SUPPORTED_PAGES); 4120 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4121 switch (pn_page[i]) { 4122 case LPFC_SLI4_PARAMETERS: 4123 phba->sli4_hba.pc_sli4_params.supported = 1; 4124 break; 4125 default: 4126 break; 4127 } 4128 } 4129 4130 /* Read the port's SLI4 Parameters capabilities if supported. */ 4131 if (phba->sli4_hba.pc_sli4_params.supported) 4132 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4133 mempool_free(mboxq, phba->mbox_mem_pool); 4134 if (rc) { 4135 rc = -EIO; 4136 goto out_free_bsmbx; 4137 } 4138 /* Create all the SLI4 queues */ 4139 rc = lpfc_sli4_queue_create(phba); 4140 if (rc) 4141 goto out_free_bsmbx; 4142 4143 /* Create driver internal CQE event pool */ 4144 rc = lpfc_sli4_cq_event_pool_create(phba); 4145 if (rc) 4146 goto out_destroy_queue; 4147 4148 /* Initialize and populate the sgl list per host */ 4149 rc = lpfc_init_sgl_list(phba); 4150 if (rc) { 4151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4152 "1400 Failed to initialize sgl list.\n"); 4153 goto out_destroy_cq_event_pool; 4154 } 4155 rc = lpfc_init_active_sgl_array(phba); 4156 if (rc) { 4157 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4158 "1430 Failed to initialize active sgl array.\n"); 4159 goto out_free_sgl_list; 4160 } 4161 4162 rc = lpfc_sli4_init_rpi_hdrs(phba); 4163 if (rc) { 4164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4165 "1432 Failed to initialize rpi headers.\n"); 4166 goto out_free_active_sgl; 4167 } 4168 4169 /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4170 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4171 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4172 GFP_KERNEL); 4173 if (!phba->fcf.fcf_rr_bmask) { 4174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4175 "2759 Failed to allocate memory for FCF round " 4176 "robin failover bmask\n"); 4177 goto out_remove_rpi_hdrs; 4178 } 4179 4180 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4181 phba->cfg_fcp_eq_count), GFP_KERNEL); 4182 if (!phba->sli4_hba.fcp_eq_hdl) { 4183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4184 "2572 Failed to allocate memory for fast-path " 4185 "per-EQ handle array\n");
4186 goto out_free_fcf_rr_bmask; 4187 } 4188 4189 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4190 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4191 if (!phba->sli4_hba.msix_entries) { 4192 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4193 "2573 Failed to allocate memory for msi-x " 4194 "interrupt vector entries\n"); 4195 goto out_free_fcp_eq_hdl; 4196 } 4197 4198 return rc; 4199 4200out_free_fcp_eq_hdl: 4201 kfree(phba->sli4_hba.fcp_eq_hdl); 4202out_free_fcf_rr_bmask: 4203 kfree(phba->fcf.fcf_rr_bmask); 4204out_remove_rpi_hdrs: 4205 lpfc_sli4_remove_rpi_hdrs(phba); 4206out_free_active_sgl: 4207 lpfc_free_active_sgl(phba); 4208out_free_sgl_list: 4209 lpfc_free_sgl_list(phba); 4210out_destroy_cq_event_pool: 4211 lpfc_sli4_cq_event_pool_destroy(phba); 4212out_destroy_queue: 4213 lpfc_sli4_queue_destroy(phba); 4214out_free_bsmbx: 4215 lpfc_destroy_bootstrap_mbox(phba); 4216out_free_mem: 4217 lpfc_mem_free(phba); 4218 return rc; 4219} 4220 4221/** 4222 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 4223 * @phba: pointer to lpfc hba data structure. 4224 * 4225 * This routine is invoked to unset the driver internal resources set up 4226 * specifically for supporting the SLI-4 HBA device it is attached to. 4227 **/ 4228static void 4229lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 4230{ 4231 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4232 4233 /* Free memory allocated for msi-x interrupt vector entries */ 4234 kfree(phba->sli4_hba.msix_entries); 4235 4236 /* Free memory allocated for fast-path work queue handles */ 4237 kfree(phba->sli4_hba.fcp_eq_hdl); 4238 4239 /* Free the allocated rpi headers. */ 4240 lpfc_sli4_remove_rpi_hdrs(phba); 4241 lpfc_sli4_remove_rpis(phba); 4242 4243 /* Free eligible FCF index bmask */ 4244 kfree(phba->fcf.fcf_rr_bmask); 4245 4246 /* Free the ELS sgl list */ 4247 lpfc_free_active_sgl(phba); 4248 lpfc_free_sgl_list(phba); 4249 4250 /* Free the SCSI sgl management array */ 4251 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4252 4253 /* Free the SLI4 queues */ 4254 lpfc_sli4_queue_destroy(phba); 4255 4256 /* Free the completion queue EQ event pool */ 4257 lpfc_sli4_cq_event_release_all(phba); 4258 lpfc_sli4_cq_event_pool_destroy(phba); 4259 4260 /* Free the bsmbx region. */ 4261 lpfc_destroy_bootstrap_mbox(phba); 4262 4263 /* Free the SLI Layer memory with SLI4 HBAs */ 4264 lpfc_mem_free_all(phba); 4265 4266 /* Free the current connect table */ 4267 list_for_each_entry_safe(conn_entry, next_conn_entry, 4268 &phba->fcf_conn_rec_list, list) { 4269 list_del_init(&conn_entry->list); 4270 kfree(conn_entry); 4271 } 4272 4273 return; 4274} 4275 4276/** 4277 * lpfc_init_api_table_setup - Set up init api function jump table 4278 * @phba: The hba struct for which this call is being executed. 4279 * @dev_grp: The HBA PCI-Device group number. 4280 * 4281 * This routine sets up the device INIT interface API function jump table 4282 * in the @phba struct. 4283 * 4284 * Returns: 0 - success, -ENODEV - failure.
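 *
 * A sketch of the resulting dispatch (illustrative only): after this
 * routine runs with @dev_grp == LPFC_PCI_DEV_LP, a later call such as
 *
 *	phba->lpfc_hba_down_post(phba);
 *
 * invokes lpfc_hba_down_post_s3(), while LPFC_PCI_DEV_OC routes the same
 * call to lpfc_hba_down_post_s4().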
4285 **/ 4286int 4287lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4288{ 4289 phba->lpfc_hba_init_link = lpfc_hba_init_link; 4290 phba->lpfc_hba_down_link = lpfc_hba_down_link; 4291 switch (dev_grp) { 4292 case LPFC_PCI_DEV_LP: 4293 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4294 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 4295 phba->lpfc_stop_port = lpfc_stop_port_s3; 4296 break; 4297 case LPFC_PCI_DEV_OC: 4298 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 4299 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 4300 phba->lpfc_stop_port = lpfc_stop_port_s4; 4301 break; 4302 default: 4303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4304 "1431 Invalid HBA PCI-device group: 0x%x\n", 4305 dev_grp); 4306 return -ENODEV; 4307 break; 4308 } 4309 return 0; 4310} 4311 4312/** 4313 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 4314 * @phba: pointer to lpfc hba data structure. 4315 * 4316 * This routine is invoked to set up the driver internal resources before the 4317 * device specific resource setup to support the HBA device it is attached to. 4318 * 4319 * Return codes 4320 * 0 - successful 4321 * other values - error 4322 **/ 4323static int 4324lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 4325{ 4326 /* 4327 * Driver resources common to all SLI revisions 4328 */ 4329 atomic_set(&phba->fast_event_count, 0); 4330 spin_lock_init(&phba->hbalock); 4331 4332 /* Initialize ndlp management spinlock */ 4333 spin_lock_init(&phba->ndlp_lock); 4334 4335 INIT_LIST_HEAD(&phba->port_list); 4336 INIT_LIST_HEAD(&phba->work_list); 4337 init_waitqueue_head(&phba->wait_4_mlo_m_q); 4338 4339 /* Initialize the wait queue head for the kernel thread */ 4340 init_waitqueue_head(&phba->work_waitq); 4341 4342 /* Initialize the scsi buffer list used by driver for scsi IO */ 4343 spin_lock_init(&phba->scsi_buf_list_lock); 4344 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 4345 4346 /* Initialize the fabric iocb list */ 4347 INIT_LIST_HEAD(&phba->fabric_iocb_list); 4348 4349 /* Initialize list to save ELS buffers */ 4350 INIT_LIST_HEAD(&phba->elsbuf); 4351 4352 /* Initialize FCF connection rec list */ 4353 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 4354 4355 return 0; 4356} 4357 4358/** 4359 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 4360 * @phba: pointer to lpfc hba data structure. 4361 * 4362 * This routine is invoked to set up the driver internal resources after the 4363 * device specific resource setup to support the HBA device it is attached to. 4364 * 4365 * Return codes 4366 * 0 - successful 4367 * other values - error 4368 **/ 4369static int 4370lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 4371{ 4372 int error; 4373 4374 /* Startup the kernel thread for this host adapter. */ 4375 phba->worker_thread = kthread_run(lpfc_do_work, phba, 4376 "lpfc_worker_%d", phba->brd_no); 4377 if (IS_ERR(phba->worker_thread)) { 4378 error = PTR_ERR(phba->worker_thread); 4379 return error; 4380 } 4381 4382 return 0; 4383} 4384 4385/** 4386 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 4387 * @phba: pointer to lpfc hba data structure. 4388 * 4389 * This routine is invoked to unset the driver internal resources set up after 4390 * the device specific resource setup for supporting the HBA device it is 4391 * attached to.
4392 **/ 4393static void 4394lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 4395{ 4396 /* Stop kernel worker thread */ 4397 kthread_stop(phba->worker_thread); 4398} 4399 4400/** 4401 * lpfc_free_iocb_list - Free iocb list. 4402 * @phba: pointer to lpfc hba data structure. 4403 * 4404 * This routine is invoked to free the driver's IOCB list and memory. 4405 **/ 4406static void 4407lpfc_free_iocb_list(struct lpfc_hba *phba) 4408{ 4409 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 4410 4411 spin_lock_irq(&phba->hbalock); 4412 list_for_each_entry_safe(iocbq_entry, iocbq_next, 4413 &phba->lpfc_iocb_list, list) { 4414 list_del(&iocbq_entry->list); 4415 kfree(iocbq_entry); 4416 phba->total_iocbq_bufs--; 4417 } 4418 spin_unlock_irq(&phba->hbalock); 4419 4420 return; 4421} 4422 4423/** 4424 * lpfc_init_iocb_list - Allocate and initialize iocb list. 4425 * @phba: pointer to lpfc hba data structure. 4426 * 4427 * This routine is invoked to allocate and initialize the driver's IOCB 4428 * list and set up the IOCB tag array accordingly. 4429 * 4430 * Return codes 4431 * 0 - successful 4432 * other values - error 4433 **/ 4434static int 4435lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 4436{ 4437 struct lpfc_iocbq *iocbq_entry = NULL; 4438 uint16_t iotag; 4439 int i; 4440 4441 /* Initialize and populate the iocb list per host. */ 4442 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 4443 for (i = 0; i < iocb_count; i++) { 4444 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 4445 if (iocbq_entry == NULL) { 4446 printk(KERN_ERR "%s: only allocated %d iocbs of " 4447 "expected %d count. Unloading driver.\n", 4448 __func__, i, iocb_count); 4449 goto out_free_iocbq; 4450 } 4451 4452 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 4453 if (iotag == 0) { 4454 kfree(iocbq_entry); 4455 printk(KERN_ERR "%s: failed to allocate IOTAG. " 4456 "Unloading driver.\n", __func__); 4457 goto out_free_iocbq; 4458 } 4459 iocbq_entry->sli4_xritag = NO_XRI; 4460 4461 spin_lock_irq(&phba->hbalock); 4462 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 4463 phba->total_iocbq_bufs++; 4464 spin_unlock_irq(&phba->hbalock); 4465 } 4466 4467 return 0; 4468 4469out_free_iocbq: 4470 lpfc_free_iocb_list(phba); 4471 4472 return -ENOMEM; 4473} 4474 4475/** 4476 * lpfc_free_sgl_list - Free sgl list. 4477 * @phba: pointer to lpfc hba data structure. 4478 * 4479 * This routine is invoked to free the driver's sgl list and memory. 4480 **/ 4481static void 4482lpfc_free_sgl_list(struct lpfc_hba *phba) 4483{ 4484 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 4485 LIST_HEAD(sglq_list); 4486 4487 spin_lock_irq(&phba->hbalock); 4488 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 4489 spin_unlock_irq(&phba->hbalock); 4490 4491 list_for_each_entry_safe(sglq_entry, sglq_next, 4492 &sglq_list, list) { 4493 list_del(&sglq_entry->list); 4494 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 4495 kfree(sglq_entry); 4496 phba->sli4_hba.total_sglq_bufs--; 4497 } 4498 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4499} 4500 4501/** 4502 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 4503 * @phba: pointer to lpfc hba data structure. 4504 * 4505 * This routine is invoked to allocate the driver's active sgl memory. 4506 * This array will hold the sglq_entry's for active IOs.
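 *
 * Sizing note (a design inference from the allocation below, stated here
 * for clarity): the array holds max_xri pointers, so an active sglq can
 * later be found by directly indexing with its XRI value; the lookup
 * helpers themselves live elsewhere in the driver.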
4507 **/ 4508static int 4509lpfc_init_active_sgl_array(struct lpfc_hba *phba) 4510{ 4511 int size; 4512 size = sizeof(struct lpfc_sglq *); 4513 size *= phba->sli4_hba.max_cfg_param.max_xri; 4514 4515 phba->sli4_hba.lpfc_sglq_active_list = 4516 kzalloc(size, GFP_KERNEL); 4517 if (!phba->sli4_hba.lpfc_sglq_active_list) 4518 return -ENOMEM; 4519 return 0; 4520} 4521 4522/** 4523 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 4524 * @phba: pointer to lpfc hba data structure. 4525 * 4526 * This routine is invoked to walk through the array of active sglq entries 4527 * and free all of the resources. 4528 * This is just a place holder for now. 4529 **/ 4530static void 4531lpfc_free_active_sgl(struct lpfc_hba *phba) 4532{ 4533 kfree(phba->sli4_hba.lpfc_sglq_active_list); 4534} 4535 4536/** 4537 * lpfc_init_sgl_list - Allocate and initialize sgl list. 4538 * @phba: pointer to lpfc hba data structure. 4539 * 4540 * This routine is invoked to allocate and initialize the driver's sgl 4541 * list and set up the sgl xritag array accordingly. 4542 * 4543 * Return codes 4544 * 0 - successful 4545 * other values - error 4546 **/ 4547static int 4548lpfc_init_sgl_list(struct lpfc_hba *phba) 4549{ 4550 struct lpfc_sglq *sglq_entry = NULL; 4551 int i; 4552 int els_xri_cnt; 4553 4554 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4555 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4556 "2400 lpfc_init_sgl_list els %d.\n", 4557 els_xri_cnt); 4558 /* Initialize and populate the sglq list per host/VF. */ 4559 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4560 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 4561 4562 /* Sanity check on XRI management */ 4563 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 4564 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4565 "2562 No room left for SCSI XRI allocation: " 4566 "max_xri=%d, els_xri=%d\n", 4567 phba->sli4_hba.max_cfg_param.max_xri, 4568 els_xri_cnt); 4569 return -ENOMEM; 4570 } 4571 4572 /* Allocate memory for the ELS XRI management array */ 4573 phba->sli4_hba.lpfc_els_sgl_array = 4574 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), 4575 GFP_KERNEL); 4576 4577 if (!phba->sli4_hba.lpfc_els_sgl_array) { 4578 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4579 "2401 Failed to allocate memory for ELS " 4580 "XRI management array of size %d.\n", 4581 els_xri_cnt); 4582 return -ENOMEM; 4583 } 4584 4585 /* Keep the SCSI XRI into the XRI management array */ 4586 phba->sli4_hba.scsi_xri_max = 4587 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4588 phba->sli4_hba.scsi_xri_cnt = 0; 4589 4590 phba->sli4_hba.lpfc_scsi_psb_array = 4591 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4592 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4593 4594 if (!phba->sli4_hba.lpfc_scsi_psb_array) { 4595 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4596 "2563 Failed to allocate memory for SCSI " 4597 "XRI management array of size %d.\n", 4598 phba->sli4_hba.scsi_xri_max); 4599 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4600 return -ENOMEM; 4601 } 4602 4603 for (i = 0; i < els_xri_cnt; i++) { 4604 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); 4605 if (sglq_entry == NULL) { 4606 printk(KERN_ERR "%s: only allocated %d sgls of " 4607 "expected %d count.
Unloading driver.\n", 4608 __func__, i, els_xri_cnt); 4609 goto out_free_mem; 4610 } 4611 4612 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); 4613 if (sglq_entry->sli4_xritag == NO_XRI) { 4614 kfree(sglq_entry); 4615 printk(KERN_ERR "%s: failed to allocate XRI.\n" 4616 "Unloading driver.\n", __func__); 4617 goto out_free_mem; 4618 } 4619 sglq_entry->buff_type = GEN_BUFF_TYPE; 4620 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4621 if (sglq_entry->virt == NULL) { 4622 kfree(sglq_entry); 4623 printk(KERN_ERR "%s: failed to allocate mbuf.\n" 4624 "Unloading driver.\n", __func__); 4625 goto out_free_mem; 4626 } 4627 sglq_entry->sgl = sglq_entry->virt; 4628 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4629 4630 /* The list order is used by later block SGL registration */ 4631 spin_lock_irq(&phba->hbalock); 4632 sglq_entry->state = SGL_FREED; 4633 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4634 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4635 phba->sli4_hba.total_sglq_bufs++; 4636 spin_unlock_irq(&phba->hbalock); 4637 } 4638 return 0; 4639 4640out_free_mem: 4641 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4642 lpfc_free_sgl_list(phba); 4643 return -ENOMEM; 4644} 4645 4646/** 4647 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 4648 * @phba: pointer to lpfc hba data structure. 4649 * 4650 * This routine is invoked to post rpi header templates to the 4651 * HBA consistent with the SLI-4 interface spec. This routine 4652 * posts a PAGE_SIZE memory region to the port to hold up to 4653 * PAGE_SIZE / 64 rpi context headers. 4654 * No locks are held here because this is an initialization routine 4655 * called only from probe or lpfc_online when interrupts are not 4656 * enabled and the driver is reinitializing the device. 4657 * 4658 * Return codes 4659 * 0 - successful 4660 * -ENOMEM - No available memory 4661 * -EIO - The mailbox failed to complete successfully. 4662 **/ 4663int 4664lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4665{ 4666 int rc = 0; 4667 int longs; 4668 uint16_t rpi_count; 4669 struct lpfc_rpi_hdr *rpi_hdr; 4670 4671 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4672 4673 /* 4674 * Provision an rpi bitmask range for discovery. The total count 4675 * is the difference between max and base + 1. 4676 */ 4677 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4678 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4679 4680 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4681 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4682 GFP_KERNEL); 4683 if (!phba->sli4_hba.rpi_bmask) 4684 return -ENOMEM; 4685 4686 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 4687 if (!rpi_hdr) { 4688 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4689 "0391 Error during rpi post operation\n"); 4690 lpfc_sli4_remove_rpis(phba); 4691 rc = -ENODEV; 4692 } 4693 4694 return rc; 4695} 4696 4697/** 4698 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 4699 * @phba: pointer to lpfc hba data structure. 4700 * 4701 * This routine is invoked to allocate a single 4KB memory region to 4702 * support rpis and stores it in the phba. This single region 4703 * provides support for up to 64 rpis. The region is used globally 4704 * by the device. 4705 * 4706 * Returns: 4707 * A valid rpi hdr on success. 4708 * A NULL pointer on any failure.
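 *
 * Worked sizing example: the 4KB template region holds 64 rpi context
 * headers, i.e. 4096 / 64 = 64 bytes per header, which is why this
 * routine advances next_rpi by LPFC_RPI_HDR_COUNT once a region has
 * been queued for posting.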
4709 **/ 4710struct lpfc_rpi_hdr * 4711lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 4712{ 4713 uint16_t rpi_limit, curr_rpi_range; 4714 struct lpfc_dmabuf *dmabuf; 4715 struct lpfc_rpi_hdr *rpi_hdr; 4716 4717 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4718 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4719 4720 spin_lock_irq(&phba->hbalock); 4721 curr_rpi_range = phba->sli4_hba.next_rpi; 4722 spin_unlock_irq(&phba->hbalock); 4723 4724 /* 4725 * The port has a limited number of rpis. The increment here 4726 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 4727 * and to allow the full max_rpi range per port. 4728 */ 4729 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4730 return NULL; 4731 4732 /* 4733 * First allocate the protocol header region for the port. The 4734 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 4735 */ 4736 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4737 if (!dmabuf) 4738 return NULL; 4739 4740 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4741 LPFC_HDR_TEMPLATE_SIZE, 4742 &dmabuf->phys, 4743 GFP_KERNEL); 4744 if (!dmabuf->virt) { 4745 rpi_hdr = NULL; 4746 goto err_free_dmabuf; 4747 } 4748 4749 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 4750 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 4751 rpi_hdr = NULL; 4752 goto err_free_coherent; 4753 } 4754 4755 /* Save the rpi header data for cleanup later. */ 4756 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4757 if (!rpi_hdr) 4758 goto err_free_coherent; 4759 4760 rpi_hdr->dmabuf = dmabuf; 4761 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4762 rpi_hdr->page_count = 1; 4763 spin_lock_irq(&phba->hbalock); 4764 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4765 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4766 4767 /* 4768 * The next_rpi stores the next modulo-64 rpi value to post 4769 * in any subsequent rpi memory region postings. 4770 */ 4771 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4772 spin_unlock_irq(&phba->hbalock); 4773 return rpi_hdr; 4774 4775 err_free_coherent: 4776 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4777 dmabuf->virt, dmabuf->phys); 4778 err_free_dmabuf: 4779 kfree(dmabuf); 4780 return NULL; 4781} 4782 4783/** 4784 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4785 * @phba: pointer to lpfc hba data structure. 4786 * 4787 * This routine is invoked to remove all memory resources allocated 4788 * to support rpis. This routine presumes the caller has released all 4789 * rpis consumed by fabric or port logins and is prepared to have 4790 * the header pages removed. 4791 **/ 4792void 4793lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4794{ 4795 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4796 4797 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4798 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4799 list_del(&rpi_hdr->list); 4800 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4801 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4802 kfree(rpi_hdr->dmabuf); 4803 kfree(rpi_hdr); 4804 } 4805 4806 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4807 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4808} 4809 4810/** 4811 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4812 * @pdev: pointer to pci device data structure. 4813 * 4814 * This routine is invoked to allocate the driver hba data structure for an 4815 * HBA device.
If the allocation is successful, the phba reference to the 4816 * PCI device data structure is set. 4817 * 4818 * Return codes 4819 * pointer to @phba - successful 4820 * NULL - error 4821 **/ 4822static struct lpfc_hba * 4823lpfc_hba_alloc(struct pci_dev *pdev) 4824{ 4825 struct lpfc_hba *phba; 4826 4827 /* Allocate memory for HBA structure */ 4828 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4829 if (!phba) { 4830 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 4831 return NULL; 4832 } 4833 4834 /* Set reference to PCI device in HBA structure */ 4835 phba->pcidev = pdev; 4836 4837 /* Assign an unused board number */ 4838 phba->brd_no = lpfc_get_instance(); 4839 if (phba->brd_no < 0) { 4840 kfree(phba); 4841 return NULL; 4842 } 4843 4844 spin_lock_init(&phba->ct_ev_lock); 4845 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4846 4847 return phba; 4848} 4849 4850/** 4851 * lpfc_hba_free - Free driver hba data structure with a device. 4852 * @phba: pointer to lpfc hba data structure. 4853 * 4854 * This routine is invoked to free the driver hba data structure associated with an 4855 * HBA device. 4856 **/ 4857static void 4858lpfc_hba_free(struct lpfc_hba *phba) 4859{ 4860 /* Release the driver assigned board number */ 4861 idr_remove(&lpfc_hba_index, phba->brd_no); 4862 4863 kfree(phba); 4864 return; 4865} 4866 4867/** 4868 * lpfc_create_shost - Create hba physical port with associated scsi host. 4869 * @phba: pointer to lpfc hba data structure. 4870 * 4871 * This routine is invoked to create an HBA physical port and associate a SCSI 4872 * host with it. 4873 * 4874 * Return codes 4875 * 0 - successful 4876 * other values - error 4877 **/ 4878static int 4879lpfc_create_shost(struct lpfc_hba *phba) 4880{ 4881 struct lpfc_vport *vport; 4882 struct Scsi_Host *shost; 4883 4884 /* Initialize HBA FC structure */ 4885 phba->fc_edtov = FF_DEF_EDTOV; 4886 phba->fc_ratov = FF_DEF_RATOV; 4887 phba->fc_altov = FF_DEF_ALTOV; 4888 phba->fc_arbtov = FF_DEF_ARBTOV; 4889 4890 atomic_set(&phba->sdev_cnt, 0); 4891 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4892 if (!vport) 4893 return -ENODEV; 4894 4895 shost = lpfc_shost_from_vport(vport); 4896 phba->pport = vport; 4897 lpfc_debugfs_initialize(vport); 4898 /* Put reference to SCSI host into the driver's device private data */ 4899 pci_set_drvdata(phba->pcidev, shost); 4900 4901 return 0; 4902} 4903 4904/** 4905 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 4906 * @phba: pointer to lpfc hba data structure. 4907 * 4908 * This routine is invoked to destroy the HBA physical port and the associated 4909 * SCSI host. 4910 **/ 4911static void 4912lpfc_destroy_shost(struct lpfc_hba *phba) 4913{ 4914 struct lpfc_vport *vport = phba->pport; 4915 4916 /* Destroy physical port that is associated with the SCSI host */ 4917 destroy_port(vport); 4918 4919 return; 4920} 4921 4922/** 4923 * lpfc_setup_bg - Setup Block guard structures and debug areas. 4924 * @phba: pointer to lpfc hba data structure. 4925 * @shost: the shost to be used to detect Block guard settings. 4926 * 4927 * This routine sets up the local Block guard protocol settings for @shost. 4928 * This routine also allocates memory for debugging bg buffers.
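 *
 * Sizing note for the debug buffers below: pagecnt is a page-allocation
 * order, so __get_free_pages(GFP_KERNEL, pagecnt) returns 2^pagecnt pages.
 * With 4KB pages the initial order of 10 requests
 * (1 << PAGE_SHIFT) << 10 = 4MB; on failure the order is decremented and
 * the allocation retried, halving the request each time.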
4929 **/ 4930static void 4931lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 4932{ 4933 int pagecnt = 10; 4934 if (lpfc_prot_mask && lpfc_prot_guard) { 4935 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4936 "1478 Registering BlockGuard with the " 4937 "SCSI layer\n"); 4938 scsi_host_set_prot(shost, lpfc_prot_mask); 4939 scsi_host_set_guard(shost, lpfc_prot_guard); 4940 } 4941 if (!_dump_buf_data) { 4942 while (pagecnt) { 4943 spin_lock_init(&_dump_buf_lock); 4944 _dump_buf_data = 4945 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4946 if (_dump_buf_data) { 4947 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4948 "9043 BLKGRD: allocated %d pages for " 4949 "_dump_buf_data at 0x%p\n", 4950 (1 << pagecnt), _dump_buf_data); 4951 _dump_buf_data_order = pagecnt; 4952 memset(_dump_buf_data, 0, 4953 ((1 << PAGE_SHIFT) << pagecnt)); 4954 break; 4955 } else 4956 --pagecnt; 4957 } 4958 if (!_dump_buf_data_order) 4959 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4960 "9044 BLKGRD: ERROR unable to allocate " 4961 "memory for hexdump\n"); 4962 } else 4963 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4964 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 4965 "\n", _dump_buf_data); 4966 if (!_dump_buf_dif) { 4967 while (pagecnt) { 4968 _dump_buf_dif = 4969 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4970 if (_dump_buf_dif) { 4971 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4972 "9046 BLKGRD: allocated %d pages for " 4973 "_dump_buf_dif at 0x%p\n", 4974 (1 << pagecnt), _dump_buf_dif); 4975 _dump_buf_dif_order = pagecnt; 4976 memset(_dump_buf_dif, 0, 4977 ((1 << PAGE_SHIFT) << pagecnt)); 4978 break; 4979 } else 4980 --pagecnt; 4981 } 4982 if (!_dump_buf_dif_order) 4983 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4984 "9047 BLKGRD: ERROR unable to allocate " 4985 "memory for hexdump\n"); 4986 } else 4987 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4988 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 4989 _dump_buf_dif); 4990} 4991 4992/** 4993 * lpfc_post_init_setup - Perform necessary device post initialization setup. 4994 * @phba: pointer to lpfc hba data structure. 4995 * 4996 * This routine is invoked to perform all the necessary post initialization 4997 * setup for the device. 4998 **/ 4999static void 5000lpfc_post_init_setup(struct lpfc_hba *phba) 5001{ 5002 struct Scsi_Host *shost; 5003 struct lpfc_adapter_event_header adapter_event; 5004 5005 /* Get the default values for Model Name and Description */ 5006 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5007 5008 /* 5009 * hba setup may have changed the hba_queue_depth so we need to 5010 * adjust the value of can_queue. 5011 */ 5012 shost = pci_get_drvdata(phba->pcidev); 5013 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5014 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5015 lpfc_setup_bg(phba, shost); 5016 5017 lpfc_host_attrib_init(shost); 5018 5019 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5020 spin_lock_irq(shost->host_lock); 5021 lpfc_poll_start_timer(phba); 5022 spin_unlock_irq(shost->host_lock); 5023 } 5024 5025 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5026 "0428 Perform SCSI scan\n"); 5027 /* Send board arrival event to upper layer */ 5028 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5029 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5030 fc_host_post_vendor_event(shost, fc_get_event_number(), 5031 sizeof(adapter_event), 5032 (char *) &adapter_event, 5033 LPFC_NL_VENDOR_ID); 5034 return; 5035} 5036 5037/** 5038 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 
5039 * @phba: pointer to lpfc hba data structure. 5040 * 5041 * This routine is invoked to set up the PCI device memory space for device 5042 * with SLI-3 interface spec. 5043 * 5044 * Return codes 5045 * 0 - successful 5046 * other values - error 5047 **/ 5048static int 5049lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5050{ 5051 struct pci_dev *pdev; 5052 unsigned long bar0map_len, bar2map_len; 5053 int i, hbq_count; 5054 void *ptr; 5055 int error = -ENODEV; 5056 5057 /* Obtain PCI device reference */ 5058 if (!phba->pcidev) 5059 return error; 5060 else 5061 pdev = phba->pcidev; 5062 5063 /* Set the device DMA mask size */ 5064 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5065 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5066 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5067 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5068 return error; 5069 } 5070 } 5071 5072 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5073 * required by each mapping. 5074 */ 5075 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5076 bar0map_len = pci_resource_len(pdev, 0); 5077 5078 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5079 bar2map_len = pci_resource_len(pdev, 2); 5080 5081 /* Map HBA SLIM to a kernel virtual address. */ 5082 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5083 if (!phba->slim_memmap_p) { 5084 dev_printk(KERN_ERR, &pdev->dev, 5085 "ioremap failed for SLIM memory.\n"); 5086 goto out; 5087 } 5088 5089 /* Map HBA Control Registers to a kernel virtual address. */ 5090 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5091 if (!phba->ctrl_regs_memmap_p) { 5092 dev_printk(KERN_ERR, &pdev->dev, 5093 "ioremap failed for HBA control registers.\n"); 5094 goto out_iounmap_slim; 5095 } 5096 5097 /* Allocate memory for SLI-2 structures */ 5098 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5099 SLI2_SLIM_SIZE, 5100 &phba->slim2p.phys, 5101 GFP_KERNEL); 5102 if (!phba->slim2p.virt) 5103 goto out_iounmap; 5104 5105 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5106 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5107 phba->mbox_ext = (phba->slim2p.virt + 5108 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5109 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5110 phba->IOCBs = (phba->slim2p.virt + 5111 offsetof(struct lpfc_sli2_slim, IOCBs)); 5112 5113 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5114 lpfc_sli_hbq_size(), 5115 &phba->hbqslimp.phys, 5116 GFP_KERNEL); 5117 if (!phba->hbqslimp.virt) 5118 goto out_free_slim; 5119 5120 hbq_count = lpfc_sli_hbq_count(); 5121 ptr = phba->hbqslimp.virt; 5122 for (i = 0; i < hbq_count; ++i) { 5123 phba->hbqs[i].hbq_virt = ptr; 5124 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5125 ptr += (lpfc_hbq_defs[i]->entry_count * 5126 sizeof(struct lpfc_hbq_entry)); 5127 } 5128 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5129 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5130 5131 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5132 5133 INIT_LIST_HEAD(&phba->rb_pend_list); 5134 5135 phba->MBslimaddr = phba->slim_memmap_p; 5136 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5137 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5138 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5139 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5140 5141 return 0; 5142 5143out_free_slim: 5144 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 
5145 phba->slim2p.virt, phba->slim2p.phys); 5146out_iounmap: 5147 iounmap(phba->ctrl_regs_memmap_p); 5148out_iounmap_slim: 5149 iounmap(phba->slim_memmap_p); 5150out: 5151 return error; 5152} 5153 5154/** 5155 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5156 * @phba: pointer to lpfc hba data structure. 5157 * 5158 * This routine is invoked to unset the PCI device memory space for device 5159 * with SLI-3 interface spec. 5160 **/ 5161static void 5162lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5163{ 5164 struct pci_dev *pdev; 5165 5166 /* Obtain PCI device reference */ 5167 if (!phba->pcidev) 5168 return; 5169 else 5170 pdev = phba->pcidev; 5171 5172 /* Free coherent DMA memory allocated */ 5173 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5174 phba->hbqslimp.virt, phba->hbqslimp.phys); 5175 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5176 phba->slim2p.virt, phba->slim2p.phys); 5177 5178 /* I/O memory unmap */ 5179 iounmap(phba->ctrl_regs_memmap_p); 5180 iounmap(phba->slim_memmap_p); 5181 5182 return; 5183} 5184 5185/** 5186 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5187 * @phba: pointer to lpfc hba data structure. 5188 * 5189 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5190 * done and check status. 5191 * 5192 * Return 0 if successful, otherwise -ENODEV. 5193 **/ 5194int 5195lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5196{ 5197 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg; 5198 int i, port_error = -ENODEV; 5199 5200 if (!phba->sli4_hba.STAregaddr) 5201 return -ENODEV; 5202 5203 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5204 for (i = 0; i < 3000; i++) { 5205 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); 5206 /* Encountered fatal POST error, break out */ 5207 if (bf_get(lpfc_hst_state_perr, &sta_reg)) { 5208 port_error = -ENODEV; 5209 break; 5210 } 5211 if (LPFC_POST_STAGE_ARMFW_READY == 5212 bf_get(lpfc_hst_state_port_status, &sta_reg)) { 5213 port_error = 0; 5214 break; 5215 } 5216 msleep(10); 5217 } 5218 5219 if (port_error) 5220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5221 "1408 Failure HBA POST Status: sta_reg=0x%x, " 5222 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " 5223 "dl=x%x, pstatus=x%x\n", sta_reg.word0, 5224 bf_get(lpfc_hst_state_perr, &sta_reg), 5225 bf_get(lpfc_hst_state_sfi, &sta_reg), 5226 bf_get(lpfc_hst_state_nip, &sta_reg), 5227 bf_get(lpfc_hst_state_ipc, &sta_reg), 5228 bf_get(lpfc_hst_state_xrom, &sta_reg), 5229 bf_get(lpfc_hst_state_dl, &sta_reg), 5230 bf_get(lpfc_hst_state_port_status, &sta_reg)); 5231 5232 /* Log device information */ 5233 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr); 5234 if (bf_get(lpfc_sli_intf_valid, 5235 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) { 5236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5237 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 5238 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5239 bf_get(lpfc_sli_intf_sli_family, 5240 &phba->sli4_hba.sli_intf), 5241 bf_get(lpfc_sli_intf_slirev, 5242 &phba->sli4_hba.sli_intf), 5243 bf_get(lpfc_sli_intf_featurelevel1, 5244 &phba->sli4_hba.sli_intf), 5245 bf_get(lpfc_sli_intf_featurelevel2, 5246 &phba->sli4_hba.sli_intf)); 5247 } 5248 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); 5249 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); 5250 /* With an unrecoverable error, log the error message and return error */ 5251 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 5252 uerrhi_reg.word0 =
readl(phba->sli4_hba.UERRHIregaddr); 5253 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5254 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5256 "1422 HBA Unrecoverable error: " 5257 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 5258 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", 5259 uerrlo_reg.word0, uerrhi_reg.word0, 5260 phba->sli4_hba.ue_mask_lo, 5261 phba->sli4_hba.ue_mask_hi); 5262 return -ENODEV; 5263 } 5264 5265 return port_error; 5266} 5267 5268/** 5269 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5270 * @phba: pointer to lpfc hba data structure. 5271 * 5272 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5273 * memory map. 5274 **/ 5275static void 5276lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) 5277{ 5278 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5279 LPFC_UERR_STATUS_LO; 5280 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5281 LPFC_UERR_STATUS_HI; 5282 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5283 LPFC_UE_MASK_LO; 5284 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5285 LPFC_UE_MASK_HI; 5286 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + 5287 LPFC_SLI_INTF; 5288} 5289 5290/** 5291 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5292 * @phba: pointer to lpfc hba data structure. 5293 * 5294 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5295 * memory map. 5296 **/ 5297static void 5298lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5299{ 5300 5301 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5302 LPFC_HST_STATE; 5303 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5304 LPFC_HST_ISR0; 5305 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5306 LPFC_HST_IMR0; 5307 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5308 LPFC_HST_ISCR0; 5309 return; 5310} 5311 5312/** 5313 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5314 * @phba: pointer to lpfc hba data structure. 5315 * @vf: virtual function number 5316 * 5317 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5318 * based on the given virtual function number, @vf. 5319 * 5320 * Return 0 if successful, otherwise -ENODEV. 5321 **/ 5322static int 5323lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5324{ 5325 if (vf > LPFC_VIR_FUNC_MAX) 5326 return -ENODEV; 5327 5328 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5329 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5330 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5331 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5332 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5333 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5334 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5335 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5336 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5337 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5338 return 0; 5339} 5340 5341/** 5342 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5343 * @phba: pointer to lpfc hba data structure. 5344 * 5345 * This routine is invoked to create the bootstrap mailbox 5346 * region consistent with the SLI-4 interface spec.
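 * Address-split sketch (restating the math at the end of this routine):
 * the 16-byte-aligned physical address P is posted as two 30-bit halves,
 *
 *	addr_hi = ((P >> 34) & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_HI
 *	addr_lo = ((P >> 4)  & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_LO
 *
 * with the low-order flag bit telling the port whether a written word
 * carries the high or the low half.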
This 5347 * routine allocates all memory necessary to communicate 5348 * mailbox commands to the port and sets up all alignment 5349 * needs. No locks are expected to be held when calling 5350 * this routine. 5351 * 5352 * Return codes 5353 * 0 - successful 5354 * -ENOMEM - could not allocate memory. 5355 **/ 5356static int 5357lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5358{ 5359 uint32_t bmbx_size; 5360 struct lpfc_dmabuf *dmabuf; 5361 struct dma_address *dma_address; 5362 uint32_t pa_addr; 5363 uint64_t phys_addr; 5364 5365 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5366 if (!dmabuf) 5367 return -ENOMEM; 5368 5369 /* 5370 * The bootstrap mailbox region is comprised of 2 parts 5371 * plus an alignment restriction of 16 bytes. 5372 */ 5373 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5374 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5375 bmbx_size, 5376 &dmabuf->phys, 5377 GFP_KERNEL); 5378 if (!dmabuf->virt) { 5379 kfree(dmabuf); 5380 return -ENOMEM; 5381 } 5382 memset(dmabuf->virt, 0, bmbx_size); 5383 5384 /* 5385 * Initialize the bootstrap mailbox pointers now so that the register 5386 * operations are simple later. The mailbox dma address is required 5387 * to be 16-byte aligned. Also align the virtual memory as each 5388 * mailbox is copied into the bmbx mailbox region before issuing the 5389 * command to the port. 5390 */ 5391 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5392 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5393 5394 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5395 LPFC_ALIGN_16_BYTE); 5396 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5397 LPFC_ALIGN_16_BYTE); 5398 5399 /* 5400 * Set the high and low physical addresses now. The SLI4 alignment 5401 * requirement is 16 bytes and the mailbox is posted to the port 5402 * as two 30-bit addresses. The other data is a bit marking whether 5403 * the 30-bit address is the high or low address. 5404 * Upcast bmbx aphys to 64bits so shift instruction compiles 5405 * clean on 32 bit machines. 5406 */ 5407 dma_address = &phba->sli4_hba.bmbx.dma_address; 5408 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5409 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5410 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5411 LPFC_BMBX_BIT1_ADDR_HI); 5412 5413 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5414 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5415 LPFC_BMBX_BIT1_ADDR_LO); 5416 return 0; 5417} 5418 5419/** 5420 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5421 * @phba: pointer to lpfc hba data structure. 5422 * 5423 * This routine is invoked to teardown the bootstrap mailbox 5424 * region and release all host resources. This routine requires 5425 * the caller to ensure all mailbox commands have been recovered, no 5426 * additional mailbox commands are sent, and interrupts are disabled 5427 * before calling this routine. 5428 * 5429 **/ 5430static void 5431lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5432{ 5433 dma_free_coherent(&phba->pcidev->dev, 5434 phba->sli4_hba.bmbx.bmbx_size, 5435 phba->sli4_hba.bmbx.dmabuf->virt, 5436 phba->sli4_hba.bmbx.dmabuf->phys); 5437 5438 kfree(phba->sli4_hba.bmbx.dmabuf); 5439 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5440} 5441 5442/** 5443 * lpfc_sli4_read_config - Get the config parameters. 5444 * @phba: pointer to lpfc hba data structure. 5445 * 5446 * This routine is invoked to read the configuration parameters from the HBA.
5447 * The configuration parameters are used to set the base and maximum values
5448 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5449 * allocation for the port.
5450 *
5451 * Return codes
5452 * 0 - successful
5453 * -ENOMEM - No available memory
5454 * -EIO - The mailbox failed to complete successfully.
5455 **/
5456static int
5457lpfc_sli4_read_config(struct lpfc_hba *phba)
5458{
5459 LPFC_MBOXQ_t *pmb;
5460 struct lpfc_mbx_read_config *rd_config;
5461 uint32_t rc = 0;
5462
5463 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5464 if (!pmb) {
5465 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5466 "2011 Unable to allocate memory for issuing "
5467 "SLI_CONFIG_SPECIAL mailbox command\n");
5468 return -ENOMEM;
5469 }
5470
5471 lpfc_read_config(phba, pmb);
5472
5473 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5474 if (rc != MBX_SUCCESS) {
5475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5476 "2012 Mailbox failed, mbxCmd x%x "
5477 "READ_CONFIG, mbxStatus x%x\n",
5478 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5479 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5480 rc = -EIO;
5481 } else {
5482 rd_config = &pmb->u.mqe.un.rd_config;
5483 phba->sli4_hba.max_cfg_param.max_xri =
5484 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5485 phba->sli4_hba.max_cfg_param.xri_base =
5486 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5487 phba->sli4_hba.max_cfg_param.max_vpi =
5488 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5489 phba->sli4_hba.max_cfg_param.vpi_base =
5490 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5491 phba->sli4_hba.max_cfg_param.max_rpi =
5492 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5493 phba->sli4_hba.max_cfg_param.rpi_base =
5494 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5495 phba->sli4_hba.max_cfg_param.max_vfi =
5496 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5497 phba->sli4_hba.max_cfg_param.vfi_base =
5498 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5499 phba->sli4_hba.max_cfg_param.max_fcfi =
5500 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5501 phba->sli4_hba.max_cfg_param.fcfi_base =
5502 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5503 phba->sli4_hba.max_cfg_param.max_eq =
5504 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5505 phba->sli4_hba.max_cfg_param.max_rq =
5506 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5507 phba->sli4_hba.max_cfg_param.max_wq =
5508 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5509 phba->sli4_hba.max_cfg_param.max_cq =
5510 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5511 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5512 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5513 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5514 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5515 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5516 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5517 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5518 phba->max_vports = phba->max_vpi;
5519 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5520 "2003 cfg params XRI(B:%d M:%d), "
5521 "VPI(B:%d M:%d) "
5522 "VFI(B:%d M:%d) "
5523 "RPI(B:%d M:%d) "
5524 "FCFI(B:%d M:%d)\n",
5525 phba->sli4_hba.max_cfg_param.xri_base,
5526 phba->sli4_hba.max_cfg_param.max_xri,
5527 phba->sli4_hba.max_cfg_param.vpi_base,
5528 phba->sli4_hba.max_cfg_param.max_vpi,
5529 phba->sli4_hba.max_cfg_param.vfi_base,
5530 phba->sli4_hba.max_cfg_param.max_vfi,
5531 phba->sli4_hba.max_cfg_param.rpi_base,
5532 phba->sli4_hba.max_cfg_param.max_rpi,
5533 phba->sli4_hba.max_cfg_param.fcfi_base,
5534 phba->sli4_hba.max_cfg_param.max_fcfi);
5535 }
5536 mempool_free(pmb, phba->mbox_mem_pool);
5537
5538 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5539 if (phba->cfg_hba_queue_depth >
5540 (phba->sli4_hba.max_cfg_param.max_xri -
5541 lpfc_sli4_get_els_iocb_cnt(phba)))
5542 phba->cfg_hba_queue_depth =
5543 phba->sli4_hba.max_cfg_param.max_xri -
5544 lpfc_sli4_get_els_iocb_cnt(phba);
5545 return rc;
5546}
5547
5548/**
5549 * lpfc_setup_endian_order - Notify the port of the host's endian order.
5550 * @phba: pointer to lpfc hba data structure.
5551 *
5552 * This routine is invoked to set up the host-side endian order with the
5553 * HBA consistent with the SLI-4 interface spec.
5554 *
5555 * Return codes
5556 * 0 - successful
5557 * -ENOMEM - No available memory
5558 * -EIO - The mailbox failed to complete successfully.
5559 **/
5560static int
5561lpfc_setup_endian_order(struct lpfc_hba *phba)
5562{
5563 LPFC_MBOXQ_t *mboxq;
5564 uint32_t rc = 0;
5565 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5566 HOST_ENDIAN_HIGH_WORD1};
5567
5568 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5569 if (!mboxq) {
5570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5571 "0492 Unable to allocate memory for issuing "
5572 "SLI_CONFIG_SPECIAL mailbox command\n");
5573 return -ENOMEM;
5574 }
5575
5576 /*
5577 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5578 * words to contain special data values and no other data.
5579 */
5580 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5581 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5582 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5583 if (rc != MBX_SUCCESS) {
5584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5585 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
5586 "status x%x\n",
5587 rc);
5588 rc = -EIO;
5589 }
5590
5591 mempool_free(mboxq, phba->mbox_mem_pool);
5592 return rc;
5593}
5594
5595/**
5596 * lpfc_sli4_queue_create - Create all the SLI4 queues
5597 * @phba: pointer to lpfc hba data structure.
5598 *
5599 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5600 * operation. For each SLI4 queue type, the parameters such as queue entry
5601 * count (queue depth) shall be taken from the module parameter. For now,
5602 * we just use some constant number as placeholder.
5603 *
5604 * Return codes
5605 * 0 - successful
5606 * -ENOMEM - No available memory
5607 * -EIO - The mailbox failed to complete successfully.
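 *
 * (Editor's summary of the topology allocated below: one slow-path EQ
 * plus cfg_fcp_eq_count fast-path EQs; one mailbox CQ, one ELS CQ, and
 * one FCP CQ per fast-path EQ; one mailbox MQ, one ELS WQ, and
 * cfg_fcp_wq_count FCP WQs; and one header/data RQ pair.)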
5608 **/ 5609static int 5610lpfc_sli4_queue_create(struct lpfc_hba *phba) 5611{ 5612 struct lpfc_queue *qdesc; 5613 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5614 int cfg_fcp_wq_count; 5615 int cfg_fcp_eq_count; 5616 5617 /* 5618 * Sanity check for confiugred queue parameters against the run-time 5619 * device parameters 5620 */ 5621 5622 /* Sanity check on FCP fast-path WQ parameters */ 5623 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 5624 if (cfg_fcp_wq_count > 5625 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 5626 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 5627 LPFC_SP_WQN_DEF; 5628 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 5629 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5630 "2581 Not enough WQs (%d) from " 5631 "the pci function for supporting " 5632 "FCP WQs (%d)\n", 5633 phba->sli4_hba.max_cfg_param.max_wq, 5634 phba->cfg_fcp_wq_count); 5635 goto out_error; 5636 } 5637 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5638 "2582 Not enough WQs (%d) from the pci " 5639 "function for supporting the requested " 5640 "FCP WQs (%d), the actual FCP WQs can " 5641 "be supported: %d\n", 5642 phba->sli4_hba.max_cfg_param.max_wq, 5643 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 5644 } 5645 /* The actual number of FCP work queues adopted */ 5646 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 5647 5648 /* Sanity check on FCP fast-path EQ parameters */ 5649 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 5650 if (cfg_fcp_eq_count > 5651 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 5652 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 5653 LPFC_SP_EQN_DEF; 5654 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 5655 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5656 "2574 Not enough EQs (%d) from the " 5657 "pci function for supporting FCP " 5658 "EQs (%d)\n", 5659 phba->sli4_hba.max_cfg_param.max_eq, 5660 phba->cfg_fcp_eq_count); 5661 goto out_error; 5662 } 5663 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5664 "2575 Not enough EQs (%d) from the pci " 5665 "function for supporting the requested " 5666 "FCP EQs (%d), the actual FCP EQs can " 5667 "be supported: %d\n", 5668 phba->sli4_hba.max_cfg_param.max_eq, 5669 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 5670 } 5671 /* It does not make sense to have more EQs than WQs */ 5672 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5673 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5674 "2593 The FCP EQ count(%d) cannot be greater " 5675 "than the FCP WQ count(%d), limiting the " 5676 "FCP EQ count to %d\n", cfg_fcp_eq_count, 5677 phba->cfg_fcp_wq_count, 5678 phba->cfg_fcp_wq_count); 5679 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5680 } 5681 /* The actual number of FCP event queues adopted */ 5682 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 5683 /* The overall number of event queues used */ 5684 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 5685 5686 /* 5687 * Create Event Queues (EQs) 5688 */ 5689 5690 /* Get EQ depth from module parameter, fake the default for now */ 5691 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 5692 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 5693 5694 /* Create slow path event queue */ 5695 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5696 phba->sli4_hba.eq_ecount); 5697 if (!qdesc) { 5698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5699 "0496 Failed allocate slow-path EQ\n"); 5700 goto out_error; 5701 } 5702 phba->sli4_hba.sp_eq = qdesc; 5703 5704 /* Create fast-path FCP Event Queue(s) */ 5705 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 5706 
phba->cfg_fcp_eq_count), GFP_KERNEL); 5707 if (!phba->sli4_hba.fp_eq) { 5708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5709 "2576 Failed allocate memory for fast-path " 5710 "EQ record array\n"); 5711 goto out_free_sp_eq; 5712 } 5713 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5714 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5715 phba->sli4_hba.eq_ecount); 5716 if (!qdesc) { 5717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5718 "0497 Failed allocate fast-path EQ\n"); 5719 goto out_free_fp_eq; 5720 } 5721 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5722 } 5723 5724 /* 5725 * Create Complete Queues (CQs) 5726 */ 5727 5728 /* Get CQ depth from module parameter, fake the default for now */ 5729 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 5730 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 5731 5732 /* Create slow-path Mailbox Command Complete Queue */ 5733 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5734 phba->sli4_hba.cq_ecount); 5735 if (!qdesc) { 5736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5737 "0500 Failed allocate slow-path mailbox CQ\n"); 5738 goto out_free_fp_eq; 5739 } 5740 phba->sli4_hba.mbx_cq = qdesc; 5741 5742 /* Create slow-path ELS Complete Queue */ 5743 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5744 phba->sli4_hba.cq_ecount); 5745 if (!qdesc) { 5746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5747 "0501 Failed allocate slow-path ELS CQ\n"); 5748 goto out_free_mbx_cq; 5749 } 5750 phba->sli4_hba.els_cq = qdesc; 5751 5752 5753 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5754 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5755 phba->cfg_fcp_eq_count), GFP_KERNEL); 5756 if (!phba->sli4_hba.fcp_cq) { 5757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5758 "2577 Failed allocate memory for fast-path " 5759 "CQ record array\n"); 5760 goto out_free_els_cq; 5761 } 5762 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5763 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5764 phba->sli4_hba.cq_ecount); 5765 if (!qdesc) { 5766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5767 "0499 Failed allocate fast-path FCP " 5768 "CQ (%d)\n", fcp_cqidx); 5769 goto out_free_fcp_cq; 5770 } 5771 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 5772 } 5773 5774 /* Create Mailbox Command Queue */ 5775 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 5776 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 5777 5778 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 5779 phba->sli4_hba.mq_ecount); 5780 if (!qdesc) { 5781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5782 "0505 Failed allocate slow-path MQ\n"); 5783 goto out_free_fcp_cq; 5784 } 5785 phba->sli4_hba.mbx_wq = qdesc; 5786 5787 /* 5788 * Create all the Work Queues (WQs) 5789 */ 5790 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 5791 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 5792 5793 /* Create slow-path ELS Work Queue */ 5794 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5795 phba->sli4_hba.wq_ecount); 5796 if (!qdesc) { 5797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5798 "0504 Failed allocate slow-path ELS WQ\n"); 5799 goto out_free_mbx_wq; 5800 } 5801 phba->sli4_hba.els_wq = qdesc; 5802 5803 /* Create fast-path FCP Work Queue(s) */ 5804 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 5805 phba->cfg_fcp_wq_count), GFP_KERNEL); 5806 if (!phba->sli4_hba.fcp_wq) { 5807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5808 "2578 Failed allocate memory for fast-path " 5809 "WQ record array\n"); 5810 goto 
out_free_els_wq;
5811 }
5812 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5813 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5814 phba->sli4_hba.wq_ecount);
5815 if (!qdesc) {
5816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5817 "0503 Failed allocate fast-path FCP "
5818 "WQ (%d)\n", fcp_wqidx);
5819 goto out_free_fcp_wq;
5820 }
5821 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5822 }
5823
5824 /*
5825 * Create Receive Queue (RQ)
5826 */
5827 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5828 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5829
5830 /* Create Receive Queue for header */
5831 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5832 phba->sli4_hba.rq_ecount);
5833 if (!qdesc) {
5834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5835 "0506 Failed allocate receive HRQ\n");
5836 goto out_free_fcp_wq;
5837 }
5838 phba->sli4_hba.hdr_rq = qdesc;
5839
5840 /* Create Receive Queue for data */
5841 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5842 phba->sli4_hba.rq_ecount);
5843 if (!qdesc) {
5844 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5845 "0507 Failed allocate receive DRQ\n");
5846 goto out_free_hdr_rq;
5847 }
5848 phba->sli4_hba.dat_rq = qdesc;
5849
5850 return 0;
5851
5852out_free_hdr_rq:
5853 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5854 phba->sli4_hba.hdr_rq = NULL;
5855out_free_fcp_wq:
5856 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5857 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5858 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5859 }
5860 kfree(phba->sli4_hba.fcp_wq);
5861out_free_els_wq:
5862 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5863 phba->sli4_hba.els_wq = NULL;
5864out_free_mbx_wq:
5865 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5866 phba->sli4_hba.mbx_wq = NULL;
5867out_free_fcp_cq:
5868 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5869 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5870 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5871 }
5872 kfree(phba->sli4_hba.fcp_cq);
5873out_free_els_cq:
5874 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5875 phba->sli4_hba.els_cq = NULL;
5876out_free_mbx_cq:
5877 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5878 phba->sli4_hba.mbx_cq = NULL;
5879out_free_fp_eq:
5880 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5881 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5882 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5883 }
5884 kfree(phba->sli4_hba.fp_eq);
5885out_free_sp_eq:
5886 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5887 phba->sli4_hba.sp_eq = NULL;
5888out_error:
5889 return -ENOMEM;
5890}
5891
5892/**
5893 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5894 * @phba: pointer to lpfc hba data structure.
5895 *
5896 * This routine is invoked to release all the SLI4 queues allocated for
5897 * the FCoE HBA operation. It returns no value.
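 *
 * (Editor's hedged note: this routine is the inverse of
 * lpfc_sli4_queue_create(). Since the create routine unwinds its own
 * partial allocations via its goto chain, a caller only needs this
 * destroy routine after a fully successful create, e.g.
 *
 *	rc = lpfc_sli4_queue_create(phba);
 *	if (rc)
 *		return rc;
 *	...
 *	lpfc_sli4_queue_destroy(phba);
 * )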
5903 **/
5904static void
5905lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5906{
5907 int fcp_qidx;
5908
5909 /* Release mailbox command work queue */
5910 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5911 phba->sli4_hba.mbx_wq = NULL;
5912
5913 /* Release ELS work queue */
5914 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5915 phba->sli4_hba.els_wq = NULL;
5916
5917 /* Release FCP work queues */
5918 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5919 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5920 kfree(phba->sli4_hba.fcp_wq);
5921 phba->sli4_hba.fcp_wq = NULL;
5922
5923 /* Release unsolicited receive queue */
5924 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5925 phba->sli4_hba.hdr_rq = NULL;
5926 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5927 phba->sli4_hba.dat_rq = NULL;
5928
5929 /* Release ELS complete queue */
5930 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5931 phba->sli4_hba.els_cq = NULL;
5932
5933 /* Release mailbox command complete queue */
5934 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5935 phba->sli4_hba.mbx_cq = NULL;
5936
5937 /* Release FCP response complete queues */
5938 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5939 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5940 kfree(phba->sli4_hba.fcp_cq);
5941 phba->sli4_hba.fcp_cq = NULL;
5942
5943 /* Release fast-path event queues */
5944 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5945 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5946 kfree(phba->sli4_hba.fp_eq);
5947 phba->sli4_hba.fp_eq = NULL;
5948
5949 /* Release slow-path event queue */
5950 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5951 phba->sli4_hba.sp_eq = NULL;
5952
5953 return;
5954}
5955
5956/**
5957 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5958 * @phba: pointer to lpfc hba data structure.
5959 *
5960 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5961 * operation.
5962 *
5963 * Return codes
5964 * 0 - successful
5965 * -ENOMEM - No available memory
5966 * -EIO - The mailbox failed to complete successfully.
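 *
 * (Editor's hedged sketch: create and setup are distinct phases. The
 * create routine above only allocates host-side queue memory; this
 * routine posts each queue to the port with mailbox commands, roughly
 *
 *	rc = lpfc_sli4_queue_create(phba);
 *	if (!rc)
 *		rc = lpfc_sli4_queue_setup(phba);
 *
 * The pairing shown is an illustration of intent, not copied code.)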
5967 **/ 5968int 5969lpfc_sli4_queue_setup(struct lpfc_hba *phba) 5970{ 5971 int rc = -ENOMEM; 5972 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5973 int fcp_cq_index = 0; 5974 5975 /* 5976 * Set up Event Queues (EQs) 5977 */ 5978 5979 /* Set up slow-path event queue */ 5980 if (!phba->sli4_hba.sp_eq) { 5981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5982 "0520 Slow-path EQ not allocated\n"); 5983 goto out_error; 5984 } 5985 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 5986 LPFC_SP_DEF_IMAX); 5987 if (rc) { 5988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5989 "0521 Failed setup of slow-path EQ: " 5990 "rc = 0x%x\n", rc); 5991 goto out_error; 5992 } 5993 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5994 "2583 Slow-path EQ setup: queue-id=%d\n", 5995 phba->sli4_hba.sp_eq->queue_id); 5996 5997 /* Set up fast-path event queue */ 5998 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5999 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6001 "0522 Fast-path EQ (%d) not " 6002 "allocated\n", fcp_eqidx); 6003 goto out_destroy_fp_eq; 6004 } 6005 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6006 phba->cfg_fcp_imax); 6007 if (rc) { 6008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6009 "0523 Failed setup of fast-path EQ " 6010 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6011 goto out_destroy_fp_eq; 6012 } 6013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6014 "2584 Fast-path EQ setup: " 6015 "queue[%d]-id=%d\n", fcp_eqidx, 6016 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6017 } 6018 6019 /* 6020 * Set up Complete Queues (CQs) 6021 */ 6022 6023 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6024 if (!phba->sli4_hba.mbx_cq) { 6025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6026 "0528 Mailbox CQ not allocated\n"); 6027 goto out_destroy_fp_eq; 6028 } 6029 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6030 LPFC_MCQ, LPFC_MBOX); 6031 if (rc) { 6032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6033 "0529 Failed setup of slow-path mailbox CQ: " 6034 "rc = 0x%x\n", rc); 6035 goto out_destroy_fp_eq; 6036 } 6037 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6038 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6039 phba->sli4_hba.mbx_cq->queue_id, 6040 phba->sli4_hba.sp_eq->queue_id); 6041 6042 /* Set up slow-path ELS Complete Queue */ 6043 if (!phba->sli4_hba.els_cq) { 6044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6045 "0530 ELS CQ not allocated\n"); 6046 goto out_destroy_mbx_cq; 6047 } 6048 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6049 LPFC_WCQ, LPFC_ELS); 6050 if (rc) { 6051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6052 "0531 Failed setup of slow-path ELS CQ: " 6053 "rc = 0x%x\n", rc); 6054 goto out_destroy_mbx_cq; 6055 } 6056 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6057 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6058 phba->sli4_hba.els_cq->queue_id, 6059 phba->sli4_hba.sp_eq->queue_id); 6060 6061 /* Set up fast-path FCP Response Complete Queue */ 6062 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6063 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6065 "0526 Fast-path FCP CQ (%d) not " 6066 "allocated\n", fcp_cqidx); 6067 goto out_destroy_fcp_cq; 6068 } 6069 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6070 phba->sli4_hba.fp_eq[fcp_cqidx], 6071 LPFC_WCQ, LPFC_FCP); 6072 if (rc) { 6073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6074 "0527 Failed setup of fast-path FCP " 6075 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 6076 goto out_destroy_fcp_cq; 6077 } 6078 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6079 "2588 FCP CQ setup: cq[%d]-id=%d, " 6080 "parent eq[%d]-id=%d\n", 6081 fcp_cqidx, 6082 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6083 fcp_cqidx, 6084 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6085 } 6086 6087 /* 6088 * Set up all the Work Queues (WQs) 6089 */ 6090 6091 /* Set up Mailbox Command Queue */ 6092 if (!phba->sli4_hba.mbx_wq) { 6093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6094 "0538 Slow-path MQ not allocated\n"); 6095 goto out_destroy_fcp_cq; 6096 } 6097 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6098 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6099 if (rc) { 6100 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6101 "0539 Failed setup of slow-path MQ: " 6102 "rc = 0x%x\n", rc); 6103 goto out_destroy_fcp_cq; 6104 } 6105 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6106 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6107 phba->sli4_hba.mbx_wq->queue_id, 6108 phba->sli4_hba.mbx_cq->queue_id); 6109 6110 /* Set up slow-path ELS Work Queue */ 6111 if (!phba->sli4_hba.els_wq) { 6112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6113 "0536 Slow-path ELS WQ not allocated\n"); 6114 goto out_destroy_mbx_wq; 6115 } 6116 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6117 phba->sli4_hba.els_cq, LPFC_ELS); 6118 if (rc) { 6119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6120 "0537 Failed setup of slow-path ELS WQ: " 6121 "rc = 0x%x\n", rc); 6122 goto out_destroy_mbx_wq; 6123 } 6124 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6125 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6126 phba->sli4_hba.els_wq->queue_id, 6127 phba->sli4_hba.els_cq->queue_id); 6128 6129 /* Set up fast-path FCP Work Queue */ 6130 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6131 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6133 "0534 Fast-path FCP WQ (%d) not " 6134 "allocated\n", fcp_wqidx); 6135 goto out_destroy_fcp_wq; 6136 } 6137 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6138 phba->sli4_hba.fcp_cq[fcp_cq_index], 6139 LPFC_FCP); 6140 if (rc) { 6141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6142 "0535 Failed setup of fast-path FCP " 6143 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6144 goto out_destroy_fcp_wq; 6145 } 6146 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6147 "2591 FCP WQ setup: wq[%d]-id=%d, " 6148 "parent cq[%d]-id=%d\n", 6149 fcp_wqidx, 6150 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6151 fcp_cq_index, 6152 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6153 /* Round robin FCP Work Queue's Completion Queue assignment */ 6154 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6155 } 6156 6157 /* 6158 * Create Receive Queue (RQ) 6159 */ 6160 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6161 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6162 "0540 Receive Queue not allocated\n"); 6163 goto out_destroy_fcp_wq; 6164 } 6165 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6166 phba->sli4_hba.els_cq, LPFC_USOL); 6167 if (rc) { 6168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6169 "0541 Failed setup of Receive Queue: " 6170 "rc = 0x%x\n", rc); 6171 goto out_destroy_fcp_wq; 6172 } 6173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6174 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6175 "parent cq-id=%d\n", 6176 phba->sli4_hba.hdr_rq->queue_id, 6177 phba->sli4_hba.dat_rq->queue_id, 6178 phba->sli4_hba.els_cq->queue_id); 6179 return 0; 6180 6181out_destroy_fcp_wq: 6182 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--)
6183 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6184 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6185out_destroy_mbx_wq:
6186 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6187out_destroy_fcp_cq:
6188 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6189 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6190 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6191out_destroy_mbx_cq:
6192 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6193out_destroy_fp_eq:
6194 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6195 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6196 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6197out_error:
6198 return rc;
6199}
6200
6201/**
6202 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6203 * @phba: pointer to lpfc hba data structure.
6204 *
6205 * This routine is invoked to unset (destroy) all the SLI4 queues set up
6206 * for the FCoE HBA operation. It returns no value.
6212 **/
6213void
6214lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6215{
6216 int fcp_qidx;
6217
6218 /* Unset mailbox command work queue */
6219 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6220 /* Unset ELS work queue */
6221 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6222 /* Unset unsolicited receive queue */
6223 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6224 /* Unset FCP work queues */
6225 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6226 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6227 /* Unset mailbox command complete queue */
6228 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6229 /* Unset ELS complete queue */
6230 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6231 /* Unset FCP response complete queues */
6232 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6233 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6234 /* Unset fast-path event queues */
6235 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6236 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6237 /* Unset slow-path event queue */
6238 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6239}
6240
6241/**
6242 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6243 * @phba: pointer to lpfc hba data structure.
6244 *
6245 * This routine is invoked to allocate and set up a pool of completion queue
6246 * events. The body of the completion queue event is a completion queue entry
6247 * (CQE). For now, this pool is used for the interrupt service routine to queue
6248 * the following HBA completion queue events for the worker thread to process:
6249 * - Mailbox asynchronous events
6250 * - Receive queue completion unsolicited events
6251 * Later, this can be used for all the slow-path events.
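 *
 * (Editor's note on sizing, read from the loop below: the pool holds
 * 4 * cq_ecount entries, i.e. four lpfc_cq_event structures for every
 * configured completion-queue entry; the factor of four appears to be
 * headroom for event bursts, which is the editor's inference.)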
6252 * 6253 * Return codes 6254 * 0 - successful 6255 * -ENOMEM - No availble memory 6256 **/ 6257static int 6258lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6259{ 6260 struct lpfc_cq_event *cq_event; 6261 int i; 6262 6263 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6264 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6265 if (!cq_event) 6266 goto out_pool_create_fail; 6267 list_add_tail(&cq_event->list, 6268 &phba->sli4_hba.sp_cqe_event_pool); 6269 } 6270 return 0; 6271 6272out_pool_create_fail: 6273 lpfc_sli4_cq_event_pool_destroy(phba); 6274 return -ENOMEM; 6275} 6276 6277/** 6278 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6279 * @phba: pointer to lpfc hba data structure. 6280 * 6281 * This routine is invoked to free the pool of completion queue events at 6282 * driver unload time. Note that, it is the responsibility of the driver 6283 * cleanup routine to free all the outstanding completion-queue events 6284 * allocated from this pool back into the pool before invoking this routine 6285 * to destroy the pool. 6286 **/ 6287static void 6288lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6289{ 6290 struct lpfc_cq_event *cq_event, *next_cq_event; 6291 6292 list_for_each_entry_safe(cq_event, next_cq_event, 6293 &phba->sli4_hba.sp_cqe_event_pool, list) { 6294 list_del(&cq_event->list); 6295 kfree(cq_event); 6296 } 6297} 6298 6299/** 6300 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6301 * @phba: pointer to lpfc hba data structure. 6302 * 6303 * This routine is the lock free version of the API invoked to allocate a 6304 * completion-queue event from the free pool. 6305 * 6306 * Return: Pointer to the newly allocated completion-queue event if successful 6307 * NULL otherwise. 6308 **/ 6309struct lpfc_cq_event * 6310__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6311{ 6312 struct lpfc_cq_event *cq_event = NULL; 6313 6314 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6315 struct lpfc_cq_event, list); 6316 return cq_event; 6317} 6318 6319/** 6320 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6321 * @phba: pointer to lpfc hba data structure. 6322 * 6323 * This routine is the lock version of the API invoked to allocate a 6324 * completion-queue event from the free pool. 6325 * 6326 * Return: Pointer to the newly allocated completion-queue event if successful 6327 * NULL otherwise. 6328 **/ 6329struct lpfc_cq_event * 6330lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6331{ 6332 struct lpfc_cq_event *cq_event; 6333 unsigned long iflags; 6334 6335 spin_lock_irqsave(&phba->hbalock, iflags); 6336 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6337 spin_unlock_irqrestore(&phba->hbalock, iflags); 6338 return cq_event; 6339} 6340 6341/** 6342 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6343 * @phba: pointer to lpfc hba data structure. 6344 * @cq_event: pointer to the completion queue event to be freed. 6345 * 6346 * This routine is the lock free version of the API invoked to release a 6347 * completion-queue event back into the free pool. 6348 **/ 6349void 6350__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6351 struct lpfc_cq_event *cq_event) 6352{ 6353 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6354} 6355 6356/** 6357 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6358 * @phba: pointer to lpfc hba data structure. 
6359 * @cq_event: pointer to the completion queue event to be freed.
6360 *
6361 * This routine is the lock version of the API invoked to release a
6362 * completion-queue event back into the free pool.
6363 **/
6364void
6365lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6366 struct lpfc_cq_event *cq_event)
6367{
6368 unsigned long iflags;
6369 spin_lock_irqsave(&phba->hbalock, iflags);
6370 __lpfc_sli4_cq_event_release(phba, cq_event);
6371 spin_unlock_irqrestore(&phba->hbalock, iflags);
6372}
6373
6374/**
6375 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6376 * @phba: pointer to lpfc hba data structure.
6377 *
6378 * This routine releases all the pending completion-queue events back
6379 * into the free pool for device reset.
6380 **/
6381static void
6382lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6383{
6384 LIST_HEAD(cqelist);
6385 struct lpfc_cq_event *cqe;
6386 unsigned long iflags;
6387
6388 /* Retrieve all the pending WCQEs from pending WCQE lists */
6389 spin_lock_irqsave(&phba->hbalock, iflags);
6390 /* Pending FCP XRI abort events */
6391 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6392 &cqelist);
6393 /* Pending ELS XRI abort events */
6394 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6395 &cqelist);
6396 /* Pending async events */
6397 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6398 &cqelist);
6399 spin_unlock_irqrestore(&phba->hbalock, iflags);
6400
6401 while (!list_empty(&cqelist)) {
6402 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6403 lpfc_sli4_cq_event_release(phba, cqe);
6404 }
6405}
6406
6407/**
6408 * lpfc_pci_function_reset - Reset pci function.
6409 * @phba: pointer to lpfc hba data structure.
6410 *
6411 * This routine is invoked to request a PCI function reset. It destroys
6412 * all resources assigned to the PCI function which originates this request.
6413 *
6414 * Return codes
6415 * 0 - successful
6416 * -ENOMEM - No available memory
6417 * -ENXIO - The mailbox failed to complete successfully.
6418 **/
6419int
6420lpfc_pci_function_reset(struct lpfc_hba *phba)
6421{
6422 LPFC_MBOXQ_t *mboxq;
6423 uint32_t rc = 0;
6424 uint32_t shdr_status, shdr_add_status;
6425 union lpfc_sli4_cfg_shdr *shdr;
6426
6427 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6428 if (!mboxq) {
6429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6430 "0494 Unable to allocate memory for issuing "
6431 "SLI_FUNCTION_RESET mailbox command\n");
6432 return -ENOMEM;
6433 }
6434
6435 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6436 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6437 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6438 LPFC_SLI4_MBX_EMBED);
6439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6440 shdr = (union lpfc_sli4_cfg_shdr *)
6441 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6442 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6443 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6444 if (rc != MBX_TIMEOUT)
6445 mempool_free(mboxq, phba->mbox_mem_pool);
6446 if (shdr_status || shdr_add_status || rc) {
6447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6448 "0495 SLI_FUNCTION_RESET mailbox failed with "
6449 "status x%x add_status x%x, mbx status x%x\n",
6450 shdr_status, shdr_add_status, rc);
6451 rc = -ENXIO;
6452 }
6453 return rc;
6454}
6455
6456/**
6457 * lpfc_sli4_send_nop_mbox_cmds - Send SLI4 NOP mailbox commands
6458 * @phba: pointer to lpfc hba data structure.
6459 * @cnt: number of nop mailbox commands to send. 6460 * 6461 * This routine is invoked to send a number @cnt of NOP mailbox command and 6462 * wait for each command to complete. 6463 * 6464 * Return: the number of NOP mailbox command completed. 6465 **/ 6466static int 6467lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 6468{ 6469 LPFC_MBOXQ_t *mboxq; 6470 int length, cmdsent; 6471 uint32_t mbox_tmo; 6472 uint32_t rc = 0; 6473 uint32_t shdr_status, shdr_add_status; 6474 union lpfc_sli4_cfg_shdr *shdr; 6475 6476 if (cnt == 0) { 6477 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6478 "2518 Requested to send 0 NOP mailbox cmd\n"); 6479 return cnt; 6480 } 6481 6482 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6483 if (!mboxq) { 6484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6485 "2519 Unable to allocate memory for issuing " 6486 "NOP mailbox command\n"); 6487 return 0; 6488 } 6489 6490 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 6491 length = (sizeof(struct lpfc_mbx_nop) - 6492 sizeof(struct lpfc_sli4_cfg_mhdr)); 6493 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6494 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 6495 6496 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6497 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 6498 if (!phba->sli4_hba.intr_enable) 6499 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6500 else 6501 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 6502 if (rc == MBX_TIMEOUT) 6503 break; 6504 /* Check return status */ 6505 shdr = (union lpfc_sli4_cfg_shdr *) 6506 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6507 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6508 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6509 &shdr->response); 6510 if (shdr_status || shdr_add_status || rc) { 6511 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6512 "2520 NOP mailbox command failed " 6513 "status x%x add_status x%x mbx " 6514 "status x%x\n", shdr_status, 6515 shdr_add_status, rc); 6516 break; 6517 } 6518 } 6519 6520 if (rc != MBX_TIMEOUT) 6521 mempool_free(mboxq, phba->mbox_mem_pool); 6522 6523 return cmdsent; 6524} 6525 6526/** 6527 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 6528 * @phba: pointer to lpfc hba data structure. 6529 * 6530 * This routine is invoked to set up the PCI device memory space for device 6531 * with SLI-4 interface spec. 6532 * 6533 * Return codes 6534 * 0 - successful 6535 * other values - error 6536 **/ 6537static int 6538lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6539{ 6540 struct pci_dev *pdev; 6541 unsigned long bar0map_len, bar1map_len, bar2map_len; 6542 int error = -ENODEV; 6543 6544 /* Obtain PCI device reference */ 6545 if (!phba->pcidev) 6546 return error; 6547 else 6548 pdev = phba->pcidev; 6549 6550 /* Set the device DMA mask size */ 6551 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6552 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6553 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6554 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6555 return error; 6556 } 6557 } 6558 6559 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6560 * number of bytes required by each mapping. They are actually 6561 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device. 
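 *
 * (Editor's tabular restatement of the mapping described above:
 *	PCI resource 0 or 1 -> pci_bar0_map: SLI4 config space registers
 *	PCI resource 2      -> pci_bar1_map: control status registers
 *	PCI resource 4      -> pci_bar2_map: doorbell registers)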
6562 */ 6563 if (pci_resource_start(pdev, 0)) { 6564 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6565 bar0map_len = pci_resource_len(pdev, 0); 6566 } else { 6567 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6568 bar0map_len = pci_resource_len(pdev, 1); 6569 } 6570 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6571 bar1map_len = pci_resource_len(pdev, 2); 6572 6573 phba->pci_bar2_map = pci_resource_start(pdev, 4); 6574 bar2map_len = pci_resource_len(pdev, 4); 6575 6576 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6577 phba->sli4_hba.conf_regs_memmap_p = 6578 ioremap(phba->pci_bar0_map, bar0map_len); 6579 if (!phba->sli4_hba.conf_regs_memmap_p) { 6580 dev_printk(KERN_ERR, &pdev->dev, 6581 "ioremap failed for SLI4 PCI config registers.\n"); 6582 goto out; 6583 } 6584 6585 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 6586 phba->sli4_hba.ctrl_regs_memmap_p = 6587 ioremap(phba->pci_bar1_map, bar1map_len); 6588 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6589 dev_printk(KERN_ERR, &pdev->dev, 6590 "ioremap failed for SLI4 HBA control registers.\n"); 6591 goto out_iounmap_conf; 6592 } 6593 6594 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 6595 phba->sli4_hba.drbl_regs_memmap_p = 6596 ioremap(phba->pci_bar2_map, bar2map_len); 6597 if (!phba->sli4_hba.drbl_regs_memmap_p) { 6598 dev_printk(KERN_ERR, &pdev->dev, 6599 "ioremap failed for SLI4 HBA doorbell registers.\n"); 6600 goto out_iounmap_ctrl; 6601 } 6602 6603 /* Set up BAR0 PCI config space register memory map */ 6604 lpfc_sli4_bar0_register_memmap(phba); 6605 6606 /* Set up BAR1 register memory map */ 6607 lpfc_sli4_bar1_register_memmap(phba); 6608 6609 /* Set up BAR2 register memory map */ 6610 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 6611 if (error) 6612 goto out_iounmap_all; 6613 6614 return 0; 6615 6616out_iounmap_all: 6617 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6618out_iounmap_ctrl: 6619 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6620out_iounmap_conf: 6621 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6622out: 6623 return error; 6624} 6625 6626/** 6627 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 6628 * @phba: pointer to lpfc hba data structure. 6629 * 6630 * This routine is invoked to unset the PCI device memory space for device 6631 * with SLI-4 interface spec. 6632 **/ 6633static void 6634lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 6635{ 6636 struct pci_dev *pdev; 6637 6638 /* Obtain PCI device reference */ 6639 if (!phba->pcidev) 6640 return; 6641 else 6642 pdev = phba->pcidev; 6643 6644 /* Free coherent DMA memory allocated */ 6645 6646 /* Unmap I/O memory space */ 6647 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6648 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6649 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6650 6651 return; 6652} 6653 6654/** 6655 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6656 * @phba: pointer to lpfc hba data structure. 6657 * 6658 * This routine is invoked to enable the MSI-X interrupt vectors to device 6659 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6660 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6661 * invoked, enables either all or nothing, depending on the current 6662 * availability of PCI vector resources. The device driver is responsible 6663 * for calling the individual request_irq() to register each MSI-X vector 6664 * with a interrupt handler, which is done in this function. 
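 *
 * (Editor's note: on this SLI-3 path the driver requests a fixed
 * LPFC_MSIX_VECTORS-sized set, with vector 0 wired to the slow-path
 * handler and vector 1 to the fast-path handler by the request_irq()
 * calls below.)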
Note that 6665 * later when device is unloading, the driver should always call free_irq() 6666 * on all MSI-X vectors it has done request_irq() on before calling 6667 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6668 * will be left with MSI-X enabled and leaks its vectors. 6669 * 6670 * Return codes 6671 * 0 - successful 6672 * other values - error 6673 **/ 6674static int 6675lpfc_sli_enable_msix(struct lpfc_hba *phba) 6676{ 6677 int rc, i; 6678 LPFC_MBOXQ_t *pmb; 6679 6680 /* Set up MSI-X multi-message vectors */ 6681 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6682 phba->msix_entries[i].entry = i; 6683 6684 /* Configure MSI-X capability structure */ 6685 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6686 ARRAY_SIZE(phba->msix_entries)); 6687 if (rc) { 6688 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6689 "0420 PCI enable MSI-X failed (%d)\n", rc); 6690 goto msi_fail_out; 6691 } 6692 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6694 "0477 MSI-X entry[%d]: vector=x%x " 6695 "message=%d\n", i, 6696 phba->msix_entries[i].vector, 6697 phba->msix_entries[i].entry); 6698 /* 6699 * Assign MSI-X vectors to interrupt handlers 6700 */ 6701 6702 /* vector-0 is associated to slow-path handler */ 6703 rc = request_irq(phba->msix_entries[0].vector, 6704 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6705 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6706 if (rc) { 6707 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6708 "0421 MSI-X slow-path request_irq failed " 6709 "(%d)\n", rc); 6710 goto msi_fail_out; 6711 } 6712 6713 /* vector-1 is associated to fast-path handler */ 6714 rc = request_irq(phba->msix_entries[1].vector, 6715 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6716 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6717 6718 if (rc) { 6719 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6720 "0429 MSI-X fast-path request_irq failed " 6721 "(%d)\n", rc); 6722 goto irq_fail_out; 6723 } 6724 6725 /* 6726 * Configure HBA MSI-X attention conditions to messages 6727 */ 6728 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6729 6730 if (!pmb) { 6731 rc = -ENOMEM; 6732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6733 "0474 Unable to allocate memory for issuing " 6734 "MBOX_CONFIG_MSI command\n"); 6735 goto mem_fail_out; 6736 } 6737 rc = lpfc_config_msi(phba, pmb); 6738 if (rc) 6739 goto mbx_fail_out; 6740 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6741 if (rc != MBX_SUCCESS) { 6742 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6743 "0351 Config MSI mailbox command failed, " 6744 "mbxCmd x%x, mbxStatus x%x\n", 6745 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6746 goto mbx_fail_out; 6747 } 6748 6749 /* Free memory allocated for mailbox command */ 6750 mempool_free(pmb, phba->mbox_mem_pool); 6751 return rc; 6752 6753mbx_fail_out: 6754 /* Free memory allocated for mailbox command */ 6755 mempool_free(pmb, phba->mbox_mem_pool); 6756 6757mem_fail_out: 6758 /* free the irq already requested */ 6759 free_irq(phba->msix_entries[1].vector, phba); 6760 6761irq_fail_out: 6762 /* free the irq already requested */ 6763 free_irq(phba->msix_entries[0].vector, phba); 6764 6765msi_fail_out: 6766 /* Unconfigure MSI-X capability structure */ 6767 pci_disable_msix(phba->pcidev); 6768 return rc; 6769} 6770 6771/** 6772 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6773 * @phba: pointer to lpfc hba data structure. 
6774 *
6775 * This routine is invoked to release the MSI-X vectors and then disable the
6776 * MSI-X interrupt mode to device with SLI-3 interface spec.
6777 **/
6778static void
6779lpfc_sli_disable_msix(struct lpfc_hba *phba)
6780{
6781 int i;
6782
6783 /* Free up MSI-X multi-message vectors */
6784 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6785 free_irq(phba->msix_entries[i].vector, phba);
6786 /* Disable MSI-X */
6787 pci_disable_msix(phba->pcidev);
6788
6789 return;
6790}
6791
6792/**
6793 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6794 * @phba: pointer to lpfc hba data structure.
6795 *
6796 * This routine is invoked to enable the MSI interrupt mode to device with
6797 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6798 * enable the MSI vector. The device driver is responsible for calling
6799 * request_irq() to register the MSI vector with an interrupt handler, which
6800 * is done in this function.
6801 *
6802 * Return codes
6803 * 0 - successful
6804 * other values - error
6805 */
6806static int
6807lpfc_sli_enable_msi(struct lpfc_hba *phba)
6808{
6809 int rc;
6810
6811 rc = pci_enable_msi(phba->pcidev);
6812 if (!rc)
6813 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6814 "0462 PCI enable MSI mode success.\n");
6815 else {
6816 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6817 "0471 PCI enable MSI mode failed (%d)\n", rc);
6818 return rc;
6819 }
6820
6821 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6822 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6823 if (rc) {
6824 pci_disable_msi(phba->pcidev);
6825 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6826 "0478 MSI request_irq failed (%d)\n", rc);
6827 }
6828 return rc;
6829}
6830
6831/**
6832 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6833 * @phba: pointer to lpfc hba data structure.
6834 *
6835 * This routine is invoked to disable the MSI interrupt mode to device with
6836 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
6837 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6838 * results in a BUG_ON() and leaves the device with MSI enabled and its
6839 * vector leaked.
6840 */
6841static void
6842lpfc_sli_disable_msi(struct lpfc_hba *phba)
6843{
6844 free_irq(phba->pcidev->irq, phba);
6845 pci_disable_msi(phba->pcidev);
6846 return;
6847}
6848
6849/**
6850 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6851 * @phba: pointer to lpfc hba data structure.
6852 *
6853 * This routine is invoked to enable device interrupt and associate driver's
6854 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6855 * spec. Depending on the interrupt mode configured for the driver, the driver
6856 * will try to fall back from the configured interrupt mode to an interrupt
6857 * mode which is supported by the platform, kernel, and device in the order
6858 * of:
6859 * MSI-X -> MSI -> IRQ.
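 *
 * (Editor's hedged reading of the code below: cfg_mode 2 attempts MSI-X
 * after a CONFIG_PORT for SLI rev 3, cfg_mode >= 1 permits the MSI
 * fallback, and INTx is the last resort; the return value is the mode
 * actually established, 2, 1 or 0, or LPFC_INTR_ERROR if none was.)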
6860 * 6861 * Return codes 6862 * 0 - successful 6863 * other values - error 6864 **/ 6865static uint32_t 6866lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6867{ 6868 uint32_t intr_mode = LPFC_INTR_ERROR; 6869 int retval; 6870 6871 if (cfg_mode == 2) { 6872 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6873 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6874 if (!retval) { 6875 /* Now, try to enable MSI-X interrupt mode */ 6876 retval = lpfc_sli_enable_msix(phba); 6877 if (!retval) { 6878 /* Indicate initialization to MSI-X mode */ 6879 phba->intr_type = MSIX; 6880 intr_mode = 2; 6881 } 6882 } 6883 } 6884 6885 /* Fallback to MSI if MSI-X initialization failed */ 6886 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6887 retval = lpfc_sli_enable_msi(phba); 6888 if (!retval) { 6889 /* Indicate initialization to MSI mode */ 6890 phba->intr_type = MSI; 6891 intr_mode = 1; 6892 } 6893 } 6894 6895 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 6896 if (phba->intr_type == NONE) { 6897 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6898 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6899 if (!retval) { 6900 /* Indicate initialization to INTx mode */ 6901 phba->intr_type = INTx; 6902 intr_mode = 0; 6903 } 6904 } 6905 return intr_mode; 6906} 6907 6908/** 6909 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 6910 * @phba: pointer to lpfc hba data structure. 6911 * 6912 * This routine is invoked to disable device interrupt and disassociate the 6913 * driver's interrupt handler(s) from interrupt vector(s) to device with 6914 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 6915 * release the interrupt vector(s) for the message signaled interrupt. 6916 **/ 6917static void 6918lpfc_sli_disable_intr(struct lpfc_hba *phba) 6919{ 6920 /* Disable the currently initialized interrupt mode */ 6921 if (phba->intr_type == MSIX) 6922 lpfc_sli_disable_msix(phba); 6923 else if (phba->intr_type == MSI) 6924 lpfc_sli_disable_msi(phba); 6925 else if (phba->intr_type == INTx) 6926 free_irq(phba->pcidev->irq, phba); 6927 6928 /* Reset interrupt management states */ 6929 phba->intr_type = NONE; 6930 phba->sli.slistat.sli_intr = 0; 6931 6932 return; 6933} 6934 6935/** 6936 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 6937 * @phba: pointer to lpfc hba data structure. 6938 * 6939 * This routine is invoked to enable the MSI-X interrupt vectors to device 6940 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 6941 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 6942 * enables either all or nothing, depending on the current availability of 6943 * PCI vector resources. The device driver is responsible for calling the 6944 * individual request_irq() to register each MSI-X vector with a interrupt 6945 * handler, which is done in this function. Note that later when device is 6946 * unloading, the driver should always call free_irq() on all MSI-X vectors 6947 * it has done request_irq() on before calling pci_disable_msix(). Failure 6948 * to do so results in a BUG_ON() and a device will be left with MSI-X 6949 * enabled and leaks its vectors. 
6950 * 6951 * Return codes 6952 * 0 - successful 6953 * other values - error 6954 **/ 6955static int 6956lpfc_sli4_enable_msix(struct lpfc_hba *phba) 6957{ 6958 int vectors, rc, index; 6959 6960 /* Set up MSI-X multi-message vectors */ 6961 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6962 phba->sli4_hba.msix_entries[index].entry = index; 6963 6964 /* Configure MSI-X capability structure */ 6965 vectors = phba->sli4_hba.cfg_eqn; 6966enable_msix_vectors: 6967 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 6968 vectors); 6969 if (rc > 1) { 6970 vectors = rc; 6971 goto enable_msix_vectors; 6972 } else if (rc) { 6973 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6974 "0484 PCI enable MSI-X failed (%d)\n", rc); 6975 goto msi_fail_out; 6976 } 6977 6978 /* Log MSI-X vector assignment */ 6979 for (index = 0; index < vectors; index++) 6980 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6981 "0489 MSI-X entry[%d]: vector=x%x " 6982 "message=%d\n", index, 6983 phba->sli4_hba.msix_entries[index].vector, 6984 phba->sli4_hba.msix_entries[index].entry); 6985 /* 6986 * Assign MSI-X vectors to interrupt handlers 6987 */ 6988 6989 /* The first vector must associated to slow-path handler for MQ */ 6990 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 6991 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 6992 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6993 if (rc) { 6994 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6995 "0485 MSI-X slow-path request_irq failed " 6996 "(%d)\n", rc); 6997 goto msi_fail_out; 6998 } 6999 7000 /* The rest of the vector(s) are associated to fast-path handler(s) */ 7001 for (index = 1; index < vectors; index++) { 7002 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 7003 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 7004 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 7005 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 7006 LPFC_FP_DRIVER_HANDLER_NAME, 7007 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7008 if (rc) { 7009 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7010 "0486 MSI-X fast-path (%d) " 7011 "request_irq failed (%d)\n", index, rc); 7012 goto cfg_fail_out; 7013 } 7014 } 7015 phba->sli4_hba.msix_vec_nr = vectors; 7016 7017 return rc; 7018 7019cfg_fail_out: 7020 /* free the irq already requested */ 7021 for (--index; index >= 1; index--) 7022 free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 7023 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7024 7025 /* free the irq already requested */ 7026 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7027 7028msi_fail_out: 7029 /* Unconfigure MSI-X capability structure */ 7030 pci_disable_msix(phba->pcidev); 7031 return rc; 7032} 7033 7034/** 7035 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 7036 * @phba: pointer to lpfc hba data structure. 7037 * 7038 * This routine is invoked to release the MSI-X vectors and then disable the 7039 * MSI-X interrupt mode to device with SLI-4 interface spec. 
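 *
 * (Editor's note: the dev_id cookies passed to free_irq() below must
 * match those used at request time, i.e. phba for vector 0 and
 * &phba->sli4_hba.fcp_eq_hdl[index - 1] for the fast-path vectors.)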
7040 **/
7041static void
7042lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7043{
7044 int index;
7045
7046 /* Free up MSI-X multi-message vectors */
7047 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7048
7049 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7050 free_irq(phba->sli4_hba.msix_entries[index].vector,
7051 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7052
7053 /* Disable MSI-X */
7054 pci_disable_msix(phba->pcidev);
7055
7056 return;
7057}
7058
7059/**
7060 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7061 * @phba: pointer to lpfc hba data structure.
7062 *
7063 * This routine is invoked to enable the MSI interrupt mode to device with
7064 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7065 * to enable the MSI vector. The device driver is responsible for calling
7066 * request_irq() to register the MSI vector with an interrupt handler,
7067 * which is done in this function.
7068 *
7069 * Return codes
7070 * 0 - successful
7071 * other values - error
7072 **/
7073static int
7074lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7075{
7076 int rc, index;
7077
7078 rc = pci_enable_msi(phba->pcidev);
7079 if (!rc)
7080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7081 "0487 PCI enable MSI mode success.\n");
7082 else {
7083 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7084 "0488 PCI enable MSI mode failed (%d)\n", rc);
7085 return rc;
7086 }
7087
7088 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7089 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7090 if (rc) {
7091 pci_disable_msi(phba->pcidev);
7092 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7093 "0490 MSI request_irq failed (%d)\n", rc);
7094 return rc;
7095 }
7096
7097 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7098 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7099 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7100 }
7101
7102 return 0;
7103}
7104
7105/**
7106 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7107 * @phba: pointer to lpfc hba data structure.
7108 *
7109 * This routine is invoked to disable the MSI interrupt mode to device with
7110 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7111 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7112 * results in a BUG_ON() and leaves the device with MSI enabled and its
7113 * vector leaked.
7114 **/
7115static void
7116lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7117{
7118 free_irq(phba->pcidev->irq, phba);
7119 pci_disable_msi(phba->pcidev);
7120 return;
7121}
7122
7123/**
7124 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7125 * @phba: pointer to lpfc hba data structure.
7126 *
7127 * This routine is invoked to enable device interrupt and associate driver's
7128 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7129 * interface spec. Depending on the interrupt mode configured for the driver,
7130 * the driver will try to fall back from the configured interrupt mode to an
7131 * interrupt mode which is supported by the platform, kernel, and device in
7132 * the order of:
7133 * MSI-X -> MSI -> IRQ.
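 *
 * (Editor's note: unlike the MSI-X path, the single-vector MSI and INTx
 * paths below still populate every fcp_eq_hdl[] entry, since one
 * interrupt line must service all fast-path event queues.)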
7134 * 7135 * Return codes 7136 * 0 - successful 7137 * other values - error 7138 **/ 7139static uint32_t 7140lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 7141{ 7142 uint32_t intr_mode = LPFC_INTR_ERROR; 7143 int retval, index; 7144 7145 if (cfg_mode == 2) { 7146 /* Preparation before conf_msi mbox cmd */ 7147 retval = 0; 7148 if (!retval) { 7149 /* Now, try to enable MSI-X interrupt mode */ 7150 retval = lpfc_sli4_enable_msix(phba); 7151 if (!retval) { 7152 /* Indicate initialization to MSI-X mode */ 7153 phba->intr_type = MSIX; 7154 intr_mode = 2; 7155 } 7156 } 7157 } 7158 7159 /* Fall back to MSI if MSI-X initialization failed */ 7160 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7161 retval = lpfc_sli4_enable_msi(phba); 7162 if (!retval) { 7163 /* Indicate initialization to MSI mode */ 7164 phba->intr_type = MSI; 7165 intr_mode = 1; 7166 } 7167 } 7168 7169 /* Fall back to INTx if both MSI-X/MSI initialization failed */ 7170 if (phba->intr_type == NONE) { 7171 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7172 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7173 if (!retval) { 7174 /* Indicate initialization to INTx mode */ 7175 phba->intr_type = INTx; 7176 intr_mode = 0; 7177 for (index = 0; index < phba->cfg_fcp_eq_count; 7178 index++) { 7179 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7180 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7181 } 7182 } 7183 } 7184 return intr_mode; 7185} 7186 7187/** 7188 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 7189 * @phba: pointer to lpfc hba data structure. 7190 * 7191 * This routine is invoked to disable device interrupt and disassociate 7192 * the driver's interrupt handler(s) from interrupt vector(s) to device 7193 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 7194 * will release the interrupt vector(s) for the message signaled interrupt. 7195 **/ 7196static void 7197lpfc_sli4_disable_intr(struct lpfc_hba *phba) 7198{ 7199 /* Disable the currently initialized interrupt mode */ 7200 if (phba->intr_type == MSIX) 7201 lpfc_sli4_disable_msix(phba); 7202 else if (phba->intr_type == MSI) 7203 lpfc_sli4_disable_msi(phba); 7204 else if (phba->intr_type == INTx) 7205 free_irq(phba->pcidev->irq, phba); 7206 7207 /* Reset interrupt management states */ 7208 phba->intr_type = NONE; 7209 phba->sli.slistat.sli_intr = 0; 7210 7211 return; 7212} 7213 7214/** 7215 * lpfc_unset_hba - Unset SLI3 hba device initialization 7216 * @phba: pointer to lpfc hba data structure. 7217 * 7218 * This routine is invoked to unset the HBA device initialization steps done 7219 * to a device with SLI-3 interface spec. 7220 **/ 7221static void 7222lpfc_unset_hba(struct lpfc_hba *phba) 7223{ 7224 struct lpfc_vport *vport = phba->pport; 7225 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7226 7227 spin_lock_irq(shost->host_lock); 7228 vport->load_flag |= FC_UNLOADING; 7229 spin_unlock_irq(shost->host_lock); 7230 7231 lpfc_stop_hba_timers(phba); 7232 7233 phba->pport->work_port_events = 0; 7234 7235 lpfc_sli_hba_down(phba); 7236 7237 lpfc_sli_brdrestart(phba); 7238 7239 lpfc_sli_disable_intr(phba); 7240 7241 return; 7242} 7243 7244/** 7245 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 7246 * @phba: pointer to lpfc hba data structure. 7247 * 7248 * This routine is invoked to unset the HBA device initialization steps done 7249 * to a device with SLI-4 interface spec.
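 *
 * In summary, the steps below are:
 *
 *	mark vport FC_UNLOADING -> clear work_port_events
 *	-> lpfc_stop_port() -> lpfc_sli4_disable_intr()
 *	-> lpfc_pci_function_reset()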
7250 **/ 7251static void 7252lpfc_sli4_unset_hba(struct lpfc_hba *phba) 7253{ 7254 struct lpfc_vport *vport = phba->pport; 7255 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7256 7257 spin_lock_irq(shost->host_lock); 7258 vport->load_flag |= FC_UNLOADING; 7259 spin_unlock_irq(shost->host_lock); 7260 7261 phba->pport->work_port_events = 0; 7262 7263 /* Stop the SLI4 device port */ 7264 lpfc_stop_port(phba); 7265 7266 lpfc_sli4_disable_intr(phba); 7267 7268 /* Reset SLI4 HBA FCoE function */ 7269 lpfc_pci_function_reset(phba); 7270 7271 return; 7272} 7273 7274/** 7275 * lpfc_sli4_hba_unset - Unset the fcoe hba 7276 * @phba: Pointer to HBA context object. 7277 * 7278 * This function is called in the SLI4 code path to reset the HBA's FCoE 7279 * function. The caller is not required to hold any lock. This routine 7280 * issues PCI function reset mailbox command to reset the FCoE function. 7281 * At the end of the function, it calls lpfc_hba_down_post function to 7282 * free any pending commands. 7283 **/ 7284static void 7285lpfc_sli4_hba_unset(struct lpfc_hba *phba) 7286{ 7287 int wait_cnt = 0; 7288 LPFC_MBOXQ_t *mboxq; 7289 7290 lpfc_stop_hba_timers(phba); 7291 phba->sli4_hba.intr_enable = 0; 7292 7293 /* 7294 * Gracefully wait out the potential current outstanding asynchronous 7295 * mailbox command. 7296 */ 7297 7298 /* First, block any pending async mailbox command from being posted */ 7299 spin_lock_irq(&phba->hbalock); 7300 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7301 spin_unlock_irq(&phba->hbalock); 7302 /* Now, try to wait it out if we can */ 7303 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7304 msleep(10); 7305 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 7306 break; 7307 } 7308 /* Forcefully release the outstanding mailbox command if timed out */ 7309 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7310 spin_lock_irq(&phba->hbalock); 7311 mboxq = phba->sli.mbox_active; 7312 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7313 __lpfc_mbox_cmpl_put(phba, mboxq); 7314 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7315 phba->sli.mbox_active = NULL; 7316 spin_unlock_irq(&phba->hbalock); 7317 } 7318 7319 /* Disable PCI subsystem interrupt */ 7320 lpfc_sli4_disable_intr(phba); 7321 7322 /* Stop kthread; the stop signal shall trigger work_done one more time */ 7323 kthread_stop(phba->worker_thread); 7324 7325 /* Reset SLI4 HBA FCoE function */ 7326 lpfc_pci_function_reset(phba); 7327 7328 /* Stop the SLI4 device port */ 7329 phba->pport->work_port_events = 0; 7330} 7331 7332/** 7333 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 7334 * @phba: Pointer to HBA context object. 7335 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 7336 * 7337 * This function is called in the SLI4 code path to read the port's 7338 * sli4 capabilities. 7339 * 7340 * This function may be called from any context that can block-wait 7341 * for the completion. The expectation is that this routine is called 7342 * typically from probe_one or from the online routine.
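 *
 * A minimal calling sketch (an illustration; the mailbox is assumed to be
 * drawn from the driver's mailbox mempool, as elsewhere in this file):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *	mempool_free(mboxq, phba->mbox_mem_pool);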
7343 **/ 7344int 7345lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7346{ 7347 int rc; 7348 struct lpfc_mqe *mqe; 7349 struct lpfc_pc_sli4_params *sli4_params; 7350 uint32_t mbox_tmo; 7351 7352 rc = 0; 7353 mqe = &mboxq->u.mqe; 7354 7355 /* Read the port's SLI4 Parameters port capabilities */ 7356 lpfc_sli4_params(mboxq); 7357 if (!phba->sli4_hba.intr_enable) 7358 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7359 else { 7360 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); 7361 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7362 } 7363 7364 if (unlikely(rc)) 7365 return 1; 7366 7367 sli4_params = &phba->sli4_hba.pc_sli4_params; 7368 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 7369 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 7370 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 7371 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 7372 &mqe->un.sli4_params); 7373 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 7374 &mqe->un.sli4_params); 7375 sli4_params->proto_types = mqe->un.sli4_params.word3; 7376 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 7377 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 7378 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 7379 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 7380 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 7381 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 7382 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 7383 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 7384 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 7385 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 7386 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 7387 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 7388 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 7389 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 7390 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 7391 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 7392 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 7393 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 7394 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 7395 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 7396 return rc; 7397} 7398 7399/** 7400 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7401 * @pdev: pointer to PCI device 7402 * @pid: pointer to PCI device identifier 7403 * 7404 * This routine is to be called to attach a device with SLI-3 interface spec 7405 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7406 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 7407 * information of the device and driver to see if the driver states that it can 7408 * support this kind of device. If the match is successful, the driver core 7409 * invokes this routine. If this routine determines it can claim the HBA, it 7410 * does all the initialization that it needs to do to handle the HBA properly.
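 *
 * The body follows the usual kernel probe idiom: each setup step either
 * succeeds or jumps to a label that unwinds everything done so far, in
 * reverse order. In outline (a generic sketch, not the exact labels):
 *
 *	error = do_step(phba);
 *	if (error)
 *		goto out_undo_previous_steps;
 *	...
 *	return 0;			all steps succeeded
 *	out_undo_previous_steps:
 *		undo completed steps, newest first, then return error;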
7411 * 7412 * Return code 7413 * 0 - driver can claim the device 7414 * negative value - driver can not claim the device 7415 **/ 7416static int __devinit 7417lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 7418{ 7419 struct lpfc_hba *phba; 7420 struct lpfc_vport *vport = NULL; 7421 struct Scsi_Host *shost = NULL; 7422 int error; 7423 uint32_t cfg_mode, intr_mode; 7424 7425 /* Allocate memory for HBA structure */ 7426 phba = lpfc_hba_alloc(pdev); 7427 if (!phba) 7428 return -ENOMEM; 7429 7430 /* Perform generic PCI device enabling operation */ 7431 error = lpfc_enable_pci_dev(phba); 7432 if (error) { 7433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7434 "1401 Failed to enable pci device.\n"); 7435 goto out_free_phba; 7436 } 7437 7438 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 7439 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 7440 if (error) 7441 goto out_disable_pci_dev; 7442 7443 /* Set up SLI-3 specific device PCI memory space */ 7444 error = lpfc_sli_pci_mem_setup(phba); 7445 if (error) { 7446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7447 "1402 Failed to set up pci memory space.\n"); 7448 goto out_disable_pci_dev; 7449 } 7450 7451 /* Set up phase-1 common device driver resources */ 7452 error = lpfc_setup_driver_resource_phase1(phba); 7453 if (error) { 7454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7455 "1403 Failed to set up driver resource.\n"); 7456 goto out_unset_pci_mem_s3; 7457 } 7458 7459 /* Set up SLI-3 specific device driver resources */ 7460 error = lpfc_sli_driver_resource_setup(phba); 7461 if (error) { 7462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7463 "1404 Failed to set up driver resource.\n"); 7464 goto out_unset_pci_mem_s3; 7465 } 7466 7467 /* Initialize and populate the iocb list per host */ 7468 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 7469 if (error) { 7470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7471 "1405 Failed to initialize iocb list.\n"); 7472 goto out_unset_driver_resource_s3; 7473 } 7474 7475 /* Set up common device driver resources */ 7476 error = lpfc_setup_driver_resource_phase2(phba); 7477 if (error) { 7478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7479 "1406 Failed to set up driver resource.\n"); 7480 goto out_free_iocb_list; 7481 } 7482 7483 /* Create SCSI host to the physical port */ 7484 error = lpfc_create_shost(phba); 7485 if (error) { 7486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7487 "1407 Failed to create scsi host.\n"); 7488 goto out_unset_driver_resource; 7489 } 7490 7491 /* Configure sysfs attributes */ 7492 vport = phba->pport; 7493 error = lpfc_alloc_sysfs_attr(vport); 7494 if (error) { 7495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7496 "1476 Failed to allocate sysfs attr\n"); 7497 goto out_destroy_shost; 7498 } 7499 7500 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 7501 /* Now, trying to enable interrupt and bring up the device */ 7502 cfg_mode = phba->cfg_use_msi; 7503 while (true) { 7504 /* Put device to a known state before enabling interrupt */ 7505 lpfc_stop_port(phba); 7506 /* Configure and enable interrupt */ 7507 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 7508 if (intr_mode == LPFC_INTR_ERROR) { 7509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7510 "0431 Failed to enable interrupt.\n"); 7511 error = -ENODEV; 7512 goto out_free_sysfs_attr; 7513 } 7514 /* SLI-3 HBA setup */ 7515 if (lpfc_sli_hba_setup(phba)) { 7516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7517 "1477 Failed to set up hba\n"); 7518 error = -ENODEV; 7519 goto 
out_remove_device; 7520 } 7521 7522 /* Wait 50ms for the interrupts of previous mailbox commands */ 7523 msleep(50); 7524 /* Check active interrupts on message signaled interrupts */ 7525 if (intr_mode == 0 || 7526 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 7527 /* Log the current active interrupt mode */ 7528 phba->intr_mode = intr_mode; 7529 lpfc_log_intr_mode(phba, intr_mode); 7530 break; 7531 } else { 7532 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7533 "0447 Configure interrupt mode (%d) " 7534 "failed active interrupt test.\n", 7535 intr_mode); 7536 /* Disable the current interrupt mode */ 7537 lpfc_sli_disable_intr(phba); 7538 /* Try next level of interrupt mode */ 7539 cfg_mode = --intr_mode; 7540 } 7541 } 7542 7543 /* Perform post initialization setup */ 7544 lpfc_post_init_setup(phba); 7545 7546 /* Check if there are static vports to be created. */ 7547 lpfc_create_static_vport(phba); 7548 7549 return 0; 7550 7551out_remove_device: 7552 lpfc_unset_hba(phba); 7553out_free_sysfs_attr: 7554 lpfc_free_sysfs_attr(vport); 7555out_destroy_shost: 7556 lpfc_destroy_shost(phba); 7557out_unset_driver_resource: 7558 lpfc_unset_driver_resource_phase2(phba); 7559out_free_iocb_list: 7560 lpfc_free_iocb_list(phba); 7561out_unset_driver_resource_s3: 7562 lpfc_sli_driver_resource_unset(phba); 7563out_unset_pci_mem_s3: 7564 lpfc_sli_pci_mem_unset(phba); 7565out_disable_pci_dev: 7566 lpfc_disable_pci_dev(phba); 7567 if (shost) 7568 scsi_host_put(shost); 7569out_free_phba: 7570 lpfc_hba_free(phba); 7571 return error; 7572} 7573 7574/** 7575 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 7576 * @pdev: pointer to PCI device 7577 * 7578 * This routine is to be called to detach a device with SLI-3 interface 7579 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7580 * removed from PCI bus, it performs all the necessary cleanup for the HBA 7581 * device to be removed from the PCI subsystem properly. 7582 **/ 7583static void __devexit 7584lpfc_pci_remove_one_s3(struct pci_dev *pdev) 7585{ 7586 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7587 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 7588 struct lpfc_vport **vports; 7589 struct lpfc_hba *phba = vport->phba; 7590 int i; 7591 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 7592 7593 spin_lock_irq(&phba->hbalock); 7594 vport->load_flag |= FC_UNLOADING; 7595 spin_unlock_irq(&phba->hbalock); 7596 7597 lpfc_free_sysfs_attr(vport); 7598 7599 /* Release all the vports against this physical port */ 7600 vports = lpfc_create_vport_work_array(phba); 7601 if (vports != NULL) 7602 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 7603 fc_vport_terminate(vports[i]->fc_vport); 7604 lpfc_destroy_vport_work_array(phba, vports); 7605 7606 /* Remove FC host and then SCSI host with the physical port */ 7607 fc_remove_host(shost); 7608 scsi_remove_host(shost); 7609 lpfc_cleanup(vport); 7610 7611 /* 7612 * Bring down the SLI Layer. This step disables all interrupts, 7613 * clears the rings, discards all mailbox commands, and resets 7614 * the HBA.
7615 */ 7616 7617 /* HBA interrupt will be diabled after this call */ 7618 lpfc_sli_hba_down(phba); 7619 /* Stop kthread signal shall trigger work_done one more time */ 7620 kthread_stop(phba->worker_thread); 7621 /* Final cleanup of txcmplq and reset the HBA */ 7622 lpfc_sli_brdrestart(phba); 7623 7624 lpfc_stop_hba_timers(phba); 7625 spin_lock_irq(&phba->hbalock); 7626 list_del_init(&vport->listentry); 7627 spin_unlock_irq(&phba->hbalock); 7628 7629 lpfc_debugfs_terminate(vport); 7630 7631 /* Disable interrupt */ 7632 lpfc_sli_disable_intr(phba); 7633 7634 pci_set_drvdata(pdev, NULL); 7635 scsi_host_put(shost); 7636 7637 /* 7638 * Call scsi_free before mem_free since scsi bufs are released to their 7639 * corresponding pools here. 7640 */ 7641 lpfc_scsi_free(phba); 7642 lpfc_mem_free_all(phba); 7643 7644 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7645 phba->hbqslimp.virt, phba->hbqslimp.phys); 7646 7647 /* Free resources associated with SLI2 interface */ 7648 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7649 phba->slim2p.virt, phba->slim2p.phys); 7650 7651 /* unmap adapter SLIM and Control Registers */ 7652 iounmap(phba->ctrl_regs_memmap_p); 7653 iounmap(phba->slim_memmap_p); 7654 7655 lpfc_hba_free(phba); 7656 7657 pci_release_selected_regions(pdev, bars); 7658 pci_disable_device(pdev); 7659} 7660 7661/** 7662 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 7663 * @pdev: pointer to PCI device 7664 * @msg: power management message 7665 * 7666 * This routine is to be called from the kernel's PCI subsystem to support 7667 * system Power Management (PM) to device with SLI-3 interface spec. When 7668 * PM invokes this method, it quiesces the device by stopping the driver's 7669 * worker thread for the device, turning off device's interrupt and DMA, 7670 * and bring the device offline. Note that as the driver implements the 7671 * minimum PM requirements to a power-aware driver's PM support for the 7672 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 7673 * to the suspend() method call will be treated as SUSPEND and the driver will 7674 * fully reinitialize its device during resume() method call, the driver will 7675 * set device to PCI_D3hot state in PCI config space instead of setting it 7676 * according to the @msg provided by the PM. 7677 * 7678 * Return code 7679 * 0 - driver suspended the device 7680 * Error otherwise 7681 **/ 7682static int 7683lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 7684{ 7685 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7686 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7687 7688 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7689 "0473 PCI device Power Management suspend.\n"); 7690 7691 /* Bring down the device */ 7692 lpfc_offline_prep(phba); 7693 lpfc_offline(phba); 7694 kthread_stop(phba->worker_thread); 7695 7696 /* Disable interrupt from device */ 7697 lpfc_sli_disable_intr(phba); 7698 7699 /* Save device state to PCI config space */ 7700 pci_save_state(pdev); 7701 pci_set_power_state(pdev, PCI_D3hot); 7702 7703 return 0; 7704} 7705 7706/** 7707 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 7708 * @pdev: pointer to PCI device 7709 * 7710 * This routine is to be called from the kernel's PCI subsystem to support 7711 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 7712 * invokes this method, it restores the device's PCI config space state and 7713 * fully reinitializes the device and brings it online. Note that as the 7714 * driver implements only the minimum PM requirements for a power-aware 7715 * driver's PM for suspend/resume -- all the possible PM messages (SUSPEND, 7716 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND and 7717 * the driver fully reinitializes its device during the resume() method call 7718 * -- the device will be set to PCI_D0 directly in PCI config space before 7719 * restoring the state. 7720 * 7721 * Return code 7722 * 0 - driver resumed the device 7723 * Error otherwise 7724 **/ 7725static int 7726lpfc_pci_resume_one_s3(struct pci_dev *pdev) 7727{ 7728 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7729 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7730 uint32_t intr_mode; 7731 int error; 7732 7733 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7734 "0452 PCI device Power Management resume.\n"); 7735 7736 /* Restore device state from PCI config space */ 7737 pci_set_power_state(pdev, PCI_D0); 7738 pci_restore_state(pdev); 7739 7740 /* 7741 * As the new kernel behavior of pci_restore_state() API call clears 7742 * device saved_state flag, need to save the restored state again. 7743 */ 7744 pci_save_state(pdev); 7745 7746 if (pdev->is_busmaster) 7747 pci_set_master(pdev); 7748 7749 /* Startup the kernel thread for this host adapter. */ 7750 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7751 "lpfc_worker_%d", phba->brd_no); 7752 if (IS_ERR(phba->worker_thread)) { 7753 error = PTR_ERR(phba->worker_thread); 7754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7755 "0434 PM resume failed to start worker " 7756 "thread: error=x%x.\n", error); 7757 return error; 7758 } 7759 7760 /* Configure and enable interrupt */ 7761 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7762 if (intr_mode == LPFC_INTR_ERROR) { 7763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7764 "0430 PM resume Failed to enable interrupt\n"); 7765 return -EIO; 7766 } else 7767 phba->intr_mode = intr_mode; 7768 7769 /* Restart HBA and bring it online */ 7770 lpfc_sli_brdrestart(phba); 7771 lpfc_online(phba); 7772 7773 /* Log the current active interrupt mode */ 7774 lpfc_log_intr_mode(phba, phba->intr_mode); 7775 7776 return 0; 7777} 7778 7779/** 7780 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 7781 * @phba: pointer to lpfc hba data structure. 7782 * 7783 * This routine is called to prepare the SLI3 device for PCI slot recover. It 7784 * aborts all the outstanding SCSI I/Os to the pci device. 7785 **/ 7786static void 7787lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 7788{ 7789 struct lpfc_sli *psli = &phba->sli; 7790 struct lpfc_sli_ring *pring; 7791 7792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7793 "2723 PCI channel I/O abort preparing for recovery\n"); 7794 7795 /* 7796 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 7797 * and let the SCSI mid-layer retry them to recover. 7798 */ 7799 pring = &psli->ring[psli->fcp_ring]; 7800 lpfc_sli_abort_iocb_ring(phba, pring); 7801} 7802 7803/** 7804 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 7805 * @phba: pointer to lpfc hba data structure. 7806 * 7807 * This routine is called to prepare the SLI3 device for PCI slot reset. It 7808 * disables the device interrupt and pci device, and aborts the internal FCP 7809 * pending I/Os.
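 *
 * The quiesce ordering below matters: I/O sources are blocked and timers
 * stopped before the interrupt and the PCI device are disabled, and only
 * then are the outstanding FCP rings flushed. In summary:
 *
 *	lpfc_block_mgmt_io() -> lpfc_scsi_dev_block()
 *	-> lpfc_stop_hba_timers() -> lpfc_sli_disable_intr()
 *	-> pci_disable_device() -> lpfc_sli_flush_fcp_rings()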
7810 **/ 7811static void 7812lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 7813{ 7814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7815 "2710 PCI channel disable preparing for reset\n"); 7816 7817 /* Block any management I/Os to the device */ 7818 lpfc_block_mgmt_io(phba); 7819 7820 /* Block all SCSI devices' I/Os on the host */ 7821 lpfc_scsi_dev_block(phba); 7822 7823 /* stop all timers */ 7824 lpfc_stop_hba_timers(phba); 7825 7826 /* Disable interrupt and pci device */ 7827 lpfc_sli_disable_intr(phba); 7828 pci_disable_device(phba->pcidev); 7829 7830 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 7831 lpfc_sli_flush_fcp_rings(phba); 7832} 7833 7834/** 7835 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 7836 * @phba: pointer to lpfc hba data structure. 7837 * 7838 * This routine is called to prepare the SLI3 device for PCI slot permanently 7839 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 7840 * pending I/Os. 7841 **/ 7842static void 7843lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 7844{ 7845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7846 "2711 PCI channel permanent disable for failure\n"); 7847 /* Block all SCSI devices' I/Os on the host */ 7848 lpfc_scsi_dev_block(phba); 7849 7850 /* stop all timers */ 7851 lpfc_stop_hba_timers(phba); 7852 7853 /* Clean up all driver's outstanding SCSI I/Os */ 7854 lpfc_sli_flush_fcp_rings(phba); 7855} 7856 7857/** 7858 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7859 * @pdev: pointer to PCI device. 7860 * @state: the current PCI connection state. 7861 * 7862 * This routine is called from the PCI subsystem for I/O error handling to 7863 * device with SLI-3 interface spec. This function is called by the PCI 7864 * subsystem after a PCI bus error affecting this device has been detected. 7865 * When this function is invoked, it will need to stop all the I/Os and 7866 * interrupt(s) to the device. Once that is done, it will return 7867 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 7868 * as desired. 7869 * 7870 * Return codes 7871 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 7872 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7873 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7874 **/ 7875static pci_ers_result_t 7876lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 7877{ 7878 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7879 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7880 7881 switch (state) { 7882 case pci_channel_io_normal: 7883 /* Non-fatal error, prepare for recovery */ 7884 lpfc_sli_prep_dev_for_recover(phba); 7885 return PCI_ERS_RESULT_CAN_RECOVER; 7886 case pci_channel_io_frozen: 7887 /* Fatal error, prepare for slot reset */ 7888 lpfc_sli_prep_dev_for_reset(phba); 7889 return PCI_ERS_RESULT_NEED_RESET; 7890 case pci_channel_io_perm_failure: 7891 /* Permanent failure, prepare for device down */ 7892 lpfc_sli_prep_dev_for_perm_failure(phba); 7893 return PCI_ERS_RESULT_DISCONNECT; 7894 default: 7895 /* Unknown state, prepare and request slot reset */ 7896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7897 "0472 Unknown PCI error state: x%x\n", state); 7898 lpfc_sli_prep_dev_for_reset(phba); 7899 return PCI_ERS_RESULT_NEED_RESET; 7900 } 7901} 7902 7903/** 7904 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 7905 * @pdev: pointer to PCI device. 
7906 * 7907 * This routine is called from the PCI subsystem for error handling to 7908 * device with SLI-3 interface spec. This is called after PCI bus has been 7909 * reset to restart the PCI card from scratch, as if from a cold-boot. 7910 * During the PCI subsystem error recovery, after driver returns 7911 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 7912 * recovery and then call this routine before calling the .resume method 7913 * to recover the device. This function will initialize the HBA device, 7914 * enable the interrupt, but it will just put the HBA to offline state 7915 * without passing any I/O traffic. 7916 * 7917 * Return codes 7918 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7919 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7920 */ 7921static pci_ers_result_t 7922lpfc_io_slot_reset_s3(struct pci_dev *pdev) 7923{ 7924 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7925 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7926 struct lpfc_sli *psli = &phba->sli; 7927 uint32_t intr_mode; 7928 7929 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 7930 if (pci_enable_device_mem(pdev)) { 7931 printk(KERN_ERR "lpfc: Cannot re-enable " 7932 "PCI device after reset.\n"); 7933 return PCI_ERS_RESULT_DISCONNECT; 7934 } 7935 7936 pci_restore_state(pdev); 7937 7938 /* 7939 * As the new kernel behavior of pci_restore_state() API call clears 7940 * device saved_state flag, need to save the restored state again. 7941 */ 7942 pci_save_state(pdev); 7943 7944 if (pdev->is_busmaster) 7945 pci_set_master(pdev); 7946 7947 spin_lock_irq(&phba->hbalock); 7948 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7949 spin_unlock_irq(&phba->hbalock); 7950 7951 /* Configure and enable interrupt */ 7952 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7953 if (intr_mode == LPFC_INTR_ERROR) { 7954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7955 "0427 Cannot re-enable interrupt after " 7956 "slot reset.\n"); 7957 return PCI_ERS_RESULT_DISCONNECT; 7958 } else 7959 phba->intr_mode = intr_mode; 7960 7961 /* Take device offline, it will perform cleanup */ 7962 lpfc_offline_prep(phba); 7963 lpfc_offline(phba); 7964 lpfc_sli_brdrestart(phba); 7965 7966 /* Log the current active interrupt mode */ 7967 lpfc_log_intr_mode(phba, phba->intr_mode); 7968 7969 return PCI_ERS_RESULT_RECOVERED; 7970} 7971 7972/** 7973 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 7974 * @pdev: pointer to PCI device 7975 * 7976 * This routine is called from the PCI subsystem for error handling to device 7977 * with SLI-3 interface spec. It is called when kernel error recovery tells 7978 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 7979 * error recovery. After this call, traffic can start to flow from this device 7980 * again. 7981 */ 7982static void 7983lpfc_io_resume_s3(struct pci_dev *pdev) 7984{ 7985 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7986 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7987 7988 /* Bring device online, it will be no-op for non-fatal error resume */ 7989 lpfc_online(phba); 7990 7991 /* Clean up Advanced Error Reporting (AER) if needed */ 7992 if (phba->hba_flag & HBA_AER_ENABLED) 7993 pci_cleanup_aer_uncorrect_error_status(pdev); 7994} 7995 7996/** 7997 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 7998 * @phba: pointer to lpfc hba data structure. 
7999 * 8000 * returns the number of ELS/CT IOCBs to reserve 8001 **/ 8002int 8003lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8004{ 8005 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8006 8007 if (phba->sli_rev == LPFC_SLI_REV4) { 8008 if (max_xri <= 100) 8009 return 10; 8010 else if (max_xri <= 256) 8011 return 25; 8012 else if (max_xri <= 512) 8013 return 50; 8014 else if (max_xri <= 1024) 8015 return 100; 8016 else 8017 return 150; 8018 } else 8019 return 0; 8020} 8021 8022/** 8023 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8024 * @pdev: pointer to PCI device 8025 * @pid: pointer to PCI device identifier 8026 * 8027 * This routine is to be called to attach a device with SLI-4 interface spec 8028 * to the PCI subsystem. When an Emulex HBA with SLI-4 interface spec is 8029 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8030 * information of the device and driver to see if the driver states that it 8031 * can support this kind of device. If the match is successful, the driver 8032 * core invokes this routine. If this routine determines it can claim the HBA, 8033 * it does all the initialization that it needs to do to handle the HBA 8034 * properly. 8035 * 8036 * Return code 8037 * 0 - driver can claim the device 8038 * negative value - driver can not claim the device 8039 **/ 8040static int __devinit 8041lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 8042{ 8043 struct lpfc_hba *phba; 8044 struct lpfc_vport *vport = NULL; 8045 struct Scsi_Host *shost = NULL; 8046 int error; 8047 uint32_t cfg_mode, intr_mode; 8048 int mcnt; 8049 8050 /* Allocate memory for HBA structure */ 8051 phba = lpfc_hba_alloc(pdev); 8052 if (!phba) 8053 return -ENOMEM; 8054 8055 /* Perform generic PCI device enabling operation */ 8056 error = lpfc_enable_pci_dev(phba); 8057 if (error) { 8058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8059 "1409 Failed to enable pci device.\n"); 8060 goto out_free_phba; 8061 } 8062 8063 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 8064 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 8065 if (error) 8066 goto out_disable_pci_dev; 8067 8068 /* Set up SLI-4 specific device PCI memory space */ 8069 error = lpfc_sli4_pci_mem_setup(phba); 8070 if (error) { 8071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8072 "1410 Failed to set up pci memory space.\n"); 8073 goto out_disable_pci_dev; 8074 } 8075 8076 /* Set up phase-1 common device driver resources */ 8077 error = lpfc_setup_driver_resource_phase1(phba); 8078 if (error) { 8079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8080 "1411 Failed to set up driver resource.\n"); 8081 goto out_unset_pci_mem_s4; 8082 } 8083 8084 /* Set up SLI-4 Specific device driver resources */ 8085 error = lpfc_sli4_driver_resource_setup(phba); 8086 if (error) { 8087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8088 "1412 Failed to set up driver resource.\n"); 8089 goto out_unset_pci_mem_s4; 8090 } 8091 8092 /* Initialize and populate the iocb list per host */ 8093 8094 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8095 "2821 initialize iocb list %d.\n", 8096 phba->cfg_iocb_cnt*1024); 8097 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 8098 8099 if (error) { 8100 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8101 "1413 Failed to initialize iocb list.\n"); 8102 goto out_unset_driver_resource_s4; 8103 } 8104 8105 /* Set up common device driver resources */ 8106 error = lpfc_setup_driver_resource_phase2(phba); 8107 if (error) { 8108
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8109 "1414 Failed to set up driver resource.\n"); 8110 goto out_free_iocb_list; 8111 } 8112 8113 /* Create SCSI host to the physical port */ 8114 error = lpfc_create_shost(phba); 8115 if (error) { 8116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8117 "1415 Failed to create scsi host.\n"); 8118 goto out_unset_driver_resource; 8119 } 8120 8121 /* Configure sysfs attributes */ 8122 vport = phba->pport; 8123 error = lpfc_alloc_sysfs_attr(vport); 8124 if (error) { 8125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8126 "1416 Failed to allocate sysfs attr\n"); 8127 goto out_destroy_shost; 8128 } 8129 8130 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8131 /* Now, trying to enable interrupt and bring up the device */ 8132 cfg_mode = phba->cfg_use_msi; 8133 while (true) { 8134 /* Put device to a known state before enabling interrupt */ 8135 lpfc_stop_port(phba); 8136 /* Configure and enable interrupt */ 8137 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 8138 if (intr_mode == LPFC_INTR_ERROR) { 8139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8140 "0426 Failed to enable interrupt.\n"); 8141 error = -ENODEV; 8142 goto out_free_sysfs_attr; 8143 } 8144 /* Default to single FCP EQ for non-MSI-X */ 8145 if (phba->intr_type != MSIX) 8146 phba->cfg_fcp_eq_count = 1; 8147 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 8148 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 8149 /* Set up SLI-4 HBA */ 8150 if (lpfc_sli4_hba_setup(phba)) { 8151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8152 "1421 Failed to set up hba\n"); 8153 error = -ENODEV; 8154 goto out_disable_intr; 8155 } 8156 8157 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 8158 if (intr_mode != 0) 8159 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 8160 LPFC_ACT_INTR_CNT); 8161 8162 /* Check active interrupts received only for MSI/MSI-X */ 8163 if (intr_mode == 0 || 8164 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 8165 /* Log the current active interrupt mode */ 8166 phba->intr_mode = intr_mode; 8167 lpfc_log_intr_mode(phba, intr_mode); 8168 break; 8169 } 8170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8171 "0451 Configure interrupt mode (%d) " 8172 "failed active interrupt test.\n", 8173 intr_mode); 8174 /* Unset the previous SLI-4 HBA setup */ 8175 lpfc_sli4_unset_hba(phba); 8176 /* Try next level of interrupt mode */ 8177 cfg_mode = --intr_mode; 8178 } 8179 8180 /* Perform post initialization setup */ 8181 lpfc_post_init_setup(phba); 8182 8183 /* Check if there are static vports to be created. */ 8184 lpfc_create_static_vport(phba); 8185 8186 return 0; 8187 8188out_disable_intr: 8189 lpfc_sli4_disable_intr(phba); 8190out_free_sysfs_attr: 8191 lpfc_free_sysfs_attr(vport); 8192out_destroy_shost: 8193 lpfc_destroy_shost(phba); 8194out_unset_driver_resource: 8195 lpfc_unset_driver_resource_phase2(phba); 8196out_free_iocb_list: 8197 lpfc_free_iocb_list(phba); 8198out_unset_driver_resource_s4: 8199 lpfc_sli4_driver_resource_unset(phba); 8200out_unset_pci_mem_s4: 8201 lpfc_sli4_pci_mem_unset(phba); 8202out_disable_pci_dev: 8203 lpfc_disable_pci_dev(phba); 8204 if (shost) 8205 scsi_host_put(shost); 8206out_free_phba: 8207 lpfc_hba_free(phba); 8208 return error; 8209} 8210 8211/** 8212 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 8213 * @pdev: pointer to PCI device 8214 * 8215 * This routine is called from the kernel's PCI subsystem to detach a device 8216 * with SLI-4 interface spec.
When an Emulex HBA with SLI-4 interface spec is 8217 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8218 * device to be removed from the PCI subsystem properly. 8219 **/ 8220static void __devexit 8221lpfc_pci_remove_one_s4(struct pci_dev *pdev) 8222{ 8223 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8224 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8225 struct lpfc_vport **vports; 8226 struct lpfc_hba *phba = vport->phba; 8227 int i; 8228 8229 /* Mark the device unloading flag */ 8230 spin_lock_irq(&phba->hbalock); 8231 vport->load_flag |= FC_UNLOADING; 8232 spin_unlock_irq(&phba->hbalock); 8233 8234 /* Free the HBA sysfs attributes */ 8235 lpfc_free_sysfs_attr(vport); 8236 8237 /* Release all the vports against this physical port */ 8238 vports = lpfc_create_vport_work_array(phba); 8239 if (vports != NULL) 8240 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8241 fc_vport_terminate(vports[i]->fc_vport); 8242 lpfc_destroy_vport_work_array(phba, vports); 8243 8244 /* Remove FC host and then SCSI host with the physical port */ 8245 fc_remove_host(shost); 8246 scsi_remove_host(shost); 8247 8248 /* Perform cleanup on the physical port */ 8249 lpfc_cleanup(vport); 8250 8251 /* 8252 * Bring down the SLI Layer. This step disables all interrupts, 8253 * clears the rings, discards all mailbox commands, and resets 8254 * the HBA FCoE function. 8255 */ 8256 lpfc_debugfs_terminate(vport); 8257 lpfc_sli4_hba_unset(phba); 8258 8259 spin_lock_irq(&phba->hbalock); 8260 list_del_init(&vport->listentry); 8261 spin_unlock_irq(&phba->hbalock); 8262 8263 /* Perform scsi free before driver resource_unset since scsi 8264 * buffers are released to their corresponding pools here. 8265 */ 8266 lpfc_scsi_free(phba); 8267 lpfc_sli4_driver_resource_unset(phba); 8268 8269 /* Unmap adapter Control and Doorbell registers */ 8270 lpfc_sli4_pci_mem_unset(phba); 8271 8272 /* Release PCI resources and disable device's PCI function */ 8273 scsi_host_put(shost); 8274 lpfc_disable_pci_dev(phba); 8275 8276 /* Finally, free the driver's device data structure */ 8277 lpfc_hba_free(phba); 8278 8279 return; 8280} 8281 8282/** 8283 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 8284 * @pdev: pointer to PCI device 8285 * @msg: power management message 8286 * 8287 * This routine is called from the kernel's PCI subsystem to support system 8288 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 8289 * this method, it quiesces the device by stopping the driver's worker 8290 * thread for the device, turning off the device's interrupt and DMA, and 8291 * bringing the device offline. Note that as the driver implements only the 8292 * minimum PM requirements for a power-aware driver's PM support for 8293 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8294 * to the suspend() method call are treated as SUSPEND and the driver fully 8295 * reinitializes its device during the resume() method call -- the driver 8296 * will set the device to PCI_D3hot state in PCI config space instead of 8297 * setting it according to the @msg provided by the PM.
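 *
 * In summary, the suspend sequence below is:
 *
 *	lpfc_offline_prep() -> lpfc_offline() -> kthread_stop(worker)
 *	-> lpfc_sli4_disable_intr() -> pci_save_state()
 *	-> pci_set_power_state(PCI_D3hot)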
8298 * 8299 * Return code 8300 * 0 - driver suspended the device 8301 * Error otherwise 8302 **/ 8303static int 8304lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 8305{ 8306 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8307 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8308 8309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8310 "2843 PCI device Power Management suspend.\n"); 8311 8312 /* Bring down the device */ 8313 lpfc_offline_prep(phba); 8314 lpfc_offline(phba); 8315 kthread_stop(phba->worker_thread); 8316 8317 /* Disable interrupt from device */ 8318 lpfc_sli4_disable_intr(phba); 8319 8320 /* Save device state to PCI config space */ 8321 pci_save_state(pdev); 8322 pci_set_power_state(pdev, PCI_D3hot); 8323 8324 return 0; 8325} 8326 8327/** 8328 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 8329 * @pdev: pointer to PCI device 8330 * 8331 * This routine is called from the kernel's PCI subsystem to support system 8332 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 8333 * this method, it restores the device's PCI config space state and fully 8334 * reinitializes the device and brings it online. Note that as the driver 8335 * implements only the minimum PM requirements for a power-aware driver's PM 8336 * for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 8337 * FREEZE) to the suspend() method call are treated as SUSPEND and the driver 8338 * fully reinitializes its device during the resume() method call -- the 8339 * device will be set to PCI_D0 directly in PCI config space before restoring 8340 * the state. 8341 * 8342 * Return code 8343 * 0 - driver resumed the device 8344 * Error otherwise 8345 **/ 8346static int 8347lpfc_pci_resume_one_s4(struct pci_dev *pdev) 8348{ 8349 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8350 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8351 uint32_t intr_mode; 8352 int error; 8353 8354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8355 "0292 PCI device Power Management resume.\n"); 8356 8357 /* Restore device state from PCI config space */ 8358 pci_set_power_state(pdev, PCI_D0); 8359 pci_restore_state(pdev); 8360 8361 /* 8362 * As the new kernel behavior of pci_restore_state() API call clears 8363 * device saved_state flag, need to save the restored state again. 8364 */ 8365 pci_save_state(pdev); 8366 8367 if (pdev->is_busmaster) 8368 pci_set_master(pdev); 8369 8370 /* Startup the kernel thread for this host adapter.
*/ 8371 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8372 "lpfc_worker_%d", phba->brd_no); 8373 if (IS_ERR(phba->worker_thread)) { 8374 error = PTR_ERR(phba->worker_thread); 8375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8376 "0293 PM resume failed to start worker " 8377 "thread: error=x%x.\n", error); 8378 return error; 8379 } 8380 8381 /* Configure and enable interrupt */ 8382 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8383 if (intr_mode == LPFC_INTR_ERROR) { 8384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8385 "0294 PM resume Failed to enable interrupt\n"); 8386 return -EIO; 8387 } else 8388 phba->intr_mode = intr_mode; 8389 8390 /* Restart HBA and bring it online */ 8391 lpfc_sli_brdrestart(phba); 8392 lpfc_online(phba); 8393 8394 /* Log the current active interrupt mode */ 8395 lpfc_log_intr_mode(phba, phba->intr_mode); 8396 8397 return 0; 8398} 8399 8400/** 8401 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 8402 * @phba: pointer to lpfc hba data structure. 8403 * 8404 * This routine is called to prepare the SLI4 device for PCI slot recover. It 8405 * aborts all the outstanding SCSI I/Os to the pci device. 8406 **/ 8407static void 8408lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 8409{ 8410 struct lpfc_sli *psli = &phba->sli; 8411 struct lpfc_sli_ring *pring; 8412 8413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8414 "2828 PCI channel I/O abort preparing for recovery\n"); 8415 /* 8416 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 8417 * and let the SCSI mid-layer retry them to recover. 8418 */ 8419 pring = &psli->ring[psli->fcp_ring]; 8420 lpfc_sli_abort_iocb_ring(phba, pring); 8421} 8422 8423/** 8424 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 8425 * @phba: pointer to lpfc hba data structure. 8426 * 8427 * This routine is called to prepare the SLI4 device for PCI slot reset. It 8428 * disables the device interrupt and pci device, and aborts the internal FCP 8429 * pending I/Os. 8430 **/ 8431static void 8432lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 8433{ 8434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8435 "2826 PCI channel disable preparing for reset\n"); 8436 8437 /* Block any management I/Os to the device */ 8438 lpfc_block_mgmt_io(phba); 8439 8440 /* Block all SCSI devices' I/Os on the host */ 8441 lpfc_scsi_dev_block(phba); 8442 8443 /* stop all timers */ 8444 lpfc_stop_hba_timers(phba); 8445 8446 /* Disable interrupt and pci device */ 8447 lpfc_sli4_disable_intr(phba); 8448 pci_disable_device(phba->pcidev); 8449 8450 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8451 lpfc_sli_flush_fcp_rings(phba); 8452} 8453 8454/** 8455 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 8456 * @phba: pointer to lpfc hba data structure. 8457 * 8458 * This routine is called to prepare the SLI4 device for PCI slot permanently 8459 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8460 * pending I/Os.
8461 **/ 8462static void 8463lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8464{ 8465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8466 "2827 PCI channel permanent disable for failure\n"); 8467 8468 /* Block all SCSI devices' I/Os on the host */ 8469 lpfc_scsi_dev_block(phba); 8470 8471 /* stop all timers */ 8472 lpfc_stop_hba_timers(phba); 8473 8474 /* Clean up all driver's outstanding SCSI I/Os */ 8475 lpfc_sli_flush_fcp_rings(phba); 8476} 8477 8478/** 8479 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 8480 * @pdev: pointer to PCI device. 8481 * @state: the current PCI connection state. 8482 * 8483 * This routine is called from the PCI subsystem for error handling to device 8484 * with SLI-4 interface spec. This function is called by the PCI subsystem 8485 * after a PCI bus error affecting this device has been detected. When this 8486 * function is invoked, it will need to stop all the I/Os and interrupt(s) 8487 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 8488 * for the PCI subsystem to perform proper recovery as desired. 8489 * 8490 * Return codes 8491 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8492 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8493 **/ 8494static pci_ers_result_t 8495lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 8496{ 8497 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8498 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8499 8500 switch (state) { 8501 case pci_channel_io_normal: 8502 /* Non-fatal error, prepare for recovery */ 8503 lpfc_sli4_prep_dev_for_recover(phba); 8504 return PCI_ERS_RESULT_CAN_RECOVER; 8505 case pci_channel_io_frozen: 8506 /* Fatal error, prepare for slot reset */ 8507 lpfc_sli4_prep_dev_for_reset(phba); 8508 return PCI_ERS_RESULT_NEED_RESET; 8509 case pci_channel_io_perm_failure: 8510 /* Permanent failure, prepare for device down */ 8511 lpfc_sli4_prep_dev_for_perm_failure(phba); 8512 return PCI_ERS_RESULT_DISCONNECT; 8513 default: 8514 /* Unknown state, prepare and request slot reset */ 8515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8516 "2825 Unknown PCI error state: x%x\n", state); 8517 lpfc_sli4_prep_dev_for_reset(phba); 8518 return PCI_ERS_RESULT_NEED_RESET; 8519 } 8520} 8521 8522/** 8523 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch 8524 * @pdev: pointer to PCI device. 8525 * 8526 * This routine is called from the PCI subsystem for error handling to device 8527 * with SLI-4 interface spec. It is called after PCI bus has been reset to 8528 * restart the PCI card from scratch, as if from a cold-boot. During the 8529 * PCI subsystem error recovery, after the driver returns 8530 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8531 * recovery and then call this routine before calling the .resume method to 8532 * recover the device. This function will initialize the HBA device, enable 8533 * the interrupt, but it will just put the HBA to offline state without 8534 * passing any I/O traffic.
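 *
 * In the overall PCI error recovery flow this callback sits between error
 * detection and resume; in outline:
 *
 *	.error_detected() -> PCI_ERS_RESULT_NEED_RESET -> bus reset
 *	-> .slot_reset() (this routine) -> PCI_ERS_RESULT_RECOVERED
 *	-> .io_resume() (the port is brought fully online there)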
8535 * 8536 * Return codes 8537 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8538 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8539 */ 8540static pci_ers_result_t 8541lpfc_io_slot_reset_s4(struct pci_dev *pdev) 8542{ 8543 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8544 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8545 struct lpfc_sli *psli = &phba->sli; 8546 uint32_t intr_mode; 8547 8548 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8549 if (pci_enable_device_mem(pdev)) { 8550 printk(KERN_ERR "lpfc: Cannot re-enable " 8551 "PCI device after reset.\n"); 8552 return PCI_ERS_RESULT_DISCONNECT; 8553 } 8554 8555 pci_restore_state(pdev); 8556 if (pdev->is_busmaster) 8557 pci_set_master(pdev); 8558 8559 spin_lock_irq(&phba->hbalock); 8560 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8561 spin_unlock_irq(&phba->hbalock); 8562 8563 /* Configure and enable interrupt */ 8564 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8565 if (intr_mode == LPFC_INTR_ERROR) { 8566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8567 "2824 Cannot re-enable interrupt after " 8568 "slot reset.\n"); 8569 return PCI_ERS_RESULT_DISCONNECT; 8570 } else 8571 phba->intr_mode = intr_mode; 8572 8573 /* Log the current active interrupt mode */ 8574 lpfc_log_intr_mode(phba, phba->intr_mode); 8575 8576 return PCI_ERS_RESULT_RECOVERED; 8577} 8578 8579/** 8580 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 8581 * @pdev: pointer to PCI device 8582 * 8583 * This routine is called from the PCI subsystem for error handling to device 8584 * with SLI-4 interface spec. It is called when kernel error recovery tells 8585 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8586 * error recovery. After this call, traffic can start to flow from this device 8587 * again. 8588 **/ 8589static void 8590lpfc_io_resume_s4(struct pci_dev *pdev) 8591{ 8592 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8593 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8594 8595 /* 8596 * In case of slot reset, as function reset is performed through 8597 * mailbox command which needs DMA to be enabled, this operation 8598 * has to be moved to the io resume phase. Taking device offline 8599 * will perform the necessary cleanup. 8600 */ 8601 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 8602 /* Perform device reset */ 8603 lpfc_offline_prep(phba); 8604 lpfc_offline(phba); 8605 lpfc_sli_brdrestart(phba); 8606 /* Bring the device back online */ 8607 lpfc_online(phba); 8608 } 8609 8610 /* Clean up Advanced Error Reporting (AER) if needed */ 8611 if (phba->hba_flag & HBA_AER_ENABLED) 8612 pci_cleanup_aer_uncorrect_error_status(pdev); 8613} 8614 8615/** 8616 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 8617 * @pdev: pointer to PCI device 8618 * @pid: pointer to PCI device identifier 8619 * 8620 * This routine is to be registered to the kernel's PCI subsystem. When an 8621 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 8622 * at PCI device-specific information of the device and driver to see if the 8623 * driver states that it can support this kind of device. If the match is 8624 * successful, the driver core invokes this routine. This routine dispatches 8625 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 8626 * do all the initialization that it needs to do to handle the HBA device 8627 * properly.
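 *
 * In outline, the dispatch below reads the SLI_INTF word from PCI config
 * space and branches on the SLI revision it reports:
 *
 *	pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0);
 *	if (valid(intf) && slirev(intf) == LPFC_SLI_INTF_REV_SLI4)
 *		rc = lpfc_pci_probe_one_s4(pdev, pid);
 *	else
 *		rc = lpfc_pci_probe_one_s3(pdev, pid);
 *
 * (valid() and slirev() abbreviate the bf_get() accessors used below.)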
8628 * 8629 * Return code 8630 * 0 - driver can claim the device 8631 * negative value - driver can not claim the device 8632 **/ 8633static int __devinit 8634lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 8635{ 8636 int rc; 8637 struct lpfc_sli_intf intf; 8638 8639 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 8640 return -ENODEV; 8641 8642 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8643 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 8644 rc = lpfc_pci_probe_one_s4(pdev, pid); 8645 else 8646 rc = lpfc_pci_probe_one_s3(pdev, pid); 8647 8648 return rc; 8649} 8650 8651/** 8652 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 8653 * @pdev: pointer to PCI device 8654 * 8655 * This routine is to be registered to the kernel's PCI subsystem. When an 8656 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 8657 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 8658 * remove routine, which will perform all the necessary cleanup for the 8659 * device to be removed from the PCI subsystem properly. 8660 **/ 8661static void __devexit 8662lpfc_pci_remove_one(struct pci_dev *pdev) 8663{ 8664 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8665 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8666 8667 switch (phba->pci_dev_grp) { 8668 case LPFC_PCI_DEV_LP: 8669 lpfc_pci_remove_one_s3(pdev); 8670 break; 8671 case LPFC_PCI_DEV_OC: 8672 lpfc_pci_remove_one_s4(pdev); 8673 break; 8674 default: 8675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8676 "1424 Invalid PCI device group: 0x%x\n", 8677 phba->pci_dev_grp); 8678 break; 8679 } 8680 return; 8681} 8682 8683/** 8684 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 8685 * @pdev: pointer to PCI device 8686 * @msg: power management message 8687 * 8688 * This routine is to be registered to the kernel's PCI subsystem to support 8689 * system Power Management (PM). When PM invokes this method, it dispatches 8690 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 8691 * suspend the device. 8692 * 8693 * Return code 8694 * 0 - driver suspended the device 8695 * Error otherwise 8696 **/ 8697static int 8698lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 8699{ 8700 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8701 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8702 int rc = -ENODEV; 8703 8704 switch (phba->pci_dev_grp) { 8705 case LPFC_PCI_DEV_LP: 8706 rc = lpfc_pci_suspend_one_s3(pdev, msg); 8707 break; 8708 case LPFC_PCI_DEV_OC: 8709 rc = lpfc_pci_suspend_one_s4(pdev, msg); 8710 break; 8711 default: 8712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8713 "1425 Invalid PCI device group: 0x%x\n", 8714 phba->pci_dev_grp); 8715 break; 8716 } 8717 return rc; 8718} 8719 8720/** 8721 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 8722 * @pdev: pointer to PCI device 8723 * 8724 * This routine is to be registered to the kernel's PCI subsystem to support 8725 * system Power Management (PM). When PM invokes this method, it dispatches 8726 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 8727 * resume the device. 
8728 * 8729 * Return code 8730 * 0 - driver resumed the device 8731 * Error otherwise 8732 **/ 8733static int 8734lpfc_pci_resume_one(struct pci_dev *pdev) 8735{ 8736 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8737 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8738 int rc = -ENODEV; 8739 8740 switch (phba->pci_dev_grp) { 8741 case LPFC_PCI_DEV_LP: 8742 rc = lpfc_pci_resume_one_s3(pdev); 8743 break; 8744 case LPFC_PCI_DEV_OC: 8745 rc = lpfc_pci_resume_one_s4(pdev); 8746 break; 8747 default: 8748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8749 "1426 Invalid PCI device group: 0x%x\n", 8750 phba->pci_dev_grp); 8751 break; 8752 } 8753 return rc; 8754} 8755 8756/** 8757 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 8758 * @pdev: pointer to PCI device. 8759 * @state: the current PCI connection state. 8760 * 8761 * This routine is registered to the PCI subsystem for error handling. This 8762 * function is called by the PCI subsystem after a PCI bus error affecting 8763 * this device has been detected. When this routine is invoked, it dispatches 8764 * the action to the proper SLI-3 or SLI-4 device error detected handling 8765 * routine, which will perform the proper error detected operation. 8766 * 8767 * Return codes 8768 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8769 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8770 **/ 8771static pci_ers_result_t 8772lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 8773{ 8774 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8775 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8776 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8777 8778 switch (phba->pci_dev_grp) { 8779 case LPFC_PCI_DEV_LP: 8780 rc = lpfc_io_error_detected_s3(pdev, state); 8781 break; 8782 case LPFC_PCI_DEV_OC: 8783 rc = lpfc_io_error_detected_s4(pdev, state); 8784 break; 8785 default: 8786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8787 "1427 Invalid PCI device group: 0x%x\n", 8788 phba->pci_dev_grp); 8789 break; 8790 } 8791 return rc; 8792} 8793 8794/** 8795 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch 8796 * @pdev: pointer to PCI device. 8797 * 8798 * This routine is registered to the PCI subsystem for error handling. This 8799 * function is called after PCI bus has been reset to restart the PCI card 8800 * from scratch, as if from a cold-boot. When this routine is invoked, it 8801 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 8802 * routine, which will perform the proper device reset.
/**
 * lpfc_io_slot_reset - lpfc method for restarting the PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. It is
 * called after the PCI bus has been reset to restart the PCI card from
 * scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
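/*
 * Note on the ID table below: every entry wildcards subvendor and subdevice
 * with PCI_ANY_ID, so each open-coded initializer is equivalent to the
 * PCI_DEVICE() convenience macro from <linux/pci.h>. For example (sketch
 * only; the table itself is kept in its original open-coded form):
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER) },
 */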
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};
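/*
 * Illustration only (not built): how the PCI core conceptually decides
 * whether lpfc claims a device. pci_match_id() is a real <linux/pci.h>
 * helper, but the driver never needs to call it directly;
 * pci_register_driver() performs this matching internally before invoking
 * lpfc_pci_probe_one(). The helper name below is made up for the example.
 */
#if 0
static bool lpfc_claims_device(struct pci_dev *pdev)
{
	return pci_match_id(lpfc_id_table, pdev) != NULL;
}
#endif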
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
		       "_dump_buf_data at 0x%p\n",
		       (1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
		       "_dump_buf_dif at 0x%p\n",
		       (1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
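/*
 * Usage sketch (assumptions noted): with MODULE_DEVICE_TABLE() above, udev
 * normally autoloads this module when a matching Emulex HBA is discovered,
 * so loading by hand is rarely necessary. Manual load/unload would look
 * like the following; lpfc_enable_npiv is assumed here to be settable as a
 * module parameter at load time:
 *
 *	# modprobe lpfc lpfc_enable_npiv=1
 *	# rmmod lpfc
 */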