lpfc_init.c revision 7f86059ac016d8662e5fbfab4875529510977b47
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
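	 * If rr comes back 0, the revision data cannot be trusted, so the
	 * routine fails with -ERESTART to ask the SLI layer to reset the
	 * HBA and try again.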
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, it will set the internal async event support flag
 * to 1; otherwise, it will set the internal async event support flag to 0.
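 * The flag is stored in phba->temp_sensor_support.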
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
		&& !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
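 * When issued with MBX_POLL the mailbox buffer is freed here on return;
 * with MBX_NOWAIT it is released by the lpfc_sli_def_mbox_cmpl completion.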
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
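	 * It is taken nested inside hbalock here.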
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expired
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and the HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be put to offline.
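 * If the heart-beat mailbox cannot be allocated or issued, the timer is
 * simply rearmed so the heart-beat is retried on the next interval.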
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout is called with hb_outstanding
			 * set, we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
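			 * Only a warning is logged and the timer is rearmed
			 * with the longer LPFC_HB_MBOX_TIMEOUT interval.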
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggers an erratt. That could cause the
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/O) on
	 * the txcmplq and let the SCSI layer retry them after
	 * re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers an erratt with HS_FFER6.
		 * That could cause the I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/O) on the txcmplq and let the SCSI
		 * layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, so just treat it as an adapter hardware failure
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
			/*
			 * TODO: Attempt port recovery via a port reset.
			 * When fully implemented, the driver should
			 * attempt to recover the port here and return.
			 * For now, log an error and take the port offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
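 * It issues a READ_TOPOLOGY mailbox command to fetch the new link state;
 * on any failure it re-enables link attention interrupts, declares link
 * down, and marks the HBA in error.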
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
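 * The buffer is scanned as tagged records: tags 0x82 and 0x91 carry
 * identifier strings and are skipped, tag 0x90 opens the read-only area
 * whose keyword fields (SN, V1-V4) supply the serial number, model
 * description, model name, program type, and port number, and tag 0x78
 * marks the end of the VPD.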
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of 256
 * chars. It shall be returned with the model name, maximum speed, and the
 * host bus type. The @mdp passed into this function points to an array of
 * 80 chars. When the function returns, the @mdp will be filled with the
 * model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		oneConnect = 1;
		m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/* oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator, Port %s",
				m.name, m.function,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
1954 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1955 * 1956 * This routine posts a given number of IOCBs with the associated DMA buffer 1957 * descriptors specified by the cnt argument to the given IOCB ring. 1958 * 1959 * Return codes 1960 * The number of IOCBs NOT able to be posted to the IOCB ring. 1961 **/ 1962int 1963lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1964{ 1965 IOCB_t *icmd; 1966 struct lpfc_iocbq *iocb; 1967 struct lpfc_dmabuf *mp1, *mp2; 1968 1969 cnt += pring->missbufcnt; 1970 1971 /* While there are buffers to post */ 1972 while (cnt > 0) { 1973 /* Allocate buffer for command iocb */ 1974 iocb = lpfc_sli_get_iocbq(phba); 1975 if (iocb == NULL) { 1976 pring->missbufcnt = cnt; 1977 return cnt; 1978 } 1979 icmd = &iocb->iocb; 1980 1981 /* 2 buffers can be posted per command */ 1982 /* Allocate buffer to post */ 1983 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1984 if (mp1) 1985 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1986 if (!mp1 || !mp1->virt) { 1987 kfree(mp1); 1988 lpfc_sli_release_iocbq(phba, iocb); 1989 pring->missbufcnt = cnt; 1990 return cnt; 1991 } 1992 1993 INIT_LIST_HEAD(&mp1->list); 1994 /* Allocate buffer to post */ 1995 if (cnt > 1) { 1996 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1997 if (mp2) 1998 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1999 &mp2->phys); 2000 if (!mp2 || !mp2->virt) { 2001 kfree(mp2); 2002 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2003 kfree(mp1); 2004 lpfc_sli_release_iocbq(phba, iocb); 2005 pring->missbufcnt = cnt; 2006 return cnt; 2007 } 2008 2009 INIT_LIST_HEAD(&mp2->list); 2010 } else { 2011 mp2 = NULL; 2012 } 2013 2014 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2015 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2016 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2017 icmd->ulpBdeCount = 1; 2018 cnt--; 2019 if (mp2) { 2020 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2021 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2022 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2023 cnt--; 2024 icmd->ulpBdeCount = 2; 2025 } 2026 2027 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2028 icmd->ulpLe = 1; 2029 2030 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2031 IOCB_ERROR) { 2032 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2033 kfree(mp1); 2034 cnt++; 2035 if (mp2) { 2036 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2037 kfree(mp2); 2038 cnt++; 2039 } 2040 lpfc_sli_release_iocbq(phba, iocb); 2041 pring->missbufcnt = cnt; 2042 return cnt; 2043 } 2044 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2045 if (mp2) 2046 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2047 } 2048 pring->missbufcnt = 0; 2049 return 0; 2050} 2051 2052/** 2053 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2054 * @phba: pointer to lpfc hba data structure. 2055 * 2056 * This routine posts initial receive IOCB buffers to the ELS ring. The 2057 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2058 * set to 64 IOCBs. 
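 *
 * Editor's example (a sketch, not part of the driver source): because
 * lpfc_post_buffer() returns the number of IOCBs it could NOT post and
 * parks that shortfall in pring->missbufcnt, a caller may retry just the
 * remainder later by passing a cnt of 0:
 *
 *    if (lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0))
 *        lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 0);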
2059 * 2060 * Return codes 2061 * 0 - success (currently always success) 2062 **/ 2063static int 2064lpfc_post_rcv_buf(struct lpfc_hba *phba) 2065{ 2066 struct lpfc_sli *psli = &phba->sli; 2067 2068 /* Ring 0, ELS / CT buffers */ 2069 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2070 /* Ring 2 - FCP no buffers needed */ 2071 2072 return 0; 2073} 2074 2075#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2076 2077/** 2078 * lpfc_sha_init - Set up initial array of hash table entries 2079 * @HashResultPointer: pointer to an array as hash table. 2080 * 2081 * This routine sets up the initial values to the array of hash table entries 2082 * for the LC HBAs. 2083 **/ 2084static void 2085lpfc_sha_init(uint32_t * HashResultPointer) 2086{ 2087 HashResultPointer[0] = 0x67452301; 2088 HashResultPointer[1] = 0xEFCDAB89; 2089 HashResultPointer[2] = 0x98BADCFE; 2090 HashResultPointer[3] = 0x10325476; 2091 HashResultPointer[4] = 0xC3D2E1F0; 2092} 2093 2094/** 2095 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2096 * @HashResultPointer: pointer to an initial/result hash table. 2097 * @HashWorkingPointer: pointer to a working hash table. 2098 * 2099 * This routine iterates an initial hash table pointed to by @HashResultPointer 2100 * with the values from the working hash table pointed to by @HashWorkingPointer. 2101 * The results are put back into the initial hash table, returned through 2102 * the @HashResultPointer as the result hash table. 2103 **/ 2104static void 2105lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2106{ 2107 int t; 2108 uint32_t TEMP; 2109 uint32_t A, B, C, D, E; 2110 t = 16; 2111 do { 2112 HashWorkingPointer[t] = 2113 S(1, 2114 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2115 8] ^ 2116 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2117 } while (++t <= 79); 2118 t = 0; 2119 A = HashResultPointer[0]; 2120 B = HashResultPointer[1]; 2121 C = HashResultPointer[2]; 2122 D = HashResultPointer[3]; 2123 E = HashResultPointer[4]; 2124 2125 do { 2126 if (t < 20) { 2127 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2128 } else if (t < 40) { 2129 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2130 } else if (t < 60) { 2131 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2132 } else { 2133 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2134 } 2135 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2136 E = D; 2137 D = C; 2138 C = S(30, B); 2139 B = A; 2140 A = TEMP; 2141 } while (++t <= 79); 2142 2143 HashResultPointer[0] += A; 2144 HashResultPointer[1] += B; 2145 HashResultPointer[2] += C; 2146 HashResultPointer[3] += D; 2147 HashResultPointer[4] += E; 2148 2149} 2150 2151/** 2152 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2153 * @RandomChallenge: pointer to the entry of host challenge random number array. 2154 * @HashWorking: pointer to the entry of the working hash array. 2155 * 2156 * This routine calculates the working hash array referred to by @HashWorking 2157 * from the challenge random numbers associated with the host, referred to by 2158 * @RandomChallenge. The result is put into the entry of the working hash 2159 * array and returned by reference through @HashWorking. 2160 **/ 2161static void 2162lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2163{ 2164 *HashWorking = (*RandomChallenge ^ *HashWorking); 2165} 2166 2167/** 2168 * lpfc_hba_init - Perform special handling for LC HBA initialization 2169 * @phba: pointer to lpfc hba data structure.
2170 * @hbainit: pointer to an array of unsigned 32-bit integers. 2171 * 2172 * This routine performs the special handling for LC HBA initialization. 2173 **/ 2174void 2175lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2176{ 2177 int t; 2178 uint32_t *HashWorking; 2179 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2180 2181 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2182 if (!HashWorking) 2183 return; 2184 2185 HashWorking[0] = HashWorking[78] = *pwwnn++; 2186 HashWorking[1] = HashWorking[79] = *pwwnn; 2187 2188 for (t = 0; t < 7; t++) 2189 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2190 2191 lpfc_sha_init(hbainit); 2192 lpfc_sha_iterate(hbainit, HashWorking); 2193 kfree(HashWorking); 2194} 2195 2196/** 2197 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2198 * @vport: pointer to a virtual N_Port data structure. 2199 * 2200 * This routine performs the necessary cleanups before deleting the @vport. 2201 * It invokes the discovery state machine to perform necessary state 2202 * transitions and to release the ndlps associated with the @vport. Note, 2203 * the physical port is treated as @vport 0. 2204 **/ 2205void 2206lpfc_cleanup(struct lpfc_vport *vport) 2207{ 2208 struct lpfc_hba *phba = vport->phba; 2209 struct lpfc_nodelist *ndlp, *next_ndlp; 2210 int i = 0; 2211 2212 if (phba->link_state > LPFC_LINK_DOWN) 2213 lpfc_port_link_failure(vport); 2214 2215 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2216 if (!NLP_CHK_NODE_ACT(ndlp)) { 2217 ndlp = lpfc_enable_node(vport, ndlp, 2218 NLP_STE_UNUSED_NODE); 2219 if (!ndlp) 2220 continue; 2221 spin_lock_irq(&phba->ndlp_lock); 2222 NLP_SET_FREE_REQ(ndlp); 2223 spin_unlock_irq(&phba->ndlp_lock); 2224 /* Trigger the release of the ndlp memory */ 2225 lpfc_nlp_put(ndlp); 2226 continue; 2227 } 2228 spin_lock_irq(&phba->ndlp_lock); 2229 if (NLP_CHK_FREE_REQ(ndlp)) { 2230 /* The ndlp should not be in memory free mode already */ 2231 spin_unlock_irq(&phba->ndlp_lock); 2232 continue; 2233 } else 2234 /* Indicate request for freeing ndlp memory */ 2235 NLP_SET_FREE_REQ(ndlp); 2236 spin_unlock_irq(&phba->ndlp_lock); 2237 2238 if (vport->port_type != LPFC_PHYSICAL_PORT && 2239 ndlp->nlp_DID == Fabric_DID) { 2240 /* Just free up ndlp with Fabric_DID for vports */ 2241 lpfc_nlp_put(ndlp); 2242 continue; 2243 } 2244 2245 if (ndlp->nlp_type & NLP_FABRIC) 2246 lpfc_disc_state_machine(vport, ndlp, NULL, 2247 NLP_EVT_DEVICE_RECOVERY); 2248 2249 lpfc_disc_state_machine(vport, ndlp, NULL, 2250 NLP_EVT_DEVICE_RM); 2251 2252 } 2253 2254 /* At this point, ALL ndlp's should be gone 2255 * because of the previous NLP_EVT_DEVICE_RM. 2256 * Lets wait for this to happen, if needed. 2257 */ 2258 while (!list_empty(&vport->fc_nodes)) { 2259 if (i++ > 3000) { 2260 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2261 "0233 Nodelist not empty\n"); 2262 list_for_each_entry_safe(ndlp, next_ndlp, 2263 &vport->fc_nodes, nlp_listp) { 2264 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2265 LOG_NODE, 2266 "0282 did:x%x ndlp:x%p " 2267 "usgmap:x%x refcnt:%d\n", 2268 ndlp->nlp_DID, (void *)ndlp, 2269 ndlp->nlp_usg_map, 2270 atomic_read( 2271 &ndlp->kref.refcount)); 2272 } 2273 break; 2274 } 2275 2276 /* Wait for any activity on ndlps to settle */ 2277 msleep(10); 2278 } 2279 lpfc_cleanup_vports_rrqs(vport, NULL); 2280} 2281 2282/** 2283 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2284 * @vport: pointer to a virtual N_Port data structure. 
2285 * 2286 * This routine stops all the timers associated with a @vport. This function 2287 * is invoked before disabling or deleting a @vport. Note that the physical 2288 * port is treated as @vport 0. 2289 **/ 2290void 2291lpfc_stop_vport_timers(struct lpfc_vport *vport) 2292{ 2293 del_timer_sync(&vport->els_tmofunc); 2294 del_timer_sync(&vport->fc_fdmitmo); 2295 del_timer_sync(&vport->delayed_disc_tmo); 2296 lpfc_can_disctmo(vport); 2297 return; 2298} 2299 2300/** 2301 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2302 * @phba: pointer to lpfc hba data structure. 2303 * 2304 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2305 * caller of this routine should already hold the host lock. 2306 **/ 2307void 2308__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2309{ 2310 /* Clear pending FCF rediscovery wait flag */ 2311 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2312 2313 /* Now, try to stop the timer */ 2314 del_timer(&phba->fcf.redisc_wait); 2315} 2316 2317/** 2318 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2319 * @phba: pointer to lpfc hba data structure. 2320 * 2321 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2322 * checks whether the FCF rediscovery wait timer is pending with the host 2323 * lock held before proceeding with disabling the timer and clearing the 2324 * wait timer pending flag. 2325 **/ 2326void 2327lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2328{ 2329 spin_lock_irq(&phba->hbalock); 2330 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2331 /* FCF rediscovery timer already fired or stopped */ 2332 spin_unlock_irq(&phba->hbalock); 2333 return; 2334 } 2335 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2336 /* Clear failover in progress flags */ 2337 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2338 spin_unlock_irq(&phba->hbalock); 2339} 2340 2341/** 2342 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2343 * @phba: pointer to lpfc hba data structure. 2344 * 2345 * This routine stops all the timers associated with a HBA. This function is 2346 * invoked before either putting a HBA offline or unloading the driver. 2347 **/ 2348void 2349lpfc_stop_hba_timers(struct lpfc_hba *phba) 2350{ 2351 lpfc_stop_vport_timers(phba->pport); 2352 del_timer_sync(&phba->sli.mbox_tmo); 2353 del_timer_sync(&phba->fabric_block_timer); 2354 del_timer_sync(&phba->eratt_poll); 2355 del_timer_sync(&phba->hb_tmofunc); 2356 if (phba->sli_rev == LPFC_SLI_REV4) { 2357 del_timer_sync(&phba->rrq_tmr); 2358 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2359 } 2360 phba->hb_outstanding = 0; 2361 2362 switch (phba->pci_dev_grp) { 2363 case LPFC_PCI_DEV_LP: 2364 /* Stop any LightPulse device specific driver timers */ 2365 del_timer_sync(&phba->fcp_poll_timer); 2366 break; 2367 case LPFC_PCI_DEV_OC: 2368 /* Stop any OneConnect device specific driver timers */ 2369 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2370 break; 2371 default: 2372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2373 "0297 Invalid device group (x%x)\n", 2374 phba->pci_dev_grp); 2375 break; 2376 } 2377 return; 2378} 2379 2380/** 2381 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2382 * @phba: pointer to lpfc hba data structure. 2383 * 2384 * This routine marks a HBA's management interface as blocked.
Once the HBA's 2385 * management interface is marked as blocked, all user space access to 2386 * the HBA, whether from the sysfs interface or the libdfc interface, 2387 * will be blocked. The HBA is set to block the management interface when the 2388 * driver prepares the HBA interface for online or offline. 2389 **/ 2390static void 2391lpfc_block_mgmt_io(struct lpfc_hba * phba) 2392{ 2393 unsigned long iflag; 2394 uint8_t actcmd = MBX_HEARTBEAT; 2395 unsigned long timeout; 2396 2397 2398 spin_lock_irqsave(&phba->hbalock, iflag); 2399 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2400 if (phba->sli.mbox_active) 2401 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2402 spin_unlock_irqrestore(&phba->hbalock, iflag); 2403 /* Determine how long we might wait for the active mailbox 2404 * command to be gracefully completed by firmware. 2405 */ 2406 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 2407 jiffies; 2408 /* Wait for the outstanding mailbox command to complete */ 2409 while (phba->sli.mbox_active) { 2410 /* Check active mailbox complete status every 2ms */ 2411 msleep(2); 2412 if (time_after(jiffies, timeout)) { 2413 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2414 "2813 Mgmt IO is Blocked %x " 2415 "- mbox cmd %x still active\n", 2416 phba->sli.sli_flag, actcmd); 2417 break; 2418 } 2419 } 2420} 2421 2422/** 2423 * lpfc_online - Initialize and bring a HBA online 2424 * @phba: pointer to lpfc hba data structure. 2425 * 2426 * This routine initializes the HBA and brings a HBA online. During this 2427 * process, the management interface is blocked to prevent user space access 2428 * to the HBA interfering with the driver initialization. 2429 * 2430 * Return codes 2431 * 0 - successful 2432 * 1 - failed 2433 **/ 2434int 2435lpfc_online(struct lpfc_hba *phba) 2436{ 2437 struct lpfc_vport *vport; 2438 struct lpfc_vport **vports; 2439 int i; 2440 2441 if (!phba) 2442 return 0; 2443 vport = phba->pport; 2444 2445 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2446 return 0; 2447 2448 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2449 "0458 Bring Adapter online\n"); 2450 2451 lpfc_block_mgmt_io(phba); 2452 2453 if (!lpfc_sli_queue_setup(phba)) { 2454 lpfc_unblock_mgmt_io(phba); 2455 return 1; 2456 } 2457 2458 if (phba->sli_rev == LPFC_SLI_REV4) { 2459 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2460 lpfc_unblock_mgmt_io(phba); 2461 return 1; 2462 } 2463 } else { 2464 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2465 lpfc_unblock_mgmt_io(phba); 2466 return 1; 2467 } 2468 } 2469 2470 vports = lpfc_create_vport_work_array(phba); 2471 if (vports != NULL) 2472 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2473 struct Scsi_Host *shost; 2474 shost = lpfc_shost_from_vport(vports[i]); 2475 spin_lock_irq(shost->host_lock); 2476 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2477 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2478 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2479 if (phba->sli_rev == LPFC_SLI_REV4) 2480 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2481 spin_unlock_irq(shost->host_lock); 2482 } 2483 lpfc_destroy_vport_work_array(phba, vports); 2484 2485 lpfc_unblock_mgmt_io(phba); 2486 return 0; 2487} 2488 2489/** 2490 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2491 * @phba: pointer to lpfc hba data structure. 2492 * 2493 * This routine marks a HBA's management interface as not blocked.
Once the 2494 * HBA's management interface is marked as not blocked, all the user space 2495 * access to the HBA, whether they are from sysfs interface or libdfc 2496 * interface will be allowed. The HBA is set to block the management interface 2497 * when the driver prepares the HBA interface for online or offline and then 2498 * set to unblock the management interface afterwards. 2499 **/ 2500void 2501lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2502{ 2503 unsigned long iflag; 2504 2505 spin_lock_irqsave(&phba->hbalock, iflag); 2506 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2507 spin_unlock_irqrestore(&phba->hbalock, iflag); 2508} 2509 2510/** 2511 * lpfc_offline_prep - Prepare a HBA to be brought offline 2512 * @phba: pointer to lpfc hba data structure. 2513 * 2514 * This routine is invoked to prepare a HBA to be brought offline. It performs 2515 * unregistration login to all the nodes on all vports and flushes the mailbox 2516 * queue to make it ready to be brought offline. 2517 **/ 2518void 2519lpfc_offline_prep(struct lpfc_hba * phba) 2520{ 2521 struct lpfc_vport *vport = phba->pport; 2522 struct lpfc_nodelist *ndlp, *next_ndlp; 2523 struct lpfc_vport **vports; 2524 struct Scsi_Host *shost; 2525 int i; 2526 2527 if (vport->fc_flag & FC_OFFLINE_MODE) 2528 return; 2529 2530 lpfc_block_mgmt_io(phba); 2531 2532 lpfc_linkdown(phba); 2533 2534 /* Issue an unreg_login to all nodes on all vports */ 2535 vports = lpfc_create_vport_work_array(phba); 2536 if (vports != NULL) { 2537 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2538 if (vports[i]->load_flag & FC_UNLOADING) 2539 continue; 2540 shost = lpfc_shost_from_vport(vports[i]); 2541 spin_lock_irq(shost->host_lock); 2542 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2543 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2544 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2545 spin_unlock_irq(shost->host_lock); 2546 2547 shost = lpfc_shost_from_vport(vports[i]); 2548 list_for_each_entry_safe(ndlp, next_ndlp, 2549 &vports[i]->fc_nodes, 2550 nlp_listp) { 2551 if (!NLP_CHK_NODE_ACT(ndlp)) 2552 continue; 2553 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2554 continue; 2555 if (ndlp->nlp_type & NLP_FABRIC) { 2556 lpfc_disc_state_machine(vports[i], ndlp, 2557 NULL, NLP_EVT_DEVICE_RECOVERY); 2558 lpfc_disc_state_machine(vports[i], ndlp, 2559 NULL, NLP_EVT_DEVICE_RM); 2560 } 2561 spin_lock_irq(shost->host_lock); 2562 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2563 spin_unlock_irq(shost->host_lock); 2564 lpfc_unreg_rpi(vports[i], ndlp); 2565 } 2566 } 2567 } 2568 lpfc_destroy_vport_work_array(phba, vports); 2569 2570 lpfc_sli_mbox_sys_shutdown(phba); 2571} 2572 2573/** 2574 * lpfc_offline - Bring a HBA offline 2575 * @phba: pointer to lpfc hba data structure. 2576 * 2577 * This routine actually brings a HBA offline. It stops all the timers 2578 * associated with the HBA, brings down the SLI layer, and eventually 2579 * marks the HBA as in offline state for the upper layer protocol. 
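 *
 * Editor's example (a sketch of a hypothetical reset path; not taken from
 * the driver): this routine is normally paired with lpfc_offline_prep()
 * before, and lpfc_online() after, the disruptive operation:
 *
 *    lpfc_offline_prep(phba);
 *    lpfc_offline(phba);
 *    (reset or reconfigure the adapter here)
 *    if (lpfc_online(phba))
 *        (adapter failed to come back online)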
2580 **/ 2581void 2582lpfc_offline(struct lpfc_hba *phba) 2583{ 2584 struct Scsi_Host *shost; 2585 struct lpfc_vport **vports; 2586 int i; 2587 2588 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2589 return; 2590 2591 /* stop port and all timers associated with this hba */ 2592 lpfc_stop_port(phba); 2593 vports = lpfc_create_vport_work_array(phba); 2594 if (vports != NULL) 2595 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2596 lpfc_stop_vport_timers(vports[i]); 2597 lpfc_destroy_vport_work_array(phba, vports); 2598 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2599 "0460 Bring Adapter offline\n"); 2600 /* Bring down the SLI Layer and cleanup. The HBA is offline 2601 now. */ 2602 lpfc_sli_hba_down(phba); 2603 spin_lock_irq(&phba->hbalock); 2604 phba->work_ha = 0; 2605 spin_unlock_irq(&phba->hbalock); 2606 vports = lpfc_create_vport_work_array(phba); 2607 if (vports != NULL) 2608 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2609 shost = lpfc_shost_from_vport(vports[i]); 2610 spin_lock_irq(shost->host_lock); 2611 vports[i]->work_port_events = 0; 2612 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2613 spin_unlock_irq(shost->host_lock); 2614 } 2615 lpfc_destroy_vport_work_array(phba, vports); 2616} 2617 2618/** 2619 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2620 * @phba: pointer to lpfc hba data structure. 2621 * 2622 * This routine is to free all the SCSI buffers and IOCBs from the driver 2623 * list back to kernel. It is called from lpfc_pci_remove_one to free 2624 * the internal resources before the device is removed from the system. 2625 * 2626 * Return codes 2627 * 0 - successful (for now, it always returns 0) 2628 **/ 2629static int 2630lpfc_scsi_free(struct lpfc_hba *phba) 2631{ 2632 struct lpfc_scsi_buf *sb, *sb_next; 2633 struct lpfc_iocbq *io, *io_next; 2634 2635 spin_lock_irq(&phba->hbalock); 2636 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2637 spin_lock(&phba->scsi_buf_list_lock); 2638 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2639 list_del(&sb->list); 2640 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2641 sb->dma_handle); 2642 kfree(sb); 2643 phba->total_scsi_bufs--; 2644 } 2645 spin_unlock(&phba->scsi_buf_list_lock); 2646 2647 /* Release all the lpfc_iocbq entries maintained by this host. */ 2648 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2649 list_del(&io->list); 2650 kfree(io); 2651 phba->total_iocbq_bufs--; 2652 } 2653 spin_unlock_irq(&phba->hbalock); 2654 return 0; 2655} 2656 2657/** 2658 * lpfc_create_port - Create an FC port 2659 * @phba: pointer to lpfc hba data structure. 2660 * @instance: a unique integer ID to this FC port. 2661 * @dev: pointer to the device data structure. 2662 * 2663 * This routine creates a FC port for the upper layer protocol. The FC port 2664 * can be created on top of either a physical port or a virtual port provided 2665 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2666 * and associates the FC port created before adding the shost into the SCSI 2667 * layer. 2668 * 2669 * Return codes 2670 * @vport - pointer to the virtual N_Port data structure. 2671 * NULL - port create failed. 
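 *
 * Editor's example (a sketch of a hypothetical caller): the physical port
 * is created against the PCI device itself, and failure is reported only
 * through the NULL return:
 *
 *    vport = lpfc_create_port(phba, lpfc_get_instance(), &phba->pcidev->dev);
 *    if (!vport)
 *        (creation failed; the shost has already been released)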
2672 **/ 2673struct lpfc_vport * 2674lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2675{ 2676 struct lpfc_vport *vport; 2677 struct Scsi_Host *shost; 2678 int error = 0; 2679 2680 if (dev != &phba->pcidev->dev) 2681 shost = scsi_host_alloc(&lpfc_vport_template, 2682 sizeof(struct lpfc_vport)); 2683 else 2684 shost = scsi_host_alloc(&lpfc_template, 2685 sizeof(struct lpfc_vport)); 2686 if (!shost) 2687 goto out; 2688 2689 vport = (struct lpfc_vport *) shost->hostdata; 2690 vport->phba = phba; 2691 vport->load_flag |= FC_LOADING; 2692 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2693 vport->fc_rscn_flush = 0; 2694 2695 lpfc_get_vport_cfgparam(vport); 2696 shost->unique_id = instance; 2697 shost->max_id = LPFC_MAX_TARGET; 2698 shost->max_lun = vport->cfg_max_luns; 2699 shost->this_id = -1; 2700 shost->max_cmd_len = 16; 2701 if (phba->sli_rev == LPFC_SLI_REV4) { 2702 shost->dma_boundary = 2703 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2704 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2705 } 2706 2707 /* 2708 * Set initial can_queue value since 0 is no longer supported and 2709 * scsi_add_host will fail. This will be adjusted later based on the 2710 * max xri value determined in hba setup. 2711 */ 2712 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2713 if (dev != &phba->pcidev->dev) { 2714 shost->transportt = lpfc_vport_transport_template; 2715 vport->port_type = LPFC_NPIV_PORT; 2716 } else { 2717 shost->transportt = lpfc_transport_template; 2718 vport->port_type = LPFC_PHYSICAL_PORT; 2719 } 2720 2721 /* Initialize all internally managed lists. */ 2722 INIT_LIST_HEAD(&vport->fc_nodes); 2723 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2724 spin_lock_init(&vport->work_port_lock); 2725 2726 init_timer(&vport->fc_disctmo); 2727 vport->fc_disctmo.function = lpfc_disc_timeout; 2728 vport->fc_disctmo.data = (unsigned long)vport; 2729 2730 init_timer(&vport->fc_fdmitmo); 2731 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2732 vport->fc_fdmitmo.data = (unsigned long)vport; 2733 2734 init_timer(&vport->els_tmofunc); 2735 vport->els_tmofunc.function = lpfc_els_timeout; 2736 vport->els_tmofunc.data = (unsigned long)vport; 2737 2738 init_timer(&vport->delayed_disc_tmo); 2739 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 2740 vport->delayed_disc_tmo.data = (unsigned long)vport; 2741 2742 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2743 if (error) 2744 goto out_put_shost; 2745 2746 spin_lock_irq(&phba->hbalock); 2747 list_add_tail(&vport->listentry, &phba->port_list); 2748 spin_unlock_irq(&phba->hbalock); 2749 return vport; 2750 2751out_put_shost: 2752 scsi_host_put(shost); 2753out: 2754 return NULL; 2755} 2756 2757/** 2758 * destroy_port - destroy an FC port 2759 * @vport: pointer to an lpfc virtual N_Port data structure. 2760 * 2761 * This routine destroys a FC port from the upper layer protocol. All the 2762 * resources associated with the port are released. 2763 **/ 2764void 2765destroy_port(struct lpfc_vport *vport) 2766{ 2767 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2768 struct lpfc_hba *phba = vport->phba; 2769 2770 lpfc_debugfs_terminate(vport); 2771 fc_remove_host(shost); 2772 scsi_remove_host(shost); 2773 2774 spin_lock_irq(&phba->hbalock); 2775 list_del_init(&vport->listentry); 2776 spin_unlock_irq(&phba->hbalock); 2777 2778 lpfc_cleanup(vport); 2779 return; 2780} 2781 2782/** 2783 * lpfc_get_instance - Get a unique integer ID 2784 * 2785 * This routine allocates a unique integer ID from lpfc_hba_index pool. 
It 2786 * uses the kernel idr facility to perform the task. 2787 * 2788 * Return codes: 2789 * instance - a unique integer ID allocated as the new instance. 2790 * -1 - lpfc get instance failed. 2791 **/ 2792int 2793lpfc_get_instance(void) 2794{ 2795 int instance = 0; 2796 2797 /* Assign an unused number */ 2798 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2799 return -1; 2800 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2801 return -1; 2802 return instance; 2803} 2804 2805/** 2806 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2807 * @shost: pointer to SCSI host data structure. 2808 * @time: elapsed time of the scan in jiffies. 2809 * 2810 * This routine is called by the SCSI layer with a SCSI host to determine 2811 * whether the host scan is finished. 2812 * 2813 * Note: there is no scan_start function as adapter initialization will have 2814 * asynchronously kicked off the link initialization. 2815 * 2816 * Return codes 2817 * 0 - SCSI host scan is not over yet. 2818 * 1 - SCSI host scan is over. 2819 **/ 2820int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2821{ 2822 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2823 struct lpfc_hba *phba = vport->phba; 2824 int stat = 0; 2825 2826 spin_lock_irq(shost->host_lock); 2827 2828 if (vport->load_flag & FC_UNLOADING) { 2829 stat = 1; 2830 goto finished; 2831 } 2832 if (time >= 30 * HZ) { 2833 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2834 "0461 Scanning longer than 30 " 2835 "seconds. Continuing initialization\n"); 2836 stat = 1; 2837 goto finished; 2838 } 2839 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2840 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2841 "0465 Link down longer than 15 " 2842 "seconds. Continuing initialization\n"); 2843 stat = 1; 2844 goto finished; 2845 } 2846 2847 if (vport->port_state != LPFC_VPORT_READY) 2848 goto finished; 2849 if (vport->num_disc_nodes || vport->fc_prli_sent) 2850 goto finished; 2851 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2852 goto finished; 2853 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2854 goto finished; 2855 2856 stat = 1; 2857 2858finished: 2859 spin_unlock_irq(shost->host_lock); 2860 return stat; 2861} 2862 2863/** 2864 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2865 * @shost: pointer to SCSI host data structure. 2866 * 2867 * This routine initializes a given SCSI host's attributes on a FC port. The 2868 * SCSI host can be either on top of a physical port or a virtual port. 2869 **/ 2870void lpfc_host_attrib_init(struct Scsi_Host *shost) 2871{ 2872 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2873 struct lpfc_hba *phba = vport->phba; 2874 /* 2875 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2876 */ 2877 2878 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2879 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2880 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2881 2882 memset(fc_host_supported_fc4s(shost), 0, 2883 sizeof(fc_host_supported_fc4s(shost))); 2884 fc_host_supported_fc4s(shost)[2] = 1; 2885 fc_host_supported_fc4s(shost)[7] = 1; 2886 2887 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2888 sizeof fc_host_symbolic_name(shost)); 2889 2890 fc_host_supported_speeds(shost) = 0; 2891 if (phba->lmt & LMT_10Gb) 2892 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2893 if (phba->lmt & LMT_8Gb) 2894 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2895 if (phba->lmt & LMT_4Gb) 2896 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2897 if (phba->lmt & LMT_2Gb) 2898 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2899 if (phba->lmt & LMT_1Gb) 2900 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2901 2902 fc_host_maxframe_size(shost) = 2903 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2904 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2905 2906 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2907 2908 /* This value is also unchanging */ 2909 memset(fc_host_active_fc4s(shost), 0, 2910 sizeof(fc_host_active_fc4s(shost))); 2911 fc_host_active_fc4s(shost)[2] = 1; 2912 fc_host_active_fc4s(shost)[7] = 1; 2913 2914 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2915 spin_lock_irq(shost->host_lock); 2916 vport->load_flag &= ~FC_LOADING; 2917 spin_unlock_irq(shost->host_lock); 2918} 2919 2920/** 2921 * lpfc_stop_port_s3 - Stop SLI3 device port 2922 * @phba: pointer to lpfc hba data structure. 2923 * 2924 * This routine is invoked to stop an SLI3 device port, it stops the device 2925 * from generating interrupts and stops the device driver's timers for the 2926 * device. 2927 **/ 2928static void 2929lpfc_stop_port_s3(struct lpfc_hba *phba) 2930{ 2931 /* Clear all interrupt enable conditions */ 2932 writel(0, phba->HCregaddr); 2933 readl(phba->HCregaddr); /* flush */ 2934 /* Clear all pending interrupts */ 2935 writel(0xffffffff, phba->HAregaddr); 2936 readl(phba->HAregaddr); /* flush */ 2937 2938 /* Reset some HBA SLI setup states */ 2939 lpfc_stop_hba_timers(phba); 2940 phba->pport->work_port_events = 0; 2941} 2942 2943/** 2944 * lpfc_stop_port_s4 - Stop SLI4 device port 2945 * @phba: pointer to lpfc hba data structure. 2946 * 2947 * This routine is invoked to stop an SLI4 device port, it stops the device 2948 * from generating interrupts and stops the device driver's timers for the 2949 * device. 2950 **/ 2951static void 2952lpfc_stop_port_s4(struct lpfc_hba *phba) 2953{ 2954 /* Reset some HBA SLI4 setup states */ 2955 lpfc_stop_hba_timers(phba); 2956 phba->pport->work_port_events = 0; 2957 phba->sli4_hba.intr_enable = 0; 2958} 2959 2960/** 2961 * lpfc_stop_port - Wrapper function for stopping hba port 2962 * @phba: Pointer to HBA context object. 2963 * 2964 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 2965 * the API jump table function pointer from the lpfc_hba struct. 2966 **/ 2967void 2968lpfc_stop_port(struct lpfc_hba *phba) 2969{ 2970 phba->lpfc_stop_port(phba); 2971} 2972 2973/** 2974 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2975 * @phba: Pointer to hba for which this call is being executed. 2976 * 2977 * This routine starts the timer waiting for the FCF rediscovery to complete. 
2978 **/ 2979void 2980lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 2981{ 2982 unsigned long fcf_redisc_wait_tmo = 2983 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 2984 /* Start fcf rediscovery wait period timer */ 2985 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 2986 spin_lock_irq(&phba->hbalock); 2987 /* Allow action to new fcf asynchronous event */ 2988 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2989 /* Mark the FCF rediscovery pending state */ 2990 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 2991 spin_unlock_irq(&phba->hbalock); 2992} 2993 2994/** 2995 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 2996 * @ptr: Map to lpfc_hba data structure pointer. 2997 * 2998 * This routine is invoked when waiting for FCF table rediscover has been 2999 * timed out. If new FCF record(s) has (have) been discovered during the 3000 * wait period, a new FCF event shall be added to the FCOE async event 3001 * list, and then worker thread shall be waked up for processing from the 3002 * worker thread context. 3003 **/ 3004void 3005lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3006{ 3007 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3008 3009 /* Don't send FCF rediscovery event if timer cancelled */ 3010 spin_lock_irq(&phba->hbalock); 3011 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3012 spin_unlock_irq(&phba->hbalock); 3013 return; 3014 } 3015 /* Clear FCF rediscovery timer pending flag */ 3016 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3017 /* FCF rediscovery event to worker thread */ 3018 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 3019 spin_unlock_irq(&phba->hbalock); 3020 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3021 "2776 FCF rediscover quiescent timer expired\n"); 3022 /* wake up worker thread */ 3023 lpfc_worker_wake_up(phba); 3024} 3025 3026/** 3027 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3028 * @phba: pointer to lpfc hba data structure. 3029 * @acqe_link: pointer to the async link completion queue entry. 3030 * 3031 * This routine is to parse the SLI4 link-attention link fault code and 3032 * translate it into the base driver's read link attention mailbox command 3033 * status. 3034 * 3035 * Return: Link-attention status in terms of base driver's coding. 3036 **/ 3037static uint16_t 3038lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3039 struct lpfc_acqe_link *acqe_link) 3040{ 3041 uint16_t latt_fault; 3042 3043 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3044 case LPFC_ASYNC_LINK_FAULT_NONE: 3045 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3046 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3047 latt_fault = 0; 3048 break; 3049 default: 3050 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3051 "0398 Invalid link fault code: x%x\n", 3052 bf_get(lpfc_acqe_link_fault, acqe_link)); 3053 latt_fault = MBXERR_ERROR; 3054 break; 3055 } 3056 return latt_fault; 3057} 3058 3059/** 3060 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3061 * @phba: pointer to lpfc hba data structure. 3062 * @acqe_link: pointer to the async link completion queue entry. 3063 * 3064 * This routine is to parse the SLI4 link attention type and translate it 3065 * into the base driver's link attention type coding. 3066 * 3067 * Return: Link attention type in terms of base driver's coding. 
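 *
 * Editor's example (mirrors the use in lpfc_sli4_async_link_evt() below):
 *
 *    att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
 *    if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
 *        return;    (LPFC_ATT_RESERVED: nothing to act on)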
3068 **/ 3069static uint8_t 3070lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3071 struct lpfc_acqe_link *acqe_link) 3072{ 3073 uint8_t att_type; 3074 3075 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3076 case LPFC_ASYNC_LINK_STATUS_DOWN: 3077 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3078 att_type = LPFC_ATT_LINK_DOWN; 3079 break; 3080 case LPFC_ASYNC_LINK_STATUS_UP: 3081 /* Ignore physical link up events - wait for logical link up */ 3082 att_type = LPFC_ATT_RESERVED; 3083 break; 3084 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3085 att_type = LPFC_ATT_LINK_UP; 3086 break; 3087 default: 3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3089 "0399 Invalid link attention type: x%x\n", 3090 bf_get(lpfc_acqe_link_status, acqe_link)); 3091 att_type = LPFC_ATT_RESERVED; 3092 break; 3093 } 3094 return att_type; 3095} 3096 3097/** 3098 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3099 * @phba: pointer to lpfc hba data structure. 3100 * @acqe_link: pointer to the async link completion queue entry. 3101 * 3102 * This routine is to parse the SLI4 link-attention link speed and translate 3103 * it into the base driver's link-attention link speed coding. 3104 * 3105 * Return: Link-attention link speed in terms of base driver's coding. 3106 **/ 3107static uint8_t 3108lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3109 struct lpfc_acqe_link *acqe_link) 3110{ 3111 uint8_t link_speed; 3112 3113 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3114 case LPFC_ASYNC_LINK_SPEED_ZERO: 3115 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3116 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3117 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3118 break; 3119 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3120 link_speed = LPFC_LINK_SPEED_1GHZ; 3121 break; 3122 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3123 link_speed = LPFC_LINK_SPEED_10GHZ; 3124 break; 3125 default: 3126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3127 "0483 Invalid link-attention link speed: x%x\n", 3128 bf_get(lpfc_acqe_link_speed, acqe_link)); 3129 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3130 break; 3131 } 3132 return link_speed; 3133} 3134 3135/** 3136 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3137 * @phba: pointer to lpfc hba data structure. 3138 * @acqe_link: pointer to the async link completion queue entry. 3139 * 3140 * This routine is to handle the SLI4 asynchronous FCoE link event. 
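 *
 * Editor's note (a sketch; the local variable names are illustrative): the
 * ACQE fields are decoded with the bf_get() bit-field accessors rather
 * than read directly, e.g.:
 *
 *    speed = bf_get(lpfc_acqe_link_speed, acqe_link);
 *    fault = bf_get(lpfc_acqe_link_fault, acqe_link);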
3141 **/ 3142static void 3143lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3144 struct lpfc_acqe_link *acqe_link) 3145{ 3146 struct lpfc_dmabuf *mp; 3147 LPFC_MBOXQ_t *pmb; 3148 MAILBOX_t *mb; 3149 struct lpfc_mbx_read_top *la; 3150 uint8_t att_type; 3151 int rc; 3152 3153 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3154 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3155 return; 3156 phba->fcoe_eventtag = acqe_link->event_tag; 3157 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3158 if (!pmb) { 3159 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3160 "0395 The mboxq allocation failed\n"); 3161 return; 3162 } 3163 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3164 if (!mp) { 3165 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3166 "0396 The lpfc_dmabuf allocation failed\n"); 3167 goto out_free_pmb; 3168 } 3169 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3170 if (!mp->virt) { 3171 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3172 "0397 The mbuf allocation failed\n"); 3173 goto out_free_dmabuf; 3174 } 3175 3176 /* Cleanup any outstanding ELS commands */ 3177 lpfc_els_flush_all_cmd(phba); 3178 3179 /* Block ELS IOCBs until we are done processing the link event */ 3180 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3181 3182 /* Update link event statistics */ 3183 phba->sli.slistat.link_event++; 3184 3185 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3186 lpfc_read_topology(phba, pmb, mp); 3187 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3188 pmb->vport = phba->pport; 3189 3190 /* Keep the link status for extra SLI4 state machine reference */ 3191 phba->sli4_hba.link_state.speed = 3192 bf_get(lpfc_acqe_link_speed, acqe_link); 3193 phba->sli4_hba.link_state.duplex = 3194 bf_get(lpfc_acqe_link_duplex, acqe_link); 3195 phba->sli4_hba.link_state.status = 3196 bf_get(lpfc_acqe_link_status, acqe_link); 3197 phba->sli4_hba.link_state.type = 3198 bf_get(lpfc_acqe_link_type, acqe_link); 3199 phba->sli4_hba.link_state.number = 3200 bf_get(lpfc_acqe_link_number, acqe_link); 3201 phba->sli4_hba.link_state.fault = 3202 bf_get(lpfc_acqe_link_fault, acqe_link); 3203 phba->sli4_hba.link_state.logical_speed = 3204 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3205 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3206 "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x " 3207 "LA Type:x%x Port Type:%d Port Number:%d Logical " 3208 "speed:%dMbps Fault:%d\n", 3209 phba->sli4_hba.link_state.speed, 3210 phba->sli4_hba.link_state.duplex, 3211 phba->sli4_hba.link_state.status, 3212 phba->sli4_hba.link_state.type, 3213 phba->sli4_hba.link_state.number, 3214 phba->sli4_hba.link_state.logical_speed * 10, 3215 phba->sli4_hba.link_state.fault); 3216 /* 3217 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3218 * topology info. Note: Optional for non FC-AL ports. 3219 */ 3220 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3221 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3222 if (rc == MBX_NOT_FINISHED) 3223 goto out_free_dmabuf; 3224 return; 3225 } 3226 /* 3227 * For FCoE Mode: fill in all the topology information we need and call 3228 * the READ_TOPOLOGY completion routine to continue without actually 3229 * sending the READ_TOPOLOGY mailbox command to the port.
3230 */ 3231 /* Parse and translate status field */ 3232 mb = &pmb->u.mb; 3233 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3234 3235 /* Parse and translate link attention fields */ 3236 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3237 la->eventTag = acqe_link->event_tag; 3238 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 3239 bf_set(lpfc_mbx_read_top_link_spd, la, 3240 lpfc_sli4_parse_latt_link_speed(phba, acqe_link)); 3241 3242 /* Fake the following irrelevant fields */ 3243 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 3244 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 3245 bf_set(lpfc_mbx_read_top_il, la, 0); 3246 bf_set(lpfc_mbx_read_top_pb, la, 0); 3247 bf_set(lpfc_mbx_read_top_fa, la, 0); 3248 bf_set(lpfc_mbx_read_top_mm, la, 0); 3249 3250 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3251 lpfc_mbx_cmpl_read_topology(phba, pmb); 3252 3253 return; 3254 3255out_free_dmabuf: 3256 kfree(mp); 3257out_free_pmb: 3258 mempool_free(pmb, phba->mbox_mem_pool); 3259} 3260 3261/** 3262 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 3263 * @phba: pointer to lpfc hba data structure. 3264 * @acqe_fc: pointer to the async fc completion queue entry. 3265 * 3266 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 3267 * that the event was received and then issue a read_topology mailbox command so 3268 * that the rest of the driver will treat it the same as SLI3. 3269 **/ 3270static void 3271lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 3272{ 3273 struct lpfc_dmabuf *mp; 3274 LPFC_MBOXQ_t *pmb; 3275 int rc; 3276 3277 if (bf_get(lpfc_trailer_type, acqe_fc) != 3278 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 3279 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3280 "2895 Non FC link Event detected.(%d)\n", 3281 bf_get(lpfc_trailer_type, acqe_fc)); 3282 return; 3283 } 3284 /* Keep the link status for extra SLI4 state machine reference */ 3285 phba->sli4_hba.link_state.speed = 3286 bf_get(lpfc_acqe_fc_la_speed, acqe_fc); 3287 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 3288 phba->sli4_hba.link_state.topology = 3289 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 3290 phba->sli4_hba.link_state.status = 3291 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 3292 phba->sli4_hba.link_state.type = 3293 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 3294 phba->sli4_hba.link_state.number = 3295 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 3296 phba->sli4_hba.link_state.fault = 3297 bf_get(lpfc_acqe_link_fault, acqe_fc); 3298 phba->sli4_hba.link_state.logical_speed = 3299 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 3300 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3301 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 3302 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 3303 "%dMbps Fault:%d\n", 3304 phba->sli4_hba.link_state.speed, 3305 phba->sli4_hba.link_state.topology, 3306 phba->sli4_hba.link_state.status, 3307 phba->sli4_hba.link_state.type, 3308 phba->sli4_hba.link_state.number, 3309 phba->sli4_hba.link_state.logical_speed * 10, 3310 phba->sli4_hba.link_state.fault); 3311 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3312 if (!pmb) { 3313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3314 "2897 The mboxq allocation failed\n"); 3315 return; 3316 } 3317 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3318 if (!mp) { 3319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3320 "2898 The lpfc_dmabuf allocation failed\n"); 3321 goto out_free_pmb;
3322 } 3323 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3324 if (!mp->virt) { 3325 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3326 "2899 The mbuf allocation failed\n"); 3327 goto out_free_dmabuf; 3328 } 3329 3330 /* Cleanup any outstanding ELS commands */ 3331 lpfc_els_flush_all_cmd(phba); 3332 3333 /* Block ELS IOCBs until we are done processing the link event */ 3334 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3335 3336 /* Update link event statistics */ 3337 phba->sli.slistat.link_event++; 3338 3339 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3340 lpfc_read_topology(phba, pmb, mp); 3341 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3342 pmb->vport = phba->pport; 3343 3344 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3345 if (rc == MBX_NOT_FINISHED) 3346 goto out_free_dmabuf; 3347 return; 3348 3349out_free_dmabuf: 3350 kfree(mp); 3351out_free_pmb: 3352 mempool_free(pmb, phba->mbox_mem_pool); 3353} 3354 3355/** 3356 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event 3357 * @phba: pointer to lpfc hba data structure. 3358 * @acqe_sli: pointer to the async SLI completion queue entry. 3359 * 3360 * This routine is to handle the SLI4 asynchronous SLI events. 3361 **/ 3362static void 3363lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 3364{ 3365 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3366 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 3367 "x%08x SLI Event Type:%d", 3368 acqe_sli->event_data1, acqe_sli->event_data2, 3369 bf_get(lpfc_trailer_type, acqe_sli)); 3370 return; 3371} 3372 3373/** 3374 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3375 * @vport: pointer to vport data structure. 3376 * 3377 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3378 * response to a CVL event. 3379 * 3380 * Return the pointer to the ndlp with the vport if successful, otherwise 3381 * return NULL.
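 *
 * Editor's example (mirrors the FIP CVL handling below): a NULL return
 * means the CVL is not applicable yet and is simply ignored:
 *
 *    ndlp = lpfc_sli4_perform_vport_cvl(vport);
 *    if (!ndlp)
 *        break;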
3382 **/ 3383static struct lpfc_nodelist * 3384lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3385{ 3386 struct lpfc_nodelist *ndlp; 3387 struct Scsi_Host *shost; 3388 struct lpfc_hba *phba; 3389 3390 if (!vport) 3391 return NULL; 3392 phba = vport->phba; 3393 if (!phba) 3394 return NULL; 3395 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3396 if (!ndlp) { 3397 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3398 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3399 if (!ndlp) 3400 return NULL; 3401 lpfc_nlp_init(vport, ndlp, Fabric_DID); 3402 /* Set the node type */ 3403 ndlp->nlp_type |= NLP_FABRIC; 3404 /* Put ndlp onto node list */ 3405 lpfc_enqueue_node(vport, ndlp); 3406 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3407 /* re-setup ndlp without removing from node list */ 3408 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3409 if (!ndlp) 3410 return NULL; 3411 } 3412 if ((phba->pport->port_state < LPFC_FLOGI) && 3413 (phba->pport->port_state != LPFC_VPORT_FAILED)) 3414 return NULL; 3415 /* If virtual link is not yet instantiated ignore CVL */ 3416 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 3417 && (vport->port_state != LPFC_VPORT_FAILED)) 3418 return NULL; 3419 shost = lpfc_shost_from_vport(vport); 3420 if (!shost) 3421 return NULL; 3422 lpfc_linkdown_port(vport); 3423 lpfc_cleanup_pending_mbox(vport); 3424 spin_lock_irq(shost->host_lock); 3425 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3426 spin_unlock_irq(shost->host_lock); 3427 3428 return ndlp; 3429} 3430 3431/** 3432 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3433 * @phba: pointer to lpfc hba data structure. 3434 * 3435 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3436 * response to a FCF dead event. 3437 **/ 3438static void 3439lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3440{ 3441 struct lpfc_vport **vports; 3442 int i; 3443 3444 vports = lpfc_create_vport_work_array(phba); 3445 if (vports) 3446 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3447 lpfc_sli4_perform_vport_cvl(vports[i]); 3448 lpfc_destroy_vport_work_array(phba, vports); 3449} 3450 3451/** 3452 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 3453 * @phba: pointer to lpfc hba data structure. 3454 * @acqe_fip: pointer to the async fcoe completion queue entry. 3455 * 3456 * This routine is to handle the SLI4 asynchronous fcoe event.
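 *
 * Editor's note (a sketch): the FIP event subtype travels in the ACQE
 * trailer and is dispatched with a switch on the trailer type:
 *
 *    switch (bf_get(lpfc_trailer_type, acqe_fip)) {
 *    case LPFC_FIP_EVENT_TYPE_NEW_FCF:
 *    case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
 *    case LPFC_FIP_EVENT_TYPE_CVL:
 *        ...
 *    }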
3457 **/ 3458static void 3459lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 3460 struct lpfc_acqe_fip *acqe_fip) 3461{ 3462 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 3463 int rc; 3464 struct lpfc_vport *vport; 3465 struct lpfc_nodelist *ndlp; 3466 struct Scsi_Host *shost; 3467 int active_vlink_present; 3468 struct lpfc_vport **vports; 3469 int i; 3470 3471 phba->fc_eventTag = acqe_fip->event_tag; 3472 phba->fcoe_eventtag = acqe_fip->event_tag; 3473 switch (event_type) { 3474 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 3475 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 3476 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 3477 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3478 LOG_DISCOVERY, 3479 "2546 New FCF event, evt_tag:x%x, " 3480 "index:x%x\n", 3481 acqe_fip->event_tag, 3482 acqe_fip->index); 3483 else 3484 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3485 LOG_DISCOVERY, 3486 "2788 FCF param modified event, " 3487 "evt_tag:x%x, index:x%x\n", 3488 acqe_fip->event_tag, 3489 acqe_fip->index); 3490 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3491 /* 3492 * During period of FCF discovery, read the FCF 3493 * table record indexed by the event to update 3494 * FCF roundrobin failover eligible FCF bmask. 3495 */ 3496 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3497 LOG_DISCOVERY, 3498 "2779 Read FCF (x%x) for updating " 3499 "roundrobin FCF failover bmask\n", 3500 acqe_fip->index); 3501 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 3502 } 3503 3504 /* If the FCF discovery is in progress, do nothing. */ 3505 spin_lock_irq(&phba->hbalock); 3506 if (phba->hba_flag & FCF_TS_INPROG) { 3507 spin_unlock_irq(&phba->hbalock); 3508 break; 3509 } 3510 /* If fast FCF failover rescan event is pending, do nothing */ 3511 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3512 spin_unlock_irq(&phba->hbalock); 3513 break; 3514 } 3515 3516 /* If the FCF has been in discovered state, do nothing. */ 3517 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3518 spin_unlock_irq(&phba->hbalock); 3519 break; 3520 } 3521 spin_unlock_irq(&phba->hbalock); 3522 3523 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3524 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3525 "2770 Start FCF table scan per async FCF " 3526 "event, evt_tag:x%x, index:x%x\n", 3527 acqe_fip->event_tag, acqe_fip->index); 3528 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3529 LPFC_FCOE_FCF_GET_FIRST); 3530 if (rc) 3531 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3532 "2547 Issue FCF scan read FCF mailbox " 3533 "command failed (x%x)\n", rc); 3534 break; 3535 3536 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 3537 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3538 "2548 FCF Table full count 0x%x tag 0x%x\n", 3539 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 3540 acqe_fip->event_tag); 3541 break; 3542 3543 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 3544 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3545 "2549 FCF (x%x) disconnected from network, " 3546 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 3547 /* 3548 * If we are in the middle of FCF failover process, clear 3549 * the corresponding FCF bit in the roundrobin bitmap. 
3550 */ 3551 spin_lock_irq(&phba->hbalock); 3552 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3553 spin_unlock_irq(&phba->hbalock); 3554 /* Update FLOGI FCF failover eligible FCF bmask */ 3555 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 3556 break; 3557 } 3558 spin_unlock_irq(&phba->hbalock); 3559 3560 /* If the event is not for currently used fcf do nothing */ 3561 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 3562 break; 3563 3564 /* 3565 * Otherwise, request the port to rediscover the entire FCF 3566 * table for a fast recovery from the case that the current FCF 3567 * is no longer valid as we are not in the middle of FCF 3568 * failover process already. 3569 */ 3570 spin_lock_irq(&phba->hbalock); 3571 /* Mark the fast failover process in progress */ 3572 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3573 spin_unlock_irq(&phba->hbalock); 3574 3575 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3576 "2771 Start FCF fast failover process due to " 3577 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3578 "\n", acqe_fip->event_tag, acqe_fip->index); 3579 rc = lpfc_sli4_redisc_fcf_table(phba); 3580 if (rc) { 3581 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3582 LOG_DISCOVERY, 3583 "2772 Issue FCF rediscover mailbox " 3584 "command failed, fail through to FCF " 3585 "dead event\n"); 3586 spin_lock_irq(&phba->hbalock); 3587 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3588 spin_unlock_irq(&phba->hbalock); 3589 /* 3590 * Last resort will fail over by treating this 3591 * as a link down to FCF registration. 3592 */ 3593 lpfc_sli4_fcf_dead_failthrough(phba); 3594 } else { 3595 /* Reset FCF roundrobin bmask for new discovery */ 3596 memset(phba->fcf.fcf_rr_bmask, 0, 3597 sizeof(*phba->fcf.fcf_rr_bmask)); 3598 /* 3599 * Handling fast FCF failover to a DEAD FCF event is 3600 * considered equivalent to receiving CVL to all vports. 3601 */ 3602 lpfc_sli4_perform_all_vport_cvl(phba); 3603 } 3604 break; 3605 case LPFC_FIP_EVENT_TYPE_CVL: 3606 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3607 "2718 Clear Virtual Link Received for VPI 0x%x" 3608 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3609 vport = lpfc_find_vport_by_vpid(phba, 3610 acqe_fip->index - phba->vpi_base); 3611 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3612 if (!ndlp) 3613 break; 3614 active_vlink_present = 0; 3615 3616 vports = lpfc_create_vport_work_array(phba); 3617 if (vports) { 3618 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3619 i++) { 3620 if ((!(vports[i]->fc_flag & 3621 FC_VPORT_CVL_RCVD)) && 3622 (vports[i]->port_state > LPFC_FDISC)) { 3623 active_vlink_present = 1; 3624 break; 3625 } 3626 } 3627 lpfc_destroy_vport_work_array(phba, vports); 3628 } 3629 3630 if (active_vlink_present) { 3631 /* 3632 * If there are other active VLinks present, 3633 * re-instantiate the Vlink using FDISC. 3634 */ 3635 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3636 shost = lpfc_shost_from_vport(vport); 3637 spin_lock_irq(shost->host_lock); 3638 ndlp->nlp_flag |= NLP_DELAY_TMO; 3639 spin_unlock_irq(shost->host_lock); 3640 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3641 vport->port_state = LPFC_FDISC; 3642 } else { 3643 /* 3644 * Otherwise, we request port to rediscover 3645 * the entire FCF table for a fast recovery 3646 * from the possible case that the current FCF 3647 * is no longer valid if we are not already 3648 * in the FCF failover process.
3649			 */
3650			spin_lock_irq(&phba->hbalock);
3651			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3652				spin_unlock_irq(&phba->hbalock);
3653				break;
3654			}
3655			/* Mark the fast failover process in progress */
3656			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3657			spin_unlock_irq(&phba->hbalock);
3658			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3659					LOG_DISCOVERY,
3660					"2773 Start FCF failover per CVL, "
3661					"evt_tag:x%x\n", acqe_fip->event_tag);
3662			rc = lpfc_sli4_redisc_fcf_table(phba);
3663			if (rc) {
3664				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3665						LOG_DISCOVERY,
3666						"2774 Issue FCF rediscover "
3667						"mailbox command failed, fail "
3668						"through to CVL event\n");
3669				spin_lock_irq(&phba->hbalock);
3670				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3671				spin_unlock_irq(&phba->hbalock);
3672				/*
3673				 * Last resort will be retry on the
3674				 * current registered FCF entry.
3675				 */
3676				lpfc_retry_pport_discovery(phba);
3677			} else
3678				/*
3679				 * Reset FCF roundrobin bmask for new
3680				 * discovery.
3681				 */
3682				memset(phba->fcf.fcf_rr_bmask, 0,
3683				       sizeof(*phba->fcf.fcf_rr_bmask));
3684		}
3685		break;
3686	default:
3687		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3688			"0288 Unknown FCoE event type 0x%x event tag "
3689			"0x%x\n", event_type, acqe_fip->event_tag);
3690		break;
3691	}
3692 }
3693
3694 /**
3695  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3696  * @phba: pointer to lpfc hba data structure.
3697  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3698  *
3699  * This routine is to handle the SLI4 asynchronous dcbx event.
3700  **/
3701 static void
3702 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3703			 struct lpfc_acqe_dcbx *acqe_dcbx)
3704 {
3705	phba->fc_eventTag = acqe_dcbx->event_tag;
3706	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3707			"0290 The SLI4 DCBX asynchronous event is not "
3708			"handled yet\n");
3709 }
3710
3711 /**
3712  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3713  * @phba: pointer to lpfc hba data structure.
3714  * @acqe_grp5: pointer to the async grp5 completion queue entry.
3715  *
3716  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3717  * is an asynchronous notification of a logical link speed change. The Port
3718  * reports the logical link speed in units of 10Mbps; a reported value of
 * 1000 thus corresponds to 10Gbps.
3719  **/
3720 static void
3721 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3722			 struct lpfc_acqe_grp5 *acqe_grp5)
3723 {
3724	uint16_t prev_ll_spd;
3725
3726	phba->fc_eventTag = acqe_grp5->event_tag;
3727	phba->fcoe_eventtag = acqe_grp5->event_tag;
3728	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3729	phba->sli4_hba.link_state.logical_speed =
3730		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3731	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3732			"2789 GRP5 Async Event: Updating logical link speed "
3733			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3734			(phba->sli4_hba.link_state.logical_speed * 10));
3735 }
3736
3737 /**
3738  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3739  * @phba: pointer to lpfc hba data structure.
3740  *
3741  * This routine is invoked by the worker thread to process all the pending
3742  * SLI4 asynchronous events.
3743  **/
3744 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3745 {
3746	struct lpfc_cq_event *cq_event;
3747
3748	/* First, declare the async event has been handled */
3749	spin_lock_irq(&phba->hbalock);
3750	phba->hba_flag &= ~ASYNC_EVENT;
3751	spin_unlock_irq(&phba->hbalock);
3752	/* Now, handle all the async events */
3753	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3754		/* Get the first event from the head of the event queue */
3755		spin_lock_irq(&phba->hbalock);
3756		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3757				 cq_event, struct lpfc_cq_event, list);
3758		spin_unlock_irq(&phba->hbalock);
3759		/* Process the asynchronous event */
3760		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3761		case LPFC_TRAILER_CODE_LINK:
3762			lpfc_sli4_async_link_evt(phba,
3763						 &cq_event->cqe.acqe_link);
3764			break;
3765		case LPFC_TRAILER_CODE_FCOE:
3766			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3767			break;
3768		case LPFC_TRAILER_CODE_DCBX:
3769			lpfc_sli4_async_dcbx_evt(phba,
3770						 &cq_event->cqe.acqe_dcbx);
3771			break;
3772		case LPFC_TRAILER_CODE_GRP5:
3773			lpfc_sli4_async_grp5_evt(phba,
3774						 &cq_event->cqe.acqe_grp5);
3775			break;
3776		case LPFC_TRAILER_CODE_FC:
3777			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3778			break;
3779		case LPFC_TRAILER_CODE_SLI:
3780			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3781			break;
3782		default:
3783			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3784					"1804 Invalid asynchronous event code: "
3785					"x%x\n", bf_get(lpfc_trailer_code,
3786					&cq_event->cqe.mcqe_cmpl));
3787			break;
3788		}
3789		/* Free the completion event processed to the free pool */
3790		lpfc_sli4_cq_event_release(phba, cq_event);
3791	}
3792 }
3793
3794 /**
3795  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3796  * @phba: pointer to lpfc hba data structure.
3797  *
3798  * This routine is invoked by the worker thread to process the FCF table
3799  * rediscovery pending completion event.
3800  **/
3801 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3802 {
3803	int rc;
3804
3805	spin_lock_irq(&phba->hbalock);
3806	/* Clear FCF rediscovery timeout event */
3807	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3808	/* Clear driver fast failover FCF record flag */
3809	phba->fcf.failover_rec.flag = 0;
3810	/* Set state for FCF fast failover */
3811	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3812	spin_unlock_irq(&phba->hbalock);
3813
3814	/* Scan FCF table from the first entry to re-discover SAN */
3815	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3816			"2777 Start post-quiescent FCF table scan\n");
3817	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3818	if (rc)
3819		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3820				"2747 Issue FCF scan read FCF mailbox "
3821				"command failed 0x%x\n", rc);
3822 }
3823
3824 /**
3825  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3826  * @phba: pointer to lpfc hba data structure.
3827  * @dev_grp: The HBA PCI-Device group number.
3828  *
3829  * This routine is invoked to set up the per HBA PCI-Device group function
3830  * API jump table entries.
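 * Once these tables are populated, common code dispatches through the
 * per-group pointers instead of branching on the SLI revision at each
 * call site; an illustrative call is:
 *
 *	phba->lpfc_stop_port(phba);
 *
 * which resolves to lpfc_stop_port_s3 or lpfc_stop_port_s4 depending on
 * the @dev_grp the table was set up with.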
3831  *
3832  * Return: 0 if success, otherwise -ENODEV
3833  **/
3834 int
3835 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3836 {
3837	int rc;
3838
3839	/* Set up lpfc PCI-device group */
3840	phba->pci_dev_grp = dev_grp;
3841
3842	/* The LPFC_PCI_DEV_OC uses SLI4 */
3843	if (dev_grp == LPFC_PCI_DEV_OC)
3844		phba->sli_rev = LPFC_SLI_REV4;
3845
3846	/* Set up device INIT API function jump table */
3847	rc = lpfc_init_api_table_setup(phba, dev_grp);
3848	if (rc)
3849		return -ENODEV;
3850	/* Set up SCSI API function jump table */
3851	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3852	if (rc)
3853		return -ENODEV;
3854	/* Set up SLI API function jump table */
3855	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3856	if (rc)
3857		return -ENODEV;
3858	/* Set up MBOX API function jump table */
3859	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3860	if (rc)
3861		return -ENODEV;
3862
3863	return 0;
3864 }
3865
3866 /**
3867  * lpfc_log_intr_mode - Log the active interrupt mode
3868  * @phba: pointer to lpfc hba data structure.
3869  * @intr_mode: active interrupt mode adopted.
3870  *
3871  * This routine is invoked to log the currently used active interrupt mode
3872  * to the device.
3873  **/
3874 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3875 {
3876	switch (intr_mode) {
3877	case 0:
3878		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3879				"0470 Enabled INTx interrupt mode.\n");
3880		break;
3881	case 1:
3882		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3883				"0481 Enabled MSI interrupt mode.\n");
3884		break;
3885	case 2:
3886		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3887				"0480 Enabled MSI-X interrupt mode.\n");
3888		break;
3889	default:
3890		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3891				"0482 Illegal interrupt mode.\n");
3892		break;
3893	}
3894	return;
3895 }
3896
3897 /**
3898  * lpfc_enable_pci_dev - Enable a generic PCI device.
3899  * @phba: pointer to lpfc hba data structure.
3900  *
3901  * This routine is invoked to enable the PCI device; this enabling sequence
3902  * is common to all lpfc PCI device types.
3903  *
3904  * Return codes
3905  *	0 - successful
3906  *	other values - error
3907  **/
3908 static int
3909 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3910 {
3911	struct pci_dev *pdev;
3912	int bars;
3913
3914	/* Obtain PCI device reference */
3915	if (!phba->pcidev)
3916		goto out_error;
3917	else
3918		pdev = phba->pcidev;
3919	/* Select PCI BARs */
3920	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3921	/* Enable PCI device */
3922	if (pci_enable_device_mem(pdev))
3923		goto out_error;
3924	/* Request PCI resource for the device */
3925	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3926		goto out_disable_device;
3927	/* Set up device as PCI master and save state for EEH */
3928	pci_set_master(pdev);
3929	pci_try_set_mwi(pdev);
3930	pci_save_state(pdev);
3931
3932	return 0;
3933
3934 out_disable_device:
3935	pci_disable_device(pdev);
3936 out_error:
3937	return -ENODEV;
3938 }
3939
3940 /**
3941  * lpfc_disable_pci_dev - Disable a generic PCI device.
3942  * @phba: pointer to lpfc hba data structure.
3943  *
3944  * This routine is invoked to disable the PCI device; this disabling sequence
3945  * is common to all lpfc PCI device types.
3946  **/
3947 static void
3948 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3949 {
3950	struct pci_dev *pdev;
3951	int bars;
3952
3953	/* Obtain PCI device reference */
3954	if (!phba->pcidev)
3955		return;
3956	else
3957		pdev = phba->pcidev;
3958	/* Select PCI BARs */
3959	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3960	/* Release PCI resource and disable PCI device */
3961	pci_release_selected_regions(pdev, bars);
3962	pci_disable_device(pdev);
3963	/* Null out PCI private reference to driver */
3964	pci_set_drvdata(pdev, NULL);
3965
3966	return;
3967 }
3968
3969 /**
3970  * lpfc_reset_hba - Reset a hba
3971  * @phba: pointer to lpfc hba data structure.
3972  *
3973  * This routine is invoked to reset a hba device. It brings the HBA
3974  * offline, performs a board restart, and then brings the board back
3975  * online. The lpfc_offline calls lpfc_sli_hba_down, which will clean up
3976  * outstanding mailbox commands.
3977  **/
3978 void
3979 lpfc_reset_hba(struct lpfc_hba *phba)
3980 {
3981	/* If resets are disabled then set error state and return. */
3982	if (!phba->cfg_enable_hba_reset) {
3983		phba->link_state = LPFC_HBA_ERROR;
3984		return;
3985	}
3986	lpfc_offline_prep(phba);
3987	lpfc_offline(phba);
3988	lpfc_sli_brdrestart(phba);
3989	lpfc_online(phba);
3990	lpfc_unblock_mgmt_io(phba);
3991 }
3992
3993 /**
3994  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3995  * @phba: pointer to lpfc hba data structure.
3996  *
3997  * This routine is invoked to set up the driver internal resources specific to
3998  * support the SLI-3 HBA device it is attached to.
3999  *
4000  * Return codes
4001  *	0 - successful
4002  *	other values - error
4003  **/
4004 static int
4005 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4006 {
4007	struct lpfc_sli *psli;
4008
4009	/*
4010	 * Initialize timers used by driver
4011	 */
4012
4013	/* Heartbeat timer */
4014	init_timer(&phba->hb_tmofunc);
4015	phba->hb_tmofunc.function = lpfc_hb_timeout;
4016	phba->hb_tmofunc.data = (unsigned long)phba;
4017
4018	psli = &phba->sli;
4019	/* MBOX heartbeat timer */
4020	init_timer(&psli->mbox_tmo);
4021	psli->mbox_tmo.function = lpfc_mbox_timeout;
4022	psli->mbox_tmo.data = (unsigned long) phba;
4023	/* FCP polling mode timer */
4024	init_timer(&phba->fcp_poll_timer);
4025	phba->fcp_poll_timer.function = lpfc_poll_timeout;
4026	phba->fcp_poll_timer.data = (unsigned long) phba;
4027	/* Fabric block timer */
4028	init_timer(&phba->fabric_block_timer);
4029	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4030	phba->fabric_block_timer.data = (unsigned long) phba;
4031	/* EA polling mode timer */
4032	init_timer(&phba->eratt_poll);
4033	phba->eratt_poll.function = lpfc_poll_eratt;
4034	phba->eratt_poll.data = (unsigned long) phba;
4035
4036	/* Host attention work mask setup */
4037	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4038	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4039
4040	/* Get all the module params for configuring this host */
4041	lpfc_get_cfgparam(phba);
4042	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4043		phba->menlo_flag |= HBA_MENLO_SUPPORT;
4044		/* check for menlo minimum sg count */
4045		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4046			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4047	}
4048
4049	/*
4050	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4051	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4052	 * 2 segments are added since the IOCB needs a command and response bde.
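	 *
	 * As a worked illustration (the concrete sizes below are assumptions
	 * for the example, not values taken from the headers): with
	 * cfg_sg_seg_cnt = 64 and a 12-byte struct ulp_bde64, the pool
	 * buffer size computed below would be
	 *
	 *	sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + (64 + 2) * 12
	 *
	 * so the per-buffer allocation grows linearly with the configured
	 * segment count.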
4053	 */
4054	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4055		sizeof(struct fcp_rsp) +
4056		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4057
4058	if (phba->cfg_enable_bg) {
4059		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4060		phba->cfg_sg_dma_buf_size +=
4061			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4062	}
4063
4064	/* Also reinitialize the host templates with new values. */
4065	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4066	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4067
4068	phba->max_vpi = LPFC_MAX_VPI;
4069	/* This will be set to correct value after config_port mbox */
4070	phba->max_vports = 0;
4071
4072	/*
4073	 * Initialize the SLI Layer to run with lpfc HBAs.
4074	 */
4075	lpfc_sli_setup(phba);
4076	lpfc_sli_queue_setup(phba);
4077
4078	/* Allocate device driver memory */
4079	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4080		return -ENOMEM;
4081
4082	return 0;
4083 }
4084
4085 /**
4086  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4087  * @phba: pointer to lpfc hba data structure.
4088  *
4089  * This routine is invoked to unset the driver internal resources set up
4090  * specific for supporting the SLI-3 HBA device it is attached to.
4091  **/
4092 static void
4093 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4094 {
4095	/* Free device driver memory allocated */
4096	lpfc_mem_free_all(phba);
4097
4098	return;
4099 }
4100
4101 /**
4102  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4103  * @phba: pointer to lpfc hba data structure.
4104  *
4105  * This routine is invoked to set up the driver internal resources specific to
4106  * support the SLI-4 HBA device it is attached to.
4107  *
4108  * Return codes
4109  *	0 - successful
4110  *	other values - error
4111  **/
4112 static int
4113 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4114 {
4115	struct lpfc_sli *psli;
4116	LPFC_MBOXQ_t *mboxq;
4117	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4118	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4119	struct lpfc_mqe *mqe;
4120	int longs, sli_family;
4121
4122	/* Before proceeding, wait for POST done and device ready */
4123	rc = lpfc_sli4_post_status_check(phba);
4124	if (rc)
4125		return -ENODEV;
4126
4127	/*
4128	 * Initialize timers used by driver
4129	 */
4130
4131	/* Heartbeat timer */
4132	init_timer(&phba->hb_tmofunc);
4133	phba->hb_tmofunc.function = lpfc_hb_timeout;
4134	phba->hb_tmofunc.data = (unsigned long)phba;
4135	init_timer(&phba->rrq_tmr);
4136	phba->rrq_tmr.function = lpfc_rrq_timeout;
4137	phba->rrq_tmr.data = (unsigned long)phba;
4138
4139	psli = &phba->sli;
4140	/* MBOX heartbeat timer */
4141	init_timer(&psli->mbox_tmo);
4142	psli->mbox_tmo.function = lpfc_mbox_timeout;
4143	psli->mbox_tmo.data = (unsigned long) phba;
4144	/* Fabric block timer */
4145	init_timer(&phba->fabric_block_timer);
4146	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4147	phba->fabric_block_timer.data = (unsigned long) phba;
4148	/* EA polling mode timer */
4149	init_timer(&phba->eratt_poll);
4150	phba->eratt_poll.function = lpfc_poll_eratt;
4151	phba->eratt_poll.data = (unsigned long) phba;
4152	/* FCF rediscover timer */
4153	init_timer(&phba->fcf.redisc_wait);
4154	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4155	phba->fcf.redisc_wait.data = (unsigned long)phba;
4156
4157	/*
4158	 * We need to do a READ_CONFIG mailbox command here before
4159	 * calling lpfc_get_cfgparam. For VFs this will report the
4160	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4161	 * All of the resources allocated
4162	 * for this Port are tied to these values.
4163	 */
4164	/* Get all the module params for configuring this host */
4165	lpfc_get_cfgparam(phba);
4166	phba->max_vpi = LPFC_MAX_VPI;
4167	/* This will be set to correct value after the read_config mbox */
4168	phba->max_vports = 0;
4169
4170	/* Program the default value of vlan_id and fc_map */
4171	phba->valid_vlan = 0;
4172	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4173	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4174	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4175
4176	/*
4177	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4178	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4179	 * 2 segments are added since the IOCB needs a command and response bde.
4180	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4181	 * sgl sizes that are a power of 2 are used: the loop below rounds the
	 * computed buf_size up to the next power of 2 (e.g. 2500 becomes 4096).
4182	 */
4183	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4184		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4185
4186	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4187	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4188	switch (sli_family) {
4189	case LPFC_SLI_INTF_FAMILY_BE2:
4190	case LPFC_SLI_INTF_FAMILY_BE3:
4191		/* There is a single hint for BE - 2 pages per BPL. */
4192		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4193		    LPFC_SLI_INTF_SLI_HINT1_1)
4194			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4195		break;
4196	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4197	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4198	default:
4199		break;
4200	}
4201	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4202	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4203	     dma_buf_size = dma_buf_size << 1)
4204		;
4205	if (dma_buf_size == max_buf_size)
4206		phba->cfg_sg_seg_cnt = (dma_buf_size -
4207			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4208			(2 * sizeof(struct sli4_sge))) /
4209				sizeof(struct sli4_sge);
4210	phba->cfg_sg_dma_buf_size = dma_buf_size;
4211
4212	/* Initialize buffer queue management fields */
4213	hbq_count = lpfc_sli_hbq_count();
4214	for (i = 0; i < hbq_count; ++i)
4215		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4216	INIT_LIST_HEAD(&phba->rb_pend_list);
4217	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4218	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4219
4220	/*
4221	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4222	 */
4223	/* Initialize the Abort scsi buffer list used by driver */
4224	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4225	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4226	/* This abort list is used by the worker thread */
4227	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4228
4229	/*
4230	 * Initialize driver internal slow-path work queues
4231	 */
4232
4233	/* Driver internal slow-path CQ Event pool */
4234	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4235	/* Response IOCB work queue list */
4236	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4237	/* Asynchronous event CQ Event work queue list */
4238	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4239	/* Fast-path XRI aborted CQ Event work queue list */
4240	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4241	/* Slow-path XRI aborted CQ Event work queue list */
4242	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4243	/* Receive queue CQ Event work queue list */
4244	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4245
4246	/* Initialize the driver internal SLI layer lists. */
4247	lpfc_sli_setup(phba);
4248	lpfc_sli_queue_setup(phba);
4249
4250	/* Allocate device driver memory */
4251	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4252	if (rc)
4253		return -ENOMEM;
4254
4255	/* IF Type 2 ports get initialized now. */
4256	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4257	    LPFC_SLI_INTF_IF_TYPE_2) {
4258		rc = lpfc_pci_function_reset(phba);
4259		if (unlikely(rc))
4260			return -ENODEV;
4261	}
4262
4263	/* Create the bootstrap mailbox command */
4264	rc = lpfc_create_bootstrap_mbox(phba);
4265	if (unlikely(rc))
4266		goto out_free_mem;
4267
4268	/* Set up the host's endian order with the device. */
4269	rc = lpfc_setup_endian_order(phba);
4270	if (unlikely(rc))
4271		goto out_free_bsmbx;
4272
4273	/* Set up the hba's configuration parameters. */
4274	rc = lpfc_sli4_read_config(phba);
4275	if (unlikely(rc))
4276		goto out_free_bsmbx;
4277
4278	/* IF Type 0 ports get initialized now. */
4279	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4280	    LPFC_SLI_INTF_IF_TYPE_0) {
4281		rc = lpfc_pci_function_reset(phba);
4282		if (unlikely(rc))
4283			goto out_free_bsmbx;
4284	}
4285
4286	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4287					       GFP_KERNEL);
4288	if (!mboxq) {
4289		rc = -ENOMEM;
4290		goto out_free_bsmbx;
4291	}
4292
4293	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4294	lpfc_supported_pages(mboxq);
4295	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4296	if (!rc) {
4297		mqe = &mboxq->u.mqe;
4298		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4299		       LPFC_MAX_SUPPORTED_PAGES);
4300		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4301			switch (pn_page[i]) {
4302			case LPFC_SLI4_PARAMETERS:
4303				phba->sli4_hba.pc_sli4_params.supported = 1;
4304				break;
4305			default:
4306				break;
4307			}
4308		}
4309		/* Read the port's SLI4 Parameters capabilities if supported. */
4310		if (phba->sli4_hba.pc_sli4_params.supported)
4311			rc = lpfc_pc_sli4_params_get(phba, mboxq);
4312		if (rc) {
4313			mempool_free(mboxq, phba->mbox_mem_pool);
4314			rc = -EIO;
4315			goto out_free_bsmbx;
4316		}
4317	}
4318	/*
4319	 * Get sli4 parameters that override parameters from Port capabilities.
4320	 * If this call fails it is not a critical error so continue loading.
4321	 */
4322	lpfc_get_sli4_parameters(phba, mboxq);
4323	mempool_free(mboxq, phba->mbox_mem_pool);
4324	/* Create all the SLI4 queues */
4325	rc = lpfc_sli4_queue_create(phba);
4326	if (rc)
4327		goto out_free_bsmbx;
4328
4329	/* Create driver internal CQE event pool */
4330	rc = lpfc_sli4_cq_event_pool_create(phba);
4331	if (rc)
4332		goto out_destroy_queue;
4333
4334	/* Initialize and populate the sgl list per host */
4335	rc = lpfc_init_sgl_list(phba);
4336	if (rc) {
4337		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4338				"1400 Failed to initialize sgl list.\n");
4339		goto out_destroy_cq_event_pool;
4340	}
4341	rc = lpfc_init_active_sgl_array(phba);
4342	if (rc) {
4343		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4344				"1430 Failed to initialize sgl list.\n");
4345		goto out_free_sgl_list;
4346	}
4347
4348	rc = lpfc_sli4_init_rpi_hdrs(phba);
4349	if (rc) {
4350		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4351				"1432 Failed to initialize rpi headers.\n");
4352		goto out_free_active_sgl;
4353	}
4354
4355	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4356	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4357	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4358					 GFP_KERNEL);
4359	if (!phba->fcf.fcf_rr_bmask) {
4360		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4361				"2759 Failed to allocate memory for FCF round "
4362				"robin failover bmask\n");
4363		goto out_remove_rpi_hdrs;
4364	}
4365
4366	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4367				      phba->cfg_fcp_eq_count), GFP_KERNEL);
4368	if (!phba->sli4_hba.fcp_eq_hdl) {
4369		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4370				"2572 Failed to allocate memory for fast-path "
4371				"per-EQ handle array\n");
4372		goto out_free_fcf_rr_bmask;
4373	}
4374
4375	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4376				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4377	if (!phba->sli4_hba.msix_entries) {
4378		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4379				"2573 Failed to allocate memory for msi-x "
4380				"interrupt vector entries\n");
4381		goto out_free_fcp_eq_hdl;
4382	}
4383
4384	return rc;
4385
4386 out_free_fcp_eq_hdl:
4387	kfree(phba->sli4_hba.fcp_eq_hdl);
4388 out_free_fcf_rr_bmask:
4389	kfree(phba->fcf.fcf_rr_bmask);
4390 out_remove_rpi_hdrs:
4391	lpfc_sli4_remove_rpi_hdrs(phba);
4392 out_free_active_sgl:
4393	lpfc_free_active_sgl(phba);
4394 out_free_sgl_list:
4395	lpfc_free_sgl_list(phba);
4396 out_destroy_cq_event_pool:
4397	lpfc_sli4_cq_event_pool_destroy(phba);
4398 out_destroy_queue:
4399	lpfc_sli4_queue_destroy(phba);
4400 out_free_bsmbx:
4401	lpfc_destroy_bootstrap_mbox(phba);
4402 out_free_mem:
4403	lpfc_mem_free(phba);
4404	return rc;
4405 }
4406
4407 /**
4408  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4409  * @phba: pointer to lpfc hba data structure.
4410  *
4411  * This routine is invoked to unset the driver internal resources set up
4412  * specific for supporting the SLI-4 HBA device it is attached to.
4413  **/
4414 static void
4415 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4416 {
4417	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4418
4419	/* Free memory allocated for msi-x interrupt vector entries */
4420	kfree(phba->sli4_hba.msix_entries);
4421
4422	/* Free memory allocated for fast-path work queue handles */
4423	kfree(phba->sli4_hba.fcp_eq_hdl);
4424
4425	/* Free the allocated rpi headers.
 */
4426	lpfc_sli4_remove_rpi_hdrs(phba);
4427	lpfc_sli4_remove_rpis(phba);
4428
4429	/* Free eligible FCF index bmask */
4430	kfree(phba->fcf.fcf_rr_bmask);
4431
4432	/* Free the ELS sgl list */
4433	lpfc_free_active_sgl(phba);
4434	lpfc_free_sgl_list(phba);
4435
4436	/* Free the SCSI sgl management array */
4437	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4438
4439	/* Free the SLI4 queues */
4440	lpfc_sli4_queue_destroy(phba);
4441
4442	/* Free the completion queue EQ event pool */
4443	lpfc_sli4_cq_event_release_all(phba);
4444	lpfc_sli4_cq_event_pool_destroy(phba);
4445
4446	/* Free the bsmbx region. */
4447	lpfc_destroy_bootstrap_mbox(phba);
4448
4449	/* Free the SLI Layer memory with SLI4 HBAs */
4450	lpfc_mem_free_all(phba);
4451
4452	/* Free the current connect table */
4453	list_for_each_entry_safe(conn_entry, next_conn_entry,
4454				 &phba->fcf_conn_rec_list, list) {
4455		list_del_init(&conn_entry->list);
4456		kfree(conn_entry);
4457	}
4458
4459	return;
4460 }
4461
4462 /**
4463  * lpfc_init_api_table_setup - Set up init api function jump table
4464  * @phba: The hba struct for which this call is being executed.
4465  * @dev_grp: The HBA PCI-Device group number.
4466  *
4467  * This routine sets up the device INIT interface API function jump table
4468  * in @phba struct.
4469  *
4470  * Returns: 0 - success, -ENODEV - failure.
4471  **/
4472 int
4473 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4474 {
4475	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4476	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4477	phba->lpfc_selective_reset = lpfc_selective_reset;
4478	switch (dev_grp) {
4479	case LPFC_PCI_DEV_LP:
4480		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4481		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4482		phba->lpfc_stop_port = lpfc_stop_port_s3;
4483		break;
4484	case LPFC_PCI_DEV_OC:
4485		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4486		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4487		phba->lpfc_stop_port = lpfc_stop_port_s4;
4488		break;
4489	default:
4490		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4491				"1431 Invalid HBA PCI-device group: 0x%x\n",
4492				dev_grp);
4493		return -ENODEV;
4494		break;
4495	}
4496	return 0;
4497 }
4498
4499 /**
4500  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4501  * @phba: pointer to lpfc hba data structure.
4502  *
4503  * This routine is invoked to set up the driver internal resources before the
4504  * device specific resource setup to support the HBA device it is attached to.
4505  *
4506  * Return codes
4507  *	0 - successful
4508  *	other values - error
4509  **/
4510 static int
4511 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4512 {
4513	/*
4514	 * Driver resources common to all SLI revisions
4515	 */
4516	atomic_set(&phba->fast_event_count, 0);
4517	spin_lock_init(&phba->hbalock);
4518
4519	/* Initialize ndlp management spinlock */
4520	spin_lock_init(&phba->ndlp_lock);
4521
4522	INIT_LIST_HEAD(&phba->port_list);
4523	INIT_LIST_HEAD(&phba->work_list);
4524	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4525
4526	/* Initialize the wait queue head for the kernel thread */
4527	init_waitqueue_head(&phba->work_waitq);
4528
4529	/* Initialize the scsi buffer list used by driver for scsi IO */
4530	spin_lock_init(&phba->scsi_buf_list_lock);
4531	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4532
4533	/* Initialize the fabric iocb list */
4534	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4535
4536	/* Initialize list to save ELS buffers */
4537	INIT_LIST_HEAD(&phba->elsbuf);
4538
4539	/* Initialize FCF connection rec list */
4540	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4541
4542	return 0;
4543 }
4544
4545 /**
4546  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4547  * @phba: pointer to lpfc hba data structure.
4548  *
4549  * This routine is invoked to set up the driver internal resources after the
4550  * device specific resource setup to support the HBA device it is attached to.
4551  *
4552  * Return codes
4553  *	0 - successful
4554  *	other values - error
4555  **/
4556 static int
4557 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4558 {
4559	int error;
4560
4561	/* Startup the kernel thread for this host adapter. */
4562	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4563				       "lpfc_worker_%d", phba->brd_no);
4564	if (IS_ERR(phba->worker_thread)) {
4565		error = PTR_ERR(phba->worker_thread);
4566		return error;
4567	}
4568
4569	return 0;
4570 }
4571
4572 /**
4573  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4574  * @phba: pointer to lpfc hba data structure.
4575  *
4576  * This routine is invoked to unset the driver internal resources set up after
4577  * the device specific resource setup for supporting the HBA device it is
4578  * attached to.
4579  **/
4580 static void
4581 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4582 {
4583	/* Stop kernel worker thread */
4584	kthread_stop(phba->worker_thread);
4585 }
4586
4587 /**
4588  * lpfc_free_iocb_list - Free iocb list.
4589  * @phba: pointer to lpfc hba data structure.
4590  *
4591  * This routine is invoked to free the driver's IOCB list and memory.
4592  **/
4593 static void
4594 lpfc_free_iocb_list(struct lpfc_hba *phba)
4595 {
4596	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4597
4598	spin_lock_irq(&phba->hbalock);
4599	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4600				 &phba->lpfc_iocb_list, list) {
4601		list_del(&iocbq_entry->list);
4602		kfree(iocbq_entry);
4603		phba->total_iocbq_bufs--;
4604	}
4605	spin_unlock_irq(&phba->hbalock);
4606
4607	return;
4608 }
4609
4610 /**
4611  * lpfc_init_iocb_list - Allocate and initialize iocb list.
4612  * @phba: pointer to lpfc hba data structure.
4613  *
4614  * This routine is invoked to allocate and initialize the driver's IOCB
4615  * list and set up the IOCB tag array accordingly.
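 * Each iocbq added to the list is assigned a unique iotag by
 * lpfc_sli_next_iotag() so that a completion can later be matched back to
 * its originating request by tag lookup.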
4616  *
4617  * Return codes
4618  *	0 - successful
4619  *	other values - error
4620  **/
4621 static int
4622 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4623 {
4624	struct lpfc_iocbq *iocbq_entry = NULL;
4625	uint16_t iotag;
4626	int i;
4627
4628	/* Initialize and populate the iocb list per host.  */
4629	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4630	for (i = 0; i < iocb_count; i++) {
4631		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4632		if (iocbq_entry == NULL) {
4633			printk(KERN_ERR "%s: only allocated %d iocbs of "
4634				"expected %d count. Unloading driver.\n",
4635				__func__, i, LPFC_IOCB_LIST_CNT);
4636			goto out_free_iocbq;
4637		}
4638
4639		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4640		if (iotag == 0) {
4641			kfree(iocbq_entry);
4642			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4643				"Unloading driver.\n", __func__);
4644			goto out_free_iocbq;
4645		}
4646		iocbq_entry->sli4_xritag = NO_XRI;
4647
4648		spin_lock_irq(&phba->hbalock);
4649		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4650		phba->total_iocbq_bufs++;
4651		spin_unlock_irq(&phba->hbalock);
4652	}
4653
4654	return 0;
4655
4656 out_free_iocbq:
4657	lpfc_free_iocb_list(phba);
4658
4659	return -ENOMEM;
4660 }
4661
4662 /**
4663  * lpfc_free_sgl_list - Free sgl list.
4664  * @phba: pointer to lpfc hba data structure.
4665  *
4666  * This routine is invoked to free the driver's sgl list and memory.
4667  **/
4668 static void
4669 lpfc_free_sgl_list(struct lpfc_hba *phba)
4670 {
4671	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4672	LIST_HEAD(sglq_list);
4673
4674	spin_lock_irq(&phba->hbalock);
4675	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4676	spin_unlock_irq(&phba->hbalock);
4677
4678	list_for_each_entry_safe(sglq_entry, sglq_next,
4679				 &sglq_list, list) {
4680		list_del(&sglq_entry->list);
4681		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4682		kfree(sglq_entry);
4683		phba->sli4_hba.total_sglq_bufs--;
4684	}
4685	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4686 }
4687
4688 /**
4689  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4690  * @phba: pointer to lpfc hba data structure.
4691  *
4692  * This routine is invoked to allocate the driver's active sgl memory.
4693  * This array will hold the sglq_entry's for active IOs.
4694  **/
4695 static int
4696 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4697 {
4698	int size;
4699	size = sizeof(struct lpfc_sglq *);
4700	size *= phba->sli4_hba.max_cfg_param.max_xri;
4701
4702	phba->sli4_hba.lpfc_sglq_active_list =
4703		kzalloc(size, GFP_KERNEL);
4704	if (!phba->sli4_hba.lpfc_sglq_active_list)
4705		return -ENOMEM;
4706	return 0;
4707 }
4708
4709 /**
4710  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4711  * @phba: pointer to lpfc hba data structure.
4712  *
4713  * This routine is invoked to walk through the array of active sglq entries
4714  * and free all of the resources.
4715  * This is just a placeholder for now.
4716  **/
4717 static void
4718 lpfc_free_active_sgl(struct lpfc_hba *phba)
4719 {
4720	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4721 }
4722
4723 /**
4724  * lpfc_init_sgl_list - Allocate and initialize sgl list.
4725  * @phba: pointer to lpfc hba data structure.
4726  *
4727  * This routine is invoked to allocate and initialize the driver's sgl
4728  * list and set up the sgl xritag tag array accordingly.
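 * As an illustration (the numbers are assumed for the example, not taken
 * from hardware): with max_cfg_param.max_xri = 1024 and an ELS requirement
 * of 256 XRIs, the remaining 768 XRIs become scsi_xri_max and are set
 * aside for SCSI buffer mapping.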
4729  *
4730  * Return codes
4731  *	0 - successful
4732  *	other values - error
4733  **/
4734 static int
4735 lpfc_init_sgl_list(struct lpfc_hba *phba)
4736 {
4737	struct lpfc_sglq *sglq_entry = NULL;
4738	int i;
4739	int els_xri_cnt;
4740
4741	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4742	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4743			"2400 lpfc_init_sgl_list els %d.\n",
4744			els_xri_cnt);
4745	/* Initialize and populate the sglq list per host/VF. */
4746	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4747	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4748
4749	/* Sanity check on XRI management */
4750	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4751		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4752				"2562 No room left for SCSI XRI allocation: "
4753				"max_xri=%d, els_xri=%d\n",
4754				phba->sli4_hba.max_cfg_param.max_xri,
4755				els_xri_cnt);
4756		return -ENOMEM;
4757	}
4758
4759	/* Allocate memory for the ELS XRI management array */
4760	phba->sli4_hba.lpfc_els_sgl_array =
4761			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4762				GFP_KERNEL);
4763
4764	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4765		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4766				"2401 Failed to allocate memory for ELS "
4767				"XRI management array of size %d.\n",
4768				els_xri_cnt);
4769		return -ENOMEM;
4770	}
4771
4772	/* Keep the SCSI XRI into the XRI management array */
4773	phba->sli4_hba.scsi_xri_max =
4774			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4775	phba->sli4_hba.scsi_xri_cnt = 0;
4776
4777	phba->sli4_hba.lpfc_scsi_psb_array =
4778			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4779				 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4780
4781	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4782		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4783				"2563 Failed to allocate memory for SCSI "
4784				"XRI management array of size %d.\n",
4785				phba->sli4_hba.scsi_xri_max);
4786		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4787		return -ENOMEM;
4788	}
4789
4790	for (i = 0; i < els_xri_cnt; i++) {
4791		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4792		if (sglq_entry == NULL) {
4793			printk(KERN_ERR "%s: only allocated %d sgls of "
4794				"expected %d count. Unloading driver.\n",
4795				__func__, i, els_xri_cnt);
4796			goto out_free_mem;
4797		}
4798
4799		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4800		if (sglq_entry->sli4_xritag == NO_XRI) {
4801			kfree(sglq_entry);
4802			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4803				"Unloading driver.\n", __func__);
4804			goto out_free_mem;
4805		}
4806		sglq_entry->buff_type = GEN_BUFF_TYPE;
4807		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4808		if (sglq_entry->virt == NULL) {
4809			kfree(sglq_entry);
4810			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4811				"Unloading driver.\n", __func__);
4812			goto out_free_mem;
4813		}
4814		sglq_entry->sgl = sglq_entry->virt;
4815		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4816
4817		/* The list order is used by later block SGL registration */
4818		spin_lock_irq(&phba->hbalock);
4819		sglq_entry->state = SGL_FREED;
4820		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4821		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4822		phba->sli4_hba.total_sglq_bufs++;
4823		spin_unlock_irq(&phba->hbalock);
4824	}
4825	return 0;
4826
4827 out_free_mem:
4828	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4829	lpfc_free_sgl_list(phba);
4830	return -ENOMEM;
4831 }
4832
4833 /**
4834  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4835  * @phba: pointer to lpfc hba data structure.
4836  *
4837  * This routine is invoked to post rpi header templates to the
4838  * HBA consistent with the SLI-4 interface spec. This routine
4839  * posts a PAGE_SIZE memory region to the port to hold up to
4840  * 64 rpi context headers.
4841  * No locks are held here because this is an initialization routine
4842  * called only from probe or lpfc_online when interrupts are not
4843  * enabled and the driver is reinitializing the device.
4844  *
4845  * Return codes
4846  *	0 - successful
4847  *	-ENOMEM - No available memory
4848  *	-EIO - The mailbox failed to complete successfully.
4849  **/
4850 int
4851 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4852 {
4853	int rc = 0;
4854	int longs;
4855	uint16_t rpi_count;
4856	struct lpfc_rpi_hdr *rpi_hdr;
4857
4858	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4859
4860	/*
4861	 * Provision an rpi bitmask range for discovery. The total count
4862	 * is the difference between max and base + 1.
4863	 */
4864	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4865		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4866
4867	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4868	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4869					   GFP_KERNEL);
4870	if (!phba->sli4_hba.rpi_bmask)
4871		return -ENOMEM;
4872
4873	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4874	if (!rpi_hdr) {
4875		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4876				"0391 Error during rpi post operation\n");
4877		lpfc_sli4_remove_rpis(phba);
4878		rc = -ENODEV;
4879	}
4880
4881	return rc;
4882 }
4883
4884 /**
4885  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4886  * @phba: pointer to lpfc hba data structure.
4887  *
4888  * This routine is invoked to allocate a single 4KB memory region to
4889  * support rpis and stores them in the phba. This single region
4890  * provides support for up to 64 rpis. The region is used globally
4891  * by the device.
4892  *
4893  * Returns:
4894  *	A valid rpi hdr on success.
4895  *	A NULL pointer on any failure.
4896  **/
4897 struct lpfc_rpi_hdr *
4898 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4899 {
4900	uint16_t rpi_limit, curr_rpi_range;
4901	struct lpfc_dmabuf *dmabuf;
4902	struct lpfc_rpi_hdr *rpi_hdr;
4903
4904	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4905		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4906
4907	spin_lock_irq(&phba->hbalock);
4908	curr_rpi_range = phba->sli4_hba.next_rpi;
4909	spin_unlock_irq(&phba->hbalock);
4910
4911	/*
4912	 * The port has a limited number of rpis. The increment here
4913	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4914	 * and to allow the full max_rpi range per port.
4915	 */
4916	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4917		return NULL;
4918
4919	/*
4920	 * First allocate the protocol header region for the port. The
4921	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4922	 */
4923	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4924	if (!dmabuf)
4925		return NULL;
4926
4927	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4928					  LPFC_HDR_TEMPLATE_SIZE,
4929					  &dmabuf->phys,
4930					  GFP_KERNEL);
4931	if (!dmabuf->virt) {
4932		rpi_hdr = NULL;
4933		goto err_free_dmabuf;
4934	}
4935
4936	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4937	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4938		rpi_hdr = NULL;
4939		goto err_free_coherent;
4940	}
4941
4942	/* Save the rpi header data for cleanup later.
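	 * (The regions are tracked on the lpfc_rpi_hdr_list; each successful
	 * posting advances next_rpi by LPFC_RPI_HDR_COUNT below, so with an
	 * rpi_base of 0 successive regions would cover rpis 0-63, 64-127,
	 * and so on; an illustration assuming LPFC_RPI_HDR_COUNT is 64, per
	 * the 64-rpi region size documented above.)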
	 */
4943	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4944	if (!rpi_hdr)
4945		goto err_free_coherent;
4946
4947	rpi_hdr->dmabuf = dmabuf;
4948	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4949	rpi_hdr->page_count = 1;
4950	spin_lock_irq(&phba->hbalock);
4951	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4952	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4953
4954	/*
4955	 * The next_rpi stores the next modulo-64 rpi value to post
4956	 * in any subsequent rpi memory region postings.
4957	 */
4958	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4959	spin_unlock_irq(&phba->hbalock);
4960	return rpi_hdr;
4961
4962 err_free_coherent:
4963	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4964			  dmabuf->virt, dmabuf->phys);
4965 err_free_dmabuf:
4966	kfree(dmabuf);
4967	return NULL;
4968 }
4969
4970 /**
4971  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4972  * @phba: pointer to lpfc hba data structure.
4973  *
4974  * This routine is invoked to remove all memory resources allocated
4975  * to support rpis. This routine presumes the caller has released all
4976  * rpis consumed by fabric or port logins and is prepared to have
4977  * the header pages removed.
4978  **/
4979 void
4980 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4981 {
4982	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4983
4984	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4985				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4986		list_del(&rpi_hdr->list);
4987		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4988				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4989		kfree(rpi_hdr->dmabuf);
4990		kfree(rpi_hdr);
4991	}
4992
4993	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4994	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4995 }
4996
4997 /**
4998  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4999  * @pdev: pointer to pci device data structure.
5000  *
5001  * This routine is invoked to allocate the driver hba data structure for an
5002  * HBA device. If the allocation is successful, the phba reference to the
5003  * PCI device data structure is set.
5004  *
5005  * Return codes
5006  *	pointer to @phba - successful
5007  *	NULL - error
5008  **/
5009 static struct lpfc_hba *
5010 lpfc_hba_alloc(struct pci_dev *pdev)
5011 {
5012	struct lpfc_hba *phba;
5013
5014	/* Allocate memory for HBA structure */
5015	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5016	if (!phba) {
5017		dev_err(&pdev->dev, "failed to allocate hba struct\n");
5018		return NULL;
5019	}
5020
5021	/* Set reference to PCI device in HBA structure */
5022	phba->pcidev = pdev;
5023
5024	/* Assign an unused board number */
5025	phba->brd_no = lpfc_get_instance();
5026	if (phba->brd_no < 0) {
5027		kfree(phba);
5028		return NULL;
5029	}
5030
5031	spin_lock_init(&phba->ct_ev_lock);
5032	INIT_LIST_HEAD(&phba->ct_ev_waiters);
5033
5034	return phba;
5035 }
5036
5037 /**
5038  * lpfc_hba_free - Free driver hba data structure with a device.
5039  * @phba: pointer to lpfc hba data structure.
5040  *
5041  * This routine is invoked to free the driver hba data structure with an
5042  * HBA device.
5043  **/
5044 static void
5045 lpfc_hba_free(struct lpfc_hba *phba)
5046 {
5047	/* Release the driver assigned board number */
5048	idr_remove(&lpfc_hba_index, phba->brd_no);
5049
5050	kfree(phba);
5051	return;
5052 }
5053
5054 /**
5055  * lpfc_create_shost - Create hba physical port with associated scsi host.
5056  * @phba: pointer to lpfc hba data structure.
5057  *
5058  * This routine is invoked to create HBA physical port and associate a SCSI
5059  * host with it.
5060  *
5061  * Return codes
5062  *	0 - successful
5063  *	other values - error
5064  **/
5065 static int
5066 lpfc_create_shost(struct lpfc_hba *phba)
5067 {
5068	struct lpfc_vport *vport;
5069	struct Scsi_Host *shost;
5070
5071	/* Initialize HBA FC structure */
5072	phba->fc_edtov = FF_DEF_EDTOV;
5073	phba->fc_ratov = FF_DEF_RATOV;
5074	phba->fc_altov = FF_DEF_ALTOV;
5075	phba->fc_arbtov = FF_DEF_ARBTOV;
5076
5077	atomic_set(&phba->sdev_cnt, 0);
5078	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5079	if (!vport)
5080		return -ENODEV;
5081
5082	shost = lpfc_shost_from_vport(vport);
5083	phba->pport = vport;
5084	lpfc_debugfs_initialize(vport);
5085	/* Put a reference to the SCSI host in the driver's device private data */
5086	pci_set_drvdata(phba->pcidev, shost);
5087
5088	return 0;
5089 }
5090
5091 /**
5092  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5093  * @phba: pointer to lpfc hba data structure.
5094  *
5095  * This routine is invoked to destroy HBA physical port and the associated
5096  * SCSI host.
5097  **/
5098 static void
5099 lpfc_destroy_shost(struct lpfc_hba *phba)
5100 {
5101	struct lpfc_vport *vport = phba->pport;
5102
5103	/* Destroy the physical port that is associated with the SCSI host */
5104	destroy_port(vport);
5105
5106	return;
5107 }
5108
5109 /**
5110  * lpfc_setup_bg - Setup Block guard structures and debug areas.
5111  * @phba: pointer to lpfc hba data structure.
5112  * @shost: the shost to be used to detect Block guard settings.
5113  *
5114  * This routine sets up the local Block guard protocol settings for @shost.
5115  * This routine also allocates memory for debugging bg buffers.
5116  **/
5117 static void
5118 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5119 {
5120	int pagecnt = 10;
5121	if (lpfc_prot_mask && lpfc_prot_guard) {
5122		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5123				"1478 Registering BlockGuard with the "
5124				"SCSI layer\n");
5125		scsi_host_set_prot(shost, lpfc_prot_mask);
5126		scsi_host_set_guard(shost, lpfc_prot_guard);
5127	}
5128	if (!_dump_buf_data) {
5129		while (pagecnt) {
5130			spin_lock_init(&_dump_buf_lock);
5131			_dump_buf_data =
5132				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5133			if (_dump_buf_data) {
5134				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5135					"9043 BLKGRD: allocated %d pages for "
5136					"_dump_buf_data at 0x%p\n",
5137					(1 << pagecnt), _dump_buf_data);
5138				_dump_buf_data_order = pagecnt;
5139				memset(_dump_buf_data, 0,
5140				       ((1 << PAGE_SHIFT) << pagecnt));
5141				break;
5142			} else
5143				--pagecnt;
5144		}
5145		if (!_dump_buf_data_order)
5146			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5147				"9044 BLKGRD: ERROR unable to allocate "
5148				"memory for hexdump\n");
5149	} else
5150		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5151			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5152			"\n", _dump_buf_data);
5153	if (!_dump_buf_dif) {
5154		while (pagecnt) {
5155			_dump_buf_dif =
5156				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5157			if (_dump_buf_dif) {
5158				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5159					"9046 BLKGRD: allocated %d pages for "
5160					"_dump_buf_dif at 0x%p\n",
5161					(1 << pagecnt), _dump_buf_dif);
5162				_dump_buf_dif_order = pagecnt;
5163				memset(_dump_buf_dif, 0,
5164				       ((1 << PAGE_SHIFT) << pagecnt));
5165				break;
5166			} else
5167				--pagecnt;
5168		}
5169		if (!_dump_buf_dif_order)
5170			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5171				"9047 BLKGRD: ERROR unable to allocate "
5172				"memory for
hexdump\n"); 5173 } else 5174 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5175 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 5176 _dump_buf_dif); 5177} 5178 5179/** 5180 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5181 * @phba: pointer to lpfc hba data structure. 5182 * 5183 * This routine is invoked to perform all the necessary post initialization 5184 * setup for the device. 5185 **/ 5186static void 5187lpfc_post_init_setup(struct lpfc_hba *phba) 5188{ 5189 struct Scsi_Host *shost; 5190 struct lpfc_adapter_event_header adapter_event; 5191 5192 /* Get the default values for Model Name and Description */ 5193 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5194 5195 /* 5196 * hba setup may have changed the hba_queue_depth so we need to 5197 * adjust the value of can_queue. 5198 */ 5199 shost = pci_get_drvdata(phba->pcidev); 5200 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5201 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5202 lpfc_setup_bg(phba, shost); 5203 5204 lpfc_host_attrib_init(shost); 5205 5206 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5207 spin_lock_irq(shost->host_lock); 5208 lpfc_poll_start_timer(phba); 5209 spin_unlock_irq(shost->host_lock); 5210 } 5211 5212 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5213 "0428 Perform SCSI scan\n"); 5214 /* Send board arrival event to upper layer */ 5215 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5216 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5217 fc_host_post_vendor_event(shost, fc_get_event_number(), 5218 sizeof(adapter_event), 5219 (char *) &adapter_event, 5220 LPFC_NL_VENDOR_ID); 5221 return; 5222} 5223 5224/** 5225 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5226 * @phba: pointer to lpfc hba data structure. 5227 * 5228 * This routine is invoked to set up the PCI device memory space for device 5229 * with SLI-3 interface spec. 5230 * 5231 * Return codes 5232 * 0 - successful 5233 * other values - error 5234 **/ 5235static int 5236lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5237{ 5238 struct pci_dev *pdev; 5239 unsigned long bar0map_len, bar2map_len; 5240 int i, hbq_count; 5241 void *ptr; 5242 int error = -ENODEV; 5243 5244 /* Obtain PCI device reference */ 5245 if (!phba->pcidev) 5246 return error; 5247 else 5248 pdev = phba->pcidev; 5249 5250 /* Set the device DMA mask size */ 5251 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5252 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5253 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5254 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5255 return error; 5256 } 5257 } 5258 5259 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5260 * required by each mapping. 5261 */ 5262 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5263 bar0map_len = pci_resource_len(pdev, 0); 5264 5265 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5266 bar2map_len = pci_resource_len(pdev, 2); 5267 5268 /* Map HBA SLIM to a kernel virtual address. */ 5269 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5270 if (!phba->slim_memmap_p) { 5271 dev_printk(KERN_ERR, &pdev->dev, 5272 "ioremap failed for SLIM memory.\n"); 5273 goto out; 5274 } 5275 5276 /* Map HBA Control Registers to a kernel virtual address. 
*/ 5277 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5278 if (!phba->ctrl_regs_memmap_p) { 5279 dev_printk(KERN_ERR, &pdev->dev, 5280 "ioremap failed for HBA control registers.\n"); 5281 goto out_iounmap_slim; 5282 } 5283 5284 /* Allocate memory for SLI-2 structures */ 5285 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5286 SLI2_SLIM_SIZE, 5287 &phba->slim2p.phys, 5288 GFP_KERNEL); 5289 if (!phba->slim2p.virt) 5290 goto out_iounmap; 5291 5292 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5293 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5294 phba->mbox_ext = (phba->slim2p.virt + 5295 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5296 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5297 phba->IOCBs = (phba->slim2p.virt + 5298 offsetof(struct lpfc_sli2_slim, IOCBs)); 5299 5300 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5301 lpfc_sli_hbq_size(), 5302 &phba->hbqslimp.phys, 5303 GFP_KERNEL); 5304 if (!phba->hbqslimp.virt) 5305 goto out_free_slim; 5306 5307 hbq_count = lpfc_sli_hbq_count(); 5308 ptr = phba->hbqslimp.virt; 5309 for (i = 0; i < hbq_count; ++i) { 5310 phba->hbqs[i].hbq_virt = ptr; 5311 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5312 ptr += (lpfc_hbq_defs[i]->entry_count * 5313 sizeof(struct lpfc_hbq_entry)); 5314 } 5315 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5316 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5317 5318 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5319 5320 INIT_LIST_HEAD(&phba->rb_pend_list); 5321 5322 phba->MBslimaddr = phba->slim_memmap_p; 5323 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5324 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5325 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5326 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5327 5328 return 0; 5329 5330out_free_slim: 5331 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5332 phba->slim2p.virt, phba->slim2p.phys); 5333out_iounmap: 5334 iounmap(phba->ctrl_regs_memmap_p); 5335out_iounmap_slim: 5336 iounmap(phba->slim_memmap_p); 5337out: 5338 return error; 5339} 5340 5341/** 5342 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5343 * @phba: pointer to lpfc hba data structure. 5344 * 5345 * This routine is invoked to unset the PCI device memory space for device 5346 * with SLI-3 interface spec. 5347 **/ 5348static void 5349lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5350{ 5351 struct pci_dev *pdev; 5352 5353 /* Obtain PCI device reference */ 5354 if (!phba->pcidev) 5355 return; 5356 else 5357 pdev = phba->pcidev; 5358 5359 /* Free coherent DMA memory allocated */ 5360 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5361 phba->hbqslimp.virt, phba->hbqslimp.phys); 5362 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5363 phba->slim2p.virt, phba->slim2p.phys); 5364 5365 /* I/O memory unmap */ 5366 iounmap(phba->ctrl_regs_memmap_p); 5367 iounmap(phba->slim_memmap_p); 5368 5369 return; 5370} 5371 5372/** 5373 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5374 * @phba: pointer to lpfc hba data structure. 5375 * 5376 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5377 * done and check status. 5378 * 5379 * Return 0 if successful, otherwise -ENODEV. 
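 *
 * The wait loop below polls the port semaphore register once every 10ms
 * for up to 3000 iterations, which gives the roughly 30 second POST
 * window noted in the code.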
5380 **/ 5381int 5382lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5383{ 5384 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 5385 struct lpfc_register reg_data; 5386 int i, port_error = 0; 5387 uint32_t if_type; 5388 5389 if (!phba->sli4_hba.PSMPHRregaddr) 5390 return -ENODEV; 5391 5392 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5393 for (i = 0; i < 3000; i++) { 5394 portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); 5395 if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { 5396 /* Port has a fatal POST error, break out */ 5397 port_error = -ENODEV; 5398 break; 5399 } 5400 if (LPFC_POST_STAGE_PORT_READY == 5401 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 5402 break; 5403 msleep(10); 5404 } 5405 5406 /* 5407 * If there was a port error during POST, then don't proceed with 5408 * other register reads as the data may not be valid. Just exit. 5409 */ 5410 if (port_error) { 5411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5412 "1408 Port Failed POST - portsmphr=0x%x, " 5413 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 5414 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 5415 portsmphr_reg.word0, 5416 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 5417 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 5418 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 5419 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 5420 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 5421 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 5422 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 5423 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 5424 } else { 5425 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5426 "2534 Device Info: SLIFamily=0x%x, " 5427 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 5428 "SLIHint_2=0x%x, FT=0x%x\n", 5429 bf_get(lpfc_sli_intf_sli_family, 5430 &phba->sli4_hba.sli_intf), 5431 bf_get(lpfc_sli_intf_slirev, 5432 &phba->sli4_hba.sli_intf), 5433 bf_get(lpfc_sli_intf_if_type, 5434 &phba->sli4_hba.sli_intf), 5435 bf_get(lpfc_sli_intf_sli_hint1, 5436 &phba->sli4_hba.sli_intf), 5437 bf_get(lpfc_sli_intf_sli_hint2, 5438 &phba->sli4_hba.sli_intf), 5439 bf_get(lpfc_sli_intf_func_type, 5440 &phba->sli4_hba.sli_intf)); 5441 /* 5442 * Check for other Port errors during the initialization 5443 * process. Fail the load if the port did not come up 5444 * correctly. 5445 */ 5446 if_type = bf_get(lpfc_sli_intf_if_type, 5447 &phba->sli4_hba.sli_intf); 5448 switch (if_type) { 5449 case LPFC_SLI_INTF_IF_TYPE_0: 5450 phba->sli4_hba.ue_mask_lo = 5451 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 5452 phba->sli4_hba.ue_mask_hi = 5453 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 5454 uerrlo_reg.word0 = 5455 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 5456 uerrhi_reg.word0 = 5457 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 5458 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5459 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5461 "1422 Unrecoverable Error " 5462 "Detected during POST " 5463 "uerr_lo_reg=0x%x, " 5464 "uerr_hi_reg=0x%x, " 5465 "ue_mask_lo_reg=0x%x, " 5466 "ue_mask_hi_reg=0x%x\n", 5467 uerrlo_reg.word0, 5468 uerrhi_reg.word0, 5469 phba->sli4_hba.ue_mask_lo, 5470 phba->sli4_hba.ue_mask_hi); 5471 port_error = -ENODEV; 5472 } 5473 break; 5474 case LPFC_SLI_INTF_IF_TYPE_2: 5475 /* Final checks. The port status should be clean. 
*/ 5476 reg_data.word0 = 5477 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 5478 if (bf_get(lpfc_sliport_status_err, &reg_data)) { 5479 phba->work_status[0] = 5480 readl(phba->sli4_hba.u.if_type2. 5481 ERR1regaddr); 5482 phba->work_status[1] = 5483 readl(phba->sli4_hba.u.if_type2. 5484 ERR2regaddr); 5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5486 "2888 Port Error Detected " 5487 "during POST: " 5488 "port status reg 0x%x, " 5489 "port_smphr reg 0x%x, " 5490 "error 1=0x%x, error 2=0x%x\n", 5491 reg_data.word0, 5492 portsmphr_reg.word0, 5493 phba->work_status[0], 5494 phba->work_status[1]); 5495 port_error = -ENODEV; 5496 } 5497 break; 5498 case LPFC_SLI_INTF_IF_TYPE_1: 5499 default: 5500 break; 5501 } 5502 } 5503 return port_error; 5504} 5505 5506/** 5507 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5508 * @phba: pointer to lpfc hba data structure. 5509 * @if_type: The SLI4 interface type getting configured. 5510 * 5511 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5512 * memory map. 5513 **/ 5514static void 5515lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 5516{ 5517 switch (if_type) { 5518 case LPFC_SLI_INTF_IF_TYPE_0: 5519 phba->sli4_hba.u.if_type0.UERRLOregaddr = 5520 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 5521 phba->sli4_hba.u.if_type0.UERRHIregaddr = 5522 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 5523 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 5524 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 5525 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 5526 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 5527 phba->sli4_hba.SLIINTFregaddr = 5528 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5529 break; 5530 case LPFC_SLI_INTF_IF_TYPE_2: 5531 phba->sli4_hba.u.if_type2.ERR1regaddr = 5532 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; 5533 phba->sli4_hba.u.if_type2.ERR2regaddr = 5534 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; 5535 phba->sli4_hba.u.if_type2.CTRLregaddr = 5536 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; 5537 phba->sli4_hba.u.if_type2.STATUSregaddr = 5538 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; 5539 phba->sli4_hba.SLIINTFregaddr = 5540 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5541 phba->sli4_hba.PSMPHRregaddr = 5542 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; 5543 phba->sli4_hba.RQDBregaddr = 5544 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5545 phba->sli4_hba.WQDBregaddr = 5546 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 5547 phba->sli4_hba.EQCQDBregaddr = 5548 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 5549 phba->sli4_hba.MQDBregaddr = 5550 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 5551 phba->sli4_hba.BMBXregaddr = 5552 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 5553 break; 5554 case LPFC_SLI_INTF_IF_TYPE_1: 5555 default: 5556 dev_printk(KERN_ERR, &phba->pcidev->dev, 5557 "FATAL - unsupported SLI4 interface type - %d\n", 5558 if_type); 5559 break; 5560 } 5561} 5562 5563/** 5564 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5565 * @phba: pointer to lpfc hba data structure. 5566 * 5567 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5568 * memory map.
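 *
 * The CSR addresses derived here assume ctrl_regs_memmap_p has already
 * been mapped from the second PCI resource; a sketch of the expected
 * caller (see lpfc_sli4_pci_mem_setup() later in this file):
 *
 *	phba->sli4_hba.ctrl_regs_memmap_p = ioremap(phba->pci_bar1_map,
 *						    bar1map_len);
 *	lpfc_sli4_bar1_register_memmap(phba);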
5569 **/ 5570static void 5571lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5572{ 5573 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5574 LPFC_SLIPORT_IF0_SMPHR; 5575 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5576 LPFC_HST_ISR0; 5577 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5578 LPFC_HST_IMR0; 5579 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5580 LPFC_HST_ISCR0; 5581} 5582 5583/** 5584 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5585 * @phba: pointer to lpfc hba data structure. 5586 * @vf: virtual function number 5587 * 5588 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5589 * based on the given virtual function number, @vf. 5590 * 5591 * Return 0 if successful, otherwise -ENODEV. 5592 **/ 5593static int 5594lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5595{ 5596 if (vf > LPFC_VIR_FUNC_MAX) 5597 return -ENODEV; 5598 5599 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5600 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5601 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5602 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5603 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5604 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5605 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5606 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5607 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5608 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5609 return 0; 5610} 5611 5612/** 5613 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5614 * @phba: pointer to lpfc hba data structure. 5615 * 5616 * This routine is invoked to create the bootstrap mailbox 5617 * region consistent with the SLI-4 interface spec. This 5618 * routine allocates all memory necessary to communicate 5619 * mailbox commands to the port and sets up all alignment 5620 * needs. No locks are expected to be held when calling 5621 * this routine. 5622 * 5623 * Return codes 5624 * 0 - successful 5625 * -ENOMEM - could not allocate memory. 5626 **/ 5627static int 5628lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5629{ 5630 uint32_t bmbx_size; 5631 struct lpfc_dmabuf *dmabuf; 5632 struct dma_address *dma_address; 5633 uint32_t pa_addr; 5634 uint64_t phys_addr; 5635 5636 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5637 if (!dmabuf) 5638 return -ENOMEM; 5639 5640 /* 5641 * The bootstrap mailbox region is composed of two parts 5642 * plus an alignment restriction of 16 bytes. 5643 */ 5644 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5645 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5646 bmbx_size, 5647 &dmabuf->phys, 5648 GFP_KERNEL); 5649 if (!dmabuf->virt) { 5650 kfree(dmabuf); 5651 return -ENOMEM; 5652 } 5653 memset(dmabuf->virt, 0, bmbx_size); 5654 5655 /* 5656 * Initialize the bootstrap mailbox pointers now so that the register 5657 * operations are simple later. The mailbox dma address is required 5658 * to be 16-byte aligned. Also align the virtual memory as each 5659 * mailbox is copied into the bmbx mailbox region before issuing the 5660 * command to the port.
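 * For example (illustrative numbers only), a dmabuf->phys of 0x1008 is
 * rounded up to an aphys of 0x1010, and avirt is advanced past the
 * corresponding padding; the extra LPFC_ALIGN_16_BYTE - 1 bytes
 * allocated above absorb the worst-case rounding.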
5661 */ 5662 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5663 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5664 5665 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5666 LPFC_ALIGN_16_BYTE); 5667 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5668 LPFC_ALIGN_16_BYTE); 5669 5670 /* 5671 * Set the high and low physical addresses now. The SLI4 alignment 5672 * requirement is 16 bytes and the mailbox is posted to the port 5673 * as two 30-bit addresses. The other data is a bit marking whether 5674 * the 30-bit address is the high or low address. 5675 * Upcast bmbx aphys to 64bits so shift instruction compiles 5676 * clean on 32 bit machines. 5677 */ 5678 dma_address = &phba->sli4_hba.bmbx.dma_address; 5679 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5680 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5681 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5682 LPFC_BMBX_BIT1_ADDR_HI); 5683 5684 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5685 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5686 LPFC_BMBX_BIT1_ADDR_LO); 5687 return 0; 5688} 5689 5690/** 5691 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5692 * @phba: pointer to lpfc hba data structure. 5693 * 5694 * This routine is invoked to teardown the bootstrap mailbox 5695 * region and release all host resources. This routine requires 5696 * the caller to ensure all mailbox commands recovered, no 5697 * additional mailbox comands are sent, and interrupts are disabled 5698 * before calling this routine. 5699 * 5700 **/ 5701static void 5702lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5703{ 5704 dma_free_coherent(&phba->pcidev->dev, 5705 phba->sli4_hba.bmbx.bmbx_size, 5706 phba->sli4_hba.bmbx.dmabuf->virt, 5707 phba->sli4_hba.bmbx.dmabuf->phys); 5708 5709 kfree(phba->sli4_hba.bmbx.dmabuf); 5710 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5711} 5712 5713/** 5714 * lpfc_sli4_read_config - Get the config parameters. 5715 * @phba: pointer to lpfc hba data structure. 5716 * 5717 * This routine is invoked to read the configuration parameters from the HBA. 5718 * The configuration parameters are used to set the base and maximum values 5719 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 5720 * allocation for the port. 5721 * 5722 * Return codes 5723 * 0 - successful 5724 * -ENOMEM - No availble memory 5725 * -EIO - The mailbox failed to complete successfully. 
5726 **/ 5727static int 5728lpfc_sli4_read_config(struct lpfc_hba *phba) 5729{ 5730 LPFC_MBOXQ_t *pmb; 5731 struct lpfc_mbx_read_config *rd_config; 5732 uint32_t rc = 0; 5733 5734 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5735 if (!pmb) { 5736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5737 "2011 Unable to allocate memory for issuing " 5738 "SLI_CONFIG_SPECIAL mailbox command\n"); 5739 return -ENOMEM; 5740 } 5741 5742 lpfc_read_config(phba, pmb); 5743 5744 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5745 if (rc != MBX_SUCCESS) { 5746 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5747 "2012 Mailbox failed, mbxCmd x%x " 5748 "READ_CONFIG, mbxStatus x%x\n", 5749 bf_get(lpfc_mqe_command, &pmb->u.mqe), 5750 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 5751 rc = -EIO; 5752 } else { 5753 rd_config = &pmb->u.mqe.un.rd_config; 5754 phba->sli4_hba.max_cfg_param.max_xri = 5755 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5756 phba->sli4_hba.max_cfg_param.xri_base = 5757 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 5758 phba->sli4_hba.max_cfg_param.max_vpi = 5759 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 5760 phba->sli4_hba.max_cfg_param.vpi_base = 5761 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 5762 phba->sli4_hba.max_cfg_param.max_rpi = 5763 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 5764 phba->sli4_hba.max_cfg_param.rpi_base = 5765 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 5766 phba->sli4_hba.max_cfg_param.max_vfi = 5767 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 5768 phba->sli4_hba.max_cfg_param.vfi_base = 5769 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5770 phba->sli4_hba.max_cfg_param.max_fcfi = 5771 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5772 phba->sli4_hba.max_cfg_param.fcfi_base = 5773 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); 5774 phba->sli4_hba.max_cfg_param.max_eq = 5775 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5776 phba->sli4_hba.max_cfg_param.max_rq = 5777 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 5778 phba->sli4_hba.max_cfg_param.max_wq = 5779 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 5780 phba->sli4_hba.max_cfg_param.max_cq = 5781 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 5782 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 5783 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 5784 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5785 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5786 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5787 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5789 phba->max_vports = phba->max_vpi; 5790 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5791 "2003 cfg params XRI(B:%d M:%d), " 5792 "VPI(B:%d M:%d) " 5793 "VFI(B:%d M:%d) " 5794 "RPI(B:%d M:%d) " 5795 "FCFI(B:%d M:%d)\n", 5796 phba->sli4_hba.max_cfg_param.xri_base, 5797 phba->sli4_hba.max_cfg_param.max_xri, 5798 phba->sli4_hba.max_cfg_param.vpi_base, 5799 phba->sli4_hba.max_cfg_param.max_vpi, 5800 phba->sli4_hba.max_cfg_param.vfi_base, 5801 phba->sli4_hba.max_cfg_param.max_vfi, 5802 phba->sli4_hba.max_cfg_param.rpi_base, 5803 phba->sli4_hba.max_cfg_param.max_rpi, 5804 phba->sli4_hba.max_cfg_param.fcfi_base, 5805 phba->sli4_hba.max_cfg_param.max_fcfi); 5806 } 5807 mempool_free(pmb, phba->mbox_mem_pool); 5808 5809 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5810 if (phba->cfg_hba_queue_depth > 5811 (phba->sli4_hba.max_cfg_param.max_xri - 5812 lpfc_sli4_get_els_iocb_cnt(phba))) 5813 phba->cfg_hba_queue_depth = 5814 phba->sli4_hba.max_cfg_param.max_xri - 5815 lpfc_sli4_get_els_iocb_cnt(phba); 5816 return rc; 5817} 5818 5819/** 5820 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 5821 * @phba: pointer to lpfc hba data structure. 5822 * 5823 * This routine is invoked to setup the port-side endian order when 5824 * the port if_type is 0. This routine has no function for other 5825 * if_types. 5826 * 5827 * Return codes 5828 * 0 - successful 5829 * -ENOMEM - No available memory 5830 * -EIO - The mailbox failed to complete successfully. 5831 **/ 5832static int 5833lpfc_setup_endian_order(struct lpfc_hba *phba) 5834{ 5835 LPFC_MBOXQ_t *mboxq; 5836 uint32_t if_type, rc = 0; 5837 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 5838 HOST_ENDIAN_HIGH_WORD1}; 5839 5840 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 5841 switch (if_type) { 5842 case LPFC_SLI_INTF_IF_TYPE_0: 5843 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5844 GFP_KERNEL); 5845 if (!mboxq) { 5846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5847 "0492 Unable to allocate memory for " 5848 "issuing SLI_CONFIG_SPECIAL mailbox " 5849 "command\n"); 5850 return -ENOMEM; 5851 } 5852 5853 /* 5854 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 5855 * two words to contain special data values and no other data. 5856 */ 5857 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 5858 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 5859 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5860 if (rc != MBX_SUCCESS) { 5861 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5862 "0493 SLI_CONFIG_SPECIAL mailbox " 5863 "failed with status x%x\n", 5864 rc); 5865 rc = -EIO; 5866 } 5867 mempool_free(mboxq, phba->mbox_mem_pool); 5868 break; 5869 case LPFC_SLI_INTF_IF_TYPE_2: 5870 case LPFC_SLI_INTF_IF_TYPE_1: 5871 default: 5872 break; 5873 } 5874 return rc; 5875} 5876 5877/** 5878 * lpfc_sli4_queue_create - Create all the SLI4 queues 5879 * @phba: pointer to lpfc hba data structure. 5880 * 5881 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 5882 * operation. For each SLI4 queue type, the parameters such as queue entry 5883 * count (queue depth) shall be taken from the module parameter. For now, 5884 * we just use some constant number as a placeholder. 5885 * 5886 * Return codes 5887 * 0 - successful 5888 * -ENOMEM - No available memory 5889 * -EIO - The mailbox failed to complete successfully.
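 *
 * In outline, the queues allocated here (and wired up later in
 * lpfc_sli4_queue_setup()) form the following topology; this is a
 * reading aid, not an additional contract:
 *
 *	sp_eq ----> mbx_cq ----> mbx_wq (MQ)
 *	      \---> els_cq ----> els_wq and hdr_rq/dat_rq (RQ pair)
 *	fp_eq[i] -> fcp_cq[i] -> fcp_wq[*] (WQs round-robin over the
 *	                         FCP CQs at queue setup time)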
5890 **/ 5891static int 5892lpfc_sli4_queue_create(struct lpfc_hba *phba) 5893{ 5894 struct lpfc_queue *qdesc; 5895 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5896 int cfg_fcp_wq_count; 5897 int cfg_fcp_eq_count; 5898 5899 /* 5900 * Sanity check for configured queue parameters against the run-time 5901 * device parameters 5902 */ 5903 5904 /* Sanity check on FCP fast-path WQ parameters */ 5905 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 5906 if (cfg_fcp_wq_count > 5907 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 5908 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 5909 LPFC_SP_WQN_DEF; 5910 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 5911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5912 "2581 Not enough WQs (%d) from " 5913 "the pci function for supporting " 5914 "FCP WQs (%d)\n", 5915 phba->sli4_hba.max_cfg_param.max_wq, 5916 phba->cfg_fcp_wq_count); 5917 goto out_error; 5918 } 5919 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5920 "2582 Not enough WQs (%d) from the pci " 5921 "function for supporting the requested " 5922 "FCP WQs (%d), the actual FCP WQs can " 5923 "be supported: %d\n", 5924 phba->sli4_hba.max_cfg_param.max_wq, 5925 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 5926 } 5927 /* The actual number of FCP work queues adopted */ 5928 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 5929 5930 /* Sanity check on FCP fast-path EQ parameters */ 5931 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 5932 if (cfg_fcp_eq_count > 5933 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 5934 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 5935 LPFC_SP_EQN_DEF; 5936 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 5937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5938 "2574 Not enough EQs (%d) from the " 5939 "pci function for supporting FCP " 5940 "EQs (%d)\n", 5941 phba->sli4_hba.max_cfg_param.max_eq, 5942 phba->cfg_fcp_eq_count); 5943 goto out_error; 5944 } 5945 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5946 "2575 Not enough EQs (%d) from the pci " 5947 "function for supporting the requested " 5948 "FCP EQs (%d), the actual FCP EQs can " 5949 "be supported: %d\n", 5950 phba->sli4_hba.max_cfg_param.max_eq, 5951 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 5952 } 5953 /* It does not make sense to have more EQs than WQs */ 5954 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5955 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5956 "2593 The FCP EQ count(%d) cannot be greater " 5957 "than the FCP WQ count(%d), limiting the " 5958 "FCP EQ count to %d\n", cfg_fcp_eq_count, 5959 phba->cfg_fcp_wq_count, 5960 phba->cfg_fcp_wq_count); 5961 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5962 } 5963 /* The actual number of FCP event queues adopted */ 5964 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 5965 /* The overall number of event queues used */ 5966 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 5967 5968 /* 5969 * Create Event Queues (EQs) 5970 */ 5971 5972 /* Get EQ depth from module parameter, fake the default for now */ 5973 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 5974 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 5975 5976 /* Create slow path event queue */ 5977 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5978 phba->sli4_hba.eq_ecount); 5979 if (!qdesc) { 5980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5981 "0496 Failed allocate slow-path EQ\n"); 5982 goto out_error; 5983 } 5984 phba->sli4_hba.sp_eq = qdesc; 5985 5986 /* Create fast-path FCP Event Queue(s) */ 5987 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 5988
phba->cfg_fcp_eq_count), GFP_KERNEL); 5989 if (!phba->sli4_hba.fp_eq) { 5990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5991 "2576 Failed allocate memory for fast-path " 5992 "EQ record array\n"); 5993 goto out_free_sp_eq; 5994 } 5995 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5997 phba->sli4_hba.eq_ecount); 5998 if (!qdesc) { 5999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6000 "0497 Failed allocate fast-path EQ\n"); 6001 goto out_free_fp_eq; 6002 } 6003 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 6004 } 6005 6006 /* 6007 * Create Complete Queues (CQs) 6008 */ 6009 6010 /* Get CQ depth from module parameter, fake the default for now */ 6011 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 6012 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 6013 6014 /* Create slow-path Mailbox Command Complete Queue */ 6015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6016 phba->sli4_hba.cq_ecount); 6017 if (!qdesc) { 6018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6019 "0500 Failed allocate slow-path mailbox CQ\n"); 6020 goto out_free_fp_eq; 6021 } 6022 phba->sli4_hba.mbx_cq = qdesc; 6023 6024 /* Create slow-path ELS Complete Queue */ 6025 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6026 phba->sli4_hba.cq_ecount); 6027 if (!qdesc) { 6028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6029 "0501 Failed allocate slow-path ELS CQ\n"); 6030 goto out_free_mbx_cq; 6031 } 6032 phba->sli4_hba.els_cq = qdesc; 6033 6034 6035 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 6036 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6037 phba->cfg_fcp_eq_count), GFP_KERNEL); 6038 if (!phba->sli4_hba.fcp_cq) { 6039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6040 "2577 Failed allocate memory for fast-path " 6041 "CQ record array\n"); 6042 goto out_free_els_cq; 6043 } 6044 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6045 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6046 phba->sli4_hba.cq_ecount); 6047 if (!qdesc) { 6048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6049 "0499 Failed allocate fast-path FCP " 6050 "CQ (%d)\n", fcp_cqidx); 6051 goto out_free_fcp_cq; 6052 } 6053 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6054 } 6055 6056 /* Create Mailbox Command Queue */ 6057 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6058 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6059 6060 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6061 phba->sli4_hba.mq_ecount); 6062 if (!qdesc) { 6063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6064 "0505 Failed allocate slow-path MQ\n"); 6065 goto out_free_fcp_cq; 6066 } 6067 phba->sli4_hba.mbx_wq = qdesc; 6068 6069 /* 6070 * Create all the Work Queues (WQs) 6071 */ 6072 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6073 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6074 6075 /* Create slow-path ELS Work Queue */ 6076 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6077 phba->sli4_hba.wq_ecount); 6078 if (!qdesc) { 6079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6080 "0504 Failed allocate slow-path ELS WQ\n"); 6081 goto out_free_mbx_wq; 6082 } 6083 phba->sli4_hba.els_wq = qdesc; 6084 6085 /* Create fast-path FCP Work Queue(s) */ 6086 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6087 phba->cfg_fcp_wq_count), GFP_KERNEL); 6088 if (!phba->sli4_hba.fcp_wq) { 6089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6090 "2578 Failed allocate memory for fast-path " 6091 "WQ record array\n"); 6092 goto 
out_free_els_wq; 6093 } 6094 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6095 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6096 phba->sli4_hba.wq_ecount); 6097 if (!qdesc) { 6098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6099 "0503 Failed allocate fast-path FCP " 6100 "WQ (%d)\n", fcp_wqidx); 6101 goto out_free_fcp_wq; 6102 } 6103 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 6104 } 6105 6106 /* 6107 * Create Receive Queue (RQ) 6108 */ 6109 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6110 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6111 6112 /* Create Receive Queue for header */ 6113 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6114 phba->sli4_hba.rq_ecount); 6115 if (!qdesc) { 6116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6117 "0506 Failed allocate receive HRQ\n"); 6118 goto out_free_fcp_wq; 6119 } 6120 phba->sli4_hba.hdr_rq = qdesc; 6121 6122 /* Create Receive Queue for data */ 6123 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6124 phba->sli4_hba.rq_ecount); 6125 if (!qdesc) { 6126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6127 "0507 Failed allocate receive DRQ\n"); 6128 goto out_free_hdr_rq; 6129 } 6130 phba->sli4_hba.dat_rq = qdesc; 6131 6132 return 0; 6133 6134out_free_hdr_rq: 6135 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6136 phba->sli4_hba.hdr_rq = NULL; 6137out_free_fcp_wq: 6138 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 6139 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 6140 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6141 } 6142 kfree(phba->sli4_hba.fcp_wq); 6143out_free_els_wq: 6144 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6145 phba->sli4_hba.els_wq = NULL; 6146out_free_mbx_wq: 6147 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6148 phba->sli4_hba.mbx_wq = NULL; 6149out_free_fcp_cq: 6150 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 6151 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 6152 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6153 } 6154 kfree(phba->sli4_hba.fcp_cq); 6155out_free_els_cq: 6156 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6157 phba->sli4_hba.els_cq = NULL; 6158out_free_mbx_cq: 6159 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6160 phba->sli4_hba.mbx_cq = NULL; 6161out_free_fp_eq: 6162 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 6163 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 6164 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6165 } 6166 kfree(phba->sli4_hba.fp_eq); 6167out_free_sp_eq: 6168 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6169 phba->sli4_hba.sp_eq = NULL; 6170out_error: 6171 return -ENOMEM; 6172} 6173 6174/** 6175 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 6176 * @phba: pointer to lpfc hba data structure. 6177 * 6178 * This routine is invoked to release all the SLI4 queues allocated for the 6179 * FCoE HBA operation. 6180 * 6181 * This routine returns no value; it unconditionally frees the host-side 6182 * queue memory, so there are no error codes for the caller to check 6183 * (the function is declared void). 6184 *
6185 **/ 6186static void 6187lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6188{ 6189 int fcp_qidx; 6190 6191 /* Release mailbox command work queue */ 6192 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6193 phba->sli4_hba.mbx_wq = NULL; 6194 6195 /* Release ELS work queue */ 6196 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6197 phba->sli4_hba.els_wq = NULL; 6198 6199 /* Release FCP work queue */ 6200 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6201 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6202 kfree(phba->sli4_hba.fcp_wq); 6203 phba->sli4_hba.fcp_wq = NULL; 6204 6205 /* Release unsolicited receive queue */ 6206 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6207 phba->sli4_hba.hdr_rq = NULL; 6208 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6209 phba->sli4_hba.dat_rq = NULL; 6210 6211 /* Release ELS complete queue */ 6212 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6213 phba->sli4_hba.els_cq = NULL; 6214 6215 /* Release mailbox command complete queue */ 6216 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6217 phba->sli4_hba.mbx_cq = NULL; 6218 6219 /* Release FCP response complete queue */ 6220 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6221 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6222 kfree(phba->sli4_hba.fcp_cq); 6223 phba->sli4_hba.fcp_cq = NULL; 6224 6225 /* Release fast-path event queue */ 6226 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6227 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6228 kfree(phba->sli4_hba.fp_eq); 6229 phba->sli4_hba.fp_eq = NULL; 6230 6231 /* Release slow-path event queue */ 6232 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6233 phba->sli4_hba.sp_eq = NULL; 6234 6235 return; 6236} 6237 6238/** 6239 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 6240 * @phba: pointer to lpfc hba data structure. 6241 * 6242 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 6243 * operation. 6244 * 6245 * Return codes 6246 * 0 - successful 6247 * -ENOMEM - No available memory 6248 * -EIO - The mailbox failed to complete successfully.
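 *
 * The expected ordering relative to the other queue routines in this
 * file, in sketch form:
 *
 *	lpfc_sli4_queue_create(phba);	allocate host-side queue memory
 *	lpfc_sli4_queue_setup(phba);	post the queues to the port
 *	...
 *	lpfc_sli4_queue_unset(phba);	tear the queues down on the port
 *	lpfc_sli4_queue_destroy(phba);	free host-side queue memory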
6249 **/ 6250int 6251lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6252{ 6253 int rc = -ENOMEM; 6254 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6255 int fcp_cq_index = 0; 6256 6257 /* 6258 * Set up Event Queues (EQs) 6259 */ 6260 6261 /* Set up slow-path event queue */ 6262 if (!phba->sli4_hba.sp_eq) { 6263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6264 "0520 Slow-path EQ not allocated\n"); 6265 goto out_error; 6266 } 6267 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6268 LPFC_SP_DEF_IMAX); 6269 if (rc) { 6270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6271 "0521 Failed setup of slow-path EQ: " 6272 "rc = 0x%x\n", rc); 6273 goto out_error; 6274 } 6275 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6276 "2583 Slow-path EQ setup: queue-id=%d\n", 6277 phba->sli4_hba.sp_eq->queue_id); 6278 6279 /* Set up fast-path event queue */ 6280 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6281 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6283 "0522 Fast-path EQ (%d) not " 6284 "allocated\n", fcp_eqidx); 6285 goto out_destroy_fp_eq; 6286 } 6287 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6288 phba->cfg_fcp_imax); 6289 if (rc) { 6290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6291 "0523 Failed setup of fast-path EQ " 6292 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6293 goto out_destroy_fp_eq; 6294 } 6295 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6296 "2584 Fast-path EQ setup: " 6297 "queue[%d]-id=%d\n", fcp_eqidx, 6298 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6299 } 6300 6301 /* 6302 * Set up Complete Queues (CQs) 6303 */ 6304 6305 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6306 if (!phba->sli4_hba.mbx_cq) { 6307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6308 "0528 Mailbox CQ not allocated\n"); 6309 goto out_destroy_fp_eq; 6310 } 6311 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6312 LPFC_MCQ, LPFC_MBOX); 6313 if (rc) { 6314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6315 "0529 Failed setup of slow-path mailbox CQ: " 6316 "rc = 0x%x\n", rc); 6317 goto out_destroy_fp_eq; 6318 } 6319 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6320 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6321 phba->sli4_hba.mbx_cq->queue_id, 6322 phba->sli4_hba.sp_eq->queue_id); 6323 6324 /* Set up slow-path ELS Complete Queue */ 6325 if (!phba->sli4_hba.els_cq) { 6326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6327 "0530 ELS CQ not allocated\n"); 6328 goto out_destroy_mbx_cq; 6329 } 6330 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6331 LPFC_WCQ, LPFC_ELS); 6332 if (rc) { 6333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6334 "0531 Failed setup of slow-path ELS CQ: " 6335 "rc = 0x%x\n", rc); 6336 goto out_destroy_mbx_cq; 6337 } 6338 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6339 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6340 phba->sli4_hba.els_cq->queue_id, 6341 phba->sli4_hba.sp_eq->queue_id); 6342 6343 /* Set up fast-path FCP Response Complete Queue */ 6344 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6345 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6347 "0526 Fast-path FCP CQ (%d) not " 6348 "allocated\n", fcp_cqidx); 6349 goto out_destroy_fcp_cq; 6350 } 6351 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6352 phba->sli4_hba.fp_eq[fcp_cqidx], 6353 LPFC_WCQ, LPFC_FCP); 6354 if (rc) { 6355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6356 "0527 Failed setup of fast-path FCP " 6357 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 6358 goto out_destroy_fcp_cq; 6359 } 6360 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6361 "2588 FCP CQ setup: cq[%d]-id=%d, " 6362 "parent eq[%d]-id=%d\n", 6363 fcp_cqidx, 6364 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6365 fcp_cqidx, 6366 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6367 } 6368 6369 /* 6370 * Set up all the Work Queues (WQs) 6371 */ 6372 6373 /* Set up Mailbox Command Queue */ 6374 if (!phba->sli4_hba.mbx_wq) { 6375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6376 "0538 Slow-path MQ not allocated\n"); 6377 goto out_destroy_fcp_cq; 6378 } 6379 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6380 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6381 if (rc) { 6382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6383 "0539 Failed setup of slow-path MQ: " 6384 "rc = 0x%x\n", rc); 6385 goto out_destroy_fcp_cq; 6386 } 6387 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6388 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6389 phba->sli4_hba.mbx_wq->queue_id, 6390 phba->sli4_hba.mbx_cq->queue_id); 6391 6392 /* Set up slow-path ELS Work Queue */ 6393 if (!phba->sli4_hba.els_wq) { 6394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6395 "0536 Slow-path ELS WQ not allocated\n"); 6396 goto out_destroy_mbx_wq; 6397 } 6398 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6399 phba->sli4_hba.els_cq, LPFC_ELS); 6400 if (rc) { 6401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6402 "0537 Failed setup of slow-path ELS WQ: " 6403 "rc = 0x%x\n", rc); 6404 goto out_destroy_mbx_wq; 6405 } 6406 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6407 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6408 phba->sli4_hba.els_wq->queue_id, 6409 phba->sli4_hba.els_cq->queue_id); 6410 6411 /* Set up fast-path FCP Work Queue */ 6412 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6413 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6415 "0534 Fast-path FCP WQ (%d) not " 6416 "allocated\n", fcp_wqidx); 6417 goto out_destroy_fcp_wq; 6418 } 6419 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6420 phba->sli4_hba.fcp_cq[fcp_cq_index], 6421 LPFC_FCP); 6422 if (rc) { 6423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6424 "0535 Failed setup of fast-path FCP " 6425 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6426 goto out_destroy_fcp_wq; 6427 } 6428 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6429 "2591 FCP WQ setup: wq[%d]-id=%d, " 6430 "parent cq[%d]-id=%d\n", 6431 fcp_wqidx, 6432 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6433 fcp_cq_index, 6434 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6435 /* Round robin FCP Work Queue's Completion Queue assignment */ 6436 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6437 } 6438 6439 /* 6440 * Create Receive Queue (RQ) 6441 */ 6442 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6444 "0540 Receive Queue not allocated\n"); 6445 goto out_destroy_fcp_wq; 6446 } 6447 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6448 phba->sli4_hba.els_cq, LPFC_USOL); 6449 if (rc) { 6450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6451 "0541 Failed setup of Receive Queue: " 6452 "rc = 0x%x\n", rc); 6453 goto out_destroy_fcp_wq; 6454 } 6455 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6456 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6457 "parent cq-id=%d\n", 6458 phba->sli4_hba.hdr_rq->queue_id, 6459 phba->sli4_hba.dat_rq->queue_id, 6460 phba->sli4_hba.els_cq->queue_id); 6461 return 0; 6462 6463out_destroy_fcp_wq: 6464 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--) 6465 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6466 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6467out_destroy_mbx_wq: 6468 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6469out_destroy_fcp_cq: 6470 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6471 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6472 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6473out_destroy_mbx_cq: 6474 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6475out_destroy_fp_eq: 6476 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6477 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6478 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6479out_error: 6480 return rc; 6481} 6482 6483/** 6484 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6485 * @phba: pointer to lpfc hba data structure. 6486 * 6487 * This routine is invoked to unset all the SLI4 queues set up for the FCoE 6488 * HBA operation. 6489 * 6490 * This routine returns no value; the results of the individual queue 6491 * destroy mailbox commands are not propagated, so there are no error 6492 * codes for the caller to check (the function is declared void). 6493 * 6494 **/ 6495void 6496lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6497{ 6498 int fcp_qidx; 6499 6500 /* Unset mailbox command work queue */ 6501 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6502 /* Unset ELS work queue */ 6503 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6504 /* Unset unsolicited receive queue */ 6505 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6506 /* Unset FCP work queue */ 6507 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6508 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6509 /* Unset mailbox command complete queue */ 6510 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6511 /* Unset ELS complete queue */ 6512 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6513 /* Unset FCP response complete queue */ 6514 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6515 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6516 /* Unset fast-path event queue */ 6517 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6518 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6519 /* Unset slow-path event queue */ 6520 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6521} 6522 6523/** 6524 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6525 * @phba: pointer to lpfc hba data structure. 6526 * 6527 * This routine is invoked to allocate and set up a pool of completion queue 6528 * events. The body of the completion queue event is a completion queue entry 6529 * (CQE). For now, this pool is used for the interrupt service routine to queue 6530 * the following HBA completion queue events for the worker thread to process: 6531 * - Mailbox asynchronous events 6532 * - Receive queue completion unsolicited events 6533 * Later, this can be used for all the slow-path events.
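 *
 * A typical producer/consumer pairing, as a sketch:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);	in the ISR path
 *	...queue cq_event on a work list for the worker thread...
 *	lpfc_sli4_cq_event_release(phba, cq_event);	once processed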
6534 * 6535 * Return codes 6536 * 0 - successful 6537 * -ENOMEM - No available memory 6538 **/ 6539static int 6540lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6541{ 6542 struct lpfc_cq_event *cq_event; 6543 int i; 6544 6545 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6546 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6547 if (!cq_event) 6548 goto out_pool_create_fail; 6549 list_add_tail(&cq_event->list, 6550 &phba->sli4_hba.sp_cqe_event_pool); 6551 } 6552 return 0; 6553 6554out_pool_create_fail: 6555 lpfc_sli4_cq_event_pool_destroy(phba); 6556 return -ENOMEM; 6557} 6558 6559/** 6560 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6561 * @phba: pointer to lpfc hba data structure. 6562 * 6563 * This routine is invoked to free the pool of completion queue events at 6564 * driver unload time. Note that it is the responsibility of the driver 6565 * cleanup routine to free all the outstanding completion-queue events 6566 * allocated from this pool back into the pool before invoking this routine 6567 * to destroy the pool. 6568 **/ 6569static void 6570lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6571{ 6572 struct lpfc_cq_event *cq_event, *next_cq_event; 6573 6574 list_for_each_entry_safe(cq_event, next_cq_event, 6575 &phba->sli4_hba.sp_cqe_event_pool, list) { 6576 list_del(&cq_event->list); 6577 kfree(cq_event); 6578 } 6579} 6580 6581/** 6582 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6583 * @phba: pointer to lpfc hba data structure. 6584 * 6585 * This routine is the lock-free version of the API invoked to allocate a 6586 * completion-queue event from the free pool. 6587 * 6588 * Return: Pointer to the newly allocated completion-queue event if successful, 6589 * NULL otherwise. 6590 **/ 6591struct lpfc_cq_event * 6592__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6593{ 6594 struct lpfc_cq_event *cq_event = NULL; 6595 6596 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6597 struct lpfc_cq_event, list); 6598 return cq_event; 6599} 6600 6601/** 6602 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6603 * @phba: pointer to lpfc hba data structure. 6604 * 6605 * This routine is the locked version of the API invoked to allocate a 6606 * completion-queue event from the free pool. 6607 * 6608 * Return: Pointer to the newly allocated completion-queue event if successful, 6609 * NULL otherwise. 6610 **/ 6611struct lpfc_cq_event * 6612lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6613{ 6614 struct lpfc_cq_event *cq_event; 6615 unsigned long iflags; 6616 6617 spin_lock_irqsave(&phba->hbalock, iflags); 6618 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6619 spin_unlock_irqrestore(&phba->hbalock, iflags); 6620 return cq_event; 6621} 6622 6623/** 6624 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6625 * @phba: pointer to lpfc hba data structure. 6626 * @cq_event: pointer to the completion queue event to be freed. 6627 * 6628 * This routine is the lock-free version of the API invoked to release a 6629 * completion-queue event back into the free pool. 6630 **/ 6631void 6632__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6633 struct lpfc_cq_event *cq_event) 6634{ 6635 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6636} 6637 6638/** 6639 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6640 * @phba: pointer to lpfc hba data structure.
6641 * @cq_event: pointer to the completion queue event to be freed. 6642 * 6643 * This routine is the locked version of the API invoked to release a 6644 * completion-queue event back into the free pool. 6645 **/ 6646void 6647lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6648 struct lpfc_cq_event *cq_event) 6649{ 6650 unsigned long iflags; 6651 spin_lock_irqsave(&phba->hbalock, iflags); 6652 __lpfc_sli4_cq_event_release(phba, cq_event); 6653 spin_unlock_irqrestore(&phba->hbalock, iflags); 6654} 6655 6656/** 6657 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 6658 * @phba: pointer to lpfc hba data structure. 6659 * 6660 * This routine is invoked to free all the pending completion-queue events 6661 * back into the free pool for device reset. 6662 **/ 6663static void 6664lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 6665{ 6666 LIST_HEAD(cqelist); 6667 struct lpfc_cq_event *cqe; 6668 unsigned long iflags; 6669 6670 /* Retrieve all the pending WCQEs from pending WCQE lists */ 6671 spin_lock_irqsave(&phba->hbalock, iflags); 6672 /* Pending FCP XRI abort events */ 6673 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 6674 &cqelist); 6675 /* Pending ELS XRI abort events */ 6676 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 6677 &cqelist); 6678 /* Pending async events */ 6679 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 6680 &cqelist); 6681 spin_unlock_irqrestore(&phba->hbalock, iflags); 6682 6683 while (!list_empty(&cqelist)) { 6684 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 6685 lpfc_sli4_cq_event_release(phba, cqe); 6686 } 6687} 6688 6689/** 6690 * lpfc_pci_function_reset - Reset pci function. 6691 * @phba: pointer to lpfc hba data structure. 6692 * 6693 * This routine is invoked to request a PCI function reset. It destroys 6694 * all resources assigned to the PCI function that originated this request. 6695 * 6696 * Return codes 6697 * 0 - successful 6698 * -ENOMEM - No available memory 6699 * -EIO - The mailbox failed to complete successfully.
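 *
 * For if_type 2 ports the reset performed below is a register
 * handshake, roughly:
 *
 *	write LPFC_SLIPORT_INIT_PORT to the port CTRL register;
 *	poll the port STATUS register for RDY, 10 ms at a time;
 *	on RN (reset needed), retry up to MAX_IF_TYPE_2_RESETS times.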
6700 **/ 6701int 6702lpfc_pci_function_reset(struct lpfc_hba *phba) 6703{ 6704 LPFC_MBOXQ_t *mboxq; 6705 uint32_t rc = 0, if_type; 6706 uint32_t shdr_status, shdr_add_status; 6707 uint32_t rdy_chk, num_resets = 0, reset_again = 0; 6708 union lpfc_sli4_cfg_shdr *shdr; 6709 struct lpfc_register reg_data; 6710 6711 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6712 switch (if_type) { 6713 case LPFC_SLI_INTF_IF_TYPE_0: 6714 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6715 GFP_KERNEL); 6716 if (!mboxq) { 6717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6718 "0494 Unable to allocate memory for " 6719 "issuing SLI_FUNCTION_RESET mailbox " 6720 "command\n"); 6721 return -ENOMEM; 6722 } 6723 6724 /* Setup PCI function reset mailbox-ioctl command */ 6725 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6726 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 6727 LPFC_SLI4_MBX_EMBED); 6728 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6729 shdr = (union lpfc_sli4_cfg_shdr *) 6730 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6731 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6732 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6733 &shdr->response); 6734 if (rc != MBX_TIMEOUT) 6735 mempool_free(mboxq, phba->mbox_mem_pool); 6736 if (shdr_status || shdr_add_status || rc) { 6737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6738 "0495 SLI_FUNCTION_RESET mailbox " 6739 "failed with status x%x add_status x%x," 6740 " mbx status x%x\n", 6741 shdr_status, shdr_add_status, rc); 6742 rc = -ENXIO; 6743 } 6744 break; 6745 case LPFC_SLI_INTF_IF_TYPE_2: 6746 for (num_resets = 0; 6747 num_resets < MAX_IF_TYPE_2_RESETS; 6748 num_resets++) { 6749 reg_data.word0 = 0; 6750 bf_set(lpfc_sliport_ctrl_end, &reg_data, 6751 LPFC_SLIPORT_LITTLE_ENDIAN); 6752 bf_set(lpfc_sliport_ctrl_ip, &reg_data, 6753 LPFC_SLIPORT_INIT_PORT); 6754 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 6755 CTRLregaddr); 6756 6757 /* 6758 * Poll the Port Status Register and wait for RDY for 6759 * up to 10 seconds. If the port doesn't respond, treat 6760 * it as an error. If the port responds with RN, start 6761 * the loop again. 6762 */ 6763 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { 6764 reg_data.word0 = 6765 readl(phba->sli4_hba.u.if_type2. 6766 STATUSregaddr); 6767 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 6768 break; 6769 if (bf_get(lpfc_sliport_status_rn, &reg_data)) { 6770 reset_again++; 6771 break; 6772 } 6773 msleep(10); 6774 } 6775 6776 /* 6777 * If the port responds to the init request with 6778 * reset needed, delay for a bit and restart the loop. 6779 */ 6780 if (reset_again) { 6781 msleep(10); 6782 reset_again = 0; 6783 continue; 6784 } 6785 6786 /* Detect any port errors. */ 6787 reg_data.word0 = readl(phba->sli4_hba.u.if_type2. 6788 STATUSregaddr); 6789 if ((bf_get(lpfc_sliport_status_err, &reg_data)) || 6790 (rdy_chk >= 1000)) { 6791 phba->work_status[0] = readl( 6792 phba->sli4_hba.u.if_type2.ERR1regaddr); 6793 phba->work_status[1] = readl( 6794 phba->sli4_hba.u.if_type2.ERR2regaddr); 6795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6796 "2890 Port Error Detected " 6797 "during Port Reset: " 6798 "port status reg 0x%x, " 6799 "error 1=0x%x, error 2=0x%x\n", 6800 reg_data.word0, 6801 phba->work_status[0], 6802 phba->work_status[1]); 6803 rc = -ENODEV; 6804 } 6805 6806 /* 6807 * Terminate the outer loop provided the Port indicated 6808 * ready within 10 seconds.
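 * (rdy_chk counts 10 ms polls, so the 1000-poll limit bounds each
 * reset attempt at roughly 10 seconds.)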
6809 */ 6810 if (rdy_chk < 1000) 6811 break; 6812 } 6813 break; 6814 case LPFC_SLI_INTF_IF_TYPE_1: 6815 default: 6816 break; 6817 } 6818 6819 /* Catch the not-ready port failure after a port reset. */ 6820 if (num_resets >= MAX_IF_TYPE_2_RESETS) 6821 rc = -ENODEV; 6822 6823 return rc; 6824} 6825 6826/** 6827 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 6828 * @phba: pointer to lpfc hba data structure. 6829 * @cnt: number of nop mailbox commands to send. 6830 * 6831 * This routine is invoked to send @cnt NOP mailbox commands and 6832 * wait for each command to complete. 6833 * 6834 * Return: the number of NOP mailbox commands completed. 6835 **/ 6836static int 6837lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 6838{ 6839 LPFC_MBOXQ_t *mboxq; 6840 int length, cmdsent; 6841 uint32_t mbox_tmo; 6842 uint32_t rc = 0; 6843 uint32_t shdr_status, shdr_add_status; 6844 union lpfc_sli4_cfg_shdr *shdr; 6845 6846 if (cnt == 0) { 6847 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6848 "2518 Requested to send 0 NOP mailbox cmd\n"); 6849 return cnt; 6850 } 6851 6852 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6853 if (!mboxq) { 6854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6855 "2519 Unable to allocate memory for issuing " 6856 "NOP mailbox command\n"); 6857 return 0; 6858 } 6859 6860 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 6861 length = (sizeof(struct lpfc_mbx_nop) - 6862 sizeof(struct lpfc_sli4_cfg_mhdr)); 6863 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6864 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 6865 6866 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6867 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 6868 if (!phba->sli4_hba.intr_enable) 6869 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6870 else 6871 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 6872 if (rc == MBX_TIMEOUT) 6873 break; 6874 /* Check return status */ 6875 shdr = (union lpfc_sli4_cfg_shdr *) 6876 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6877 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6878 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6879 &shdr->response); 6880 if (shdr_status || shdr_add_status || rc) { 6881 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6882 "2520 NOP mailbox command failed " 6883 "status x%x add_status x%x mbx " 6884 "status x%x\n", shdr_status, 6885 shdr_add_status, rc); 6886 break; 6887 } 6888 } 6889 6890 if (rc != MBX_TIMEOUT) 6891 mempool_free(mboxq, phba->mbox_mem_pool); 6892 6893 return cmdsent; 6894} 6895 6896/** 6897 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 6898 * @phba: pointer to lpfc hba data structure. 6899 * 6900 * This routine is invoked to set up the PCI device memory space for device 6901 * with SLI-4 interface spec.
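 *
 * In outline, up to three PCI resources are mapped below:
 *	resource 0 (pci_bar0_map) - SLI4 config space registers
 *	resource 2 (pci_bar1_map) - HBA control registers (if_type 0)
 *	resource 4 (pci_bar2_map) - doorbell registers (if_type 0)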
6902 * 6903 * Return codes 6904 * 0 - successful 6905 * other values - error 6906 **/ 6907static int 6908lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6909{ 6910 struct pci_dev *pdev; 6911 unsigned long bar0map_len, bar1map_len, bar2map_len; 6912 int error = -ENODEV; 6913 uint32_t if_type; 6914 6915 /* Obtain PCI device reference */ 6916 if (!phba->pcidev) 6917 return error; 6918 else 6919 pdev = phba->pcidev; 6920 6921 /* Set the device DMA mask size */ 6922 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6923 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6924 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6925 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6926 return error; 6927 } 6928 } 6929 6930 /* 6931 * The BARs and register set definitions and offset locations are 6932 * dependent on the if_type. 6933 */ 6934 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 6935 &phba->sli4_hba.sli_intf.word0)) { 6936 return error; 6937 } 6938 6939 /* There is no SLI3 failback for SLI4 devices. */ 6940 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 6941 LPFC_SLI_INTF_VALID) { 6942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6943 "2894 SLI_INTF reg contents invalid " 6944 "sli_intf reg 0x%x\n", 6945 phba->sli4_hba.sli_intf.word0); 6946 return error; 6947 } 6948 6949 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6950 /* 6951 * Get the bus address of SLI4 device BAR regions and the 6952 * number of bytes required by each mapping. The mapping of the 6953 * particular PCI BAR regions is dependent on the type of 6954 * SLI4 device. 6955 */ 6956 if (pci_resource_start(pdev, 0)) { 6957 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6958 bar0map_len = pci_resource_len(pdev, 0); 6959 6960 /* 6961 * Map SLI4 PCI Config Space Register base to a kernel virtual 6962 * addr 6963 */ 6964 phba->sli4_hba.conf_regs_memmap_p = 6965 ioremap(phba->pci_bar0_map, bar0map_len); 6966 if (!phba->sli4_hba.conf_regs_memmap_p) { 6967 dev_printk(KERN_ERR, &pdev->dev, 6968 "ioremap failed for SLI4 PCI config " 6969 "registers.\n"); 6970 goto out; 6971 } 6972 /* Set up BAR0 PCI config space register memory map */ 6973 lpfc_sli4_bar0_register_memmap(phba, if_type); 6974 } else { 6975 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6976 bar0map_len = pci_resource_len(pdev, 1); 6977 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 6978 dev_printk(KERN_ERR, &pdev->dev, 6979 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 6980 goto out; 6981 } 6982 phba->sli4_hba.conf_regs_memmap_p = 6983 ioremap(phba->pci_bar0_map, bar0map_len); 6984 if (!phba->sli4_hba.conf_regs_memmap_p) { 6985 dev_printk(KERN_ERR, &pdev->dev, 6986 "ioremap failed for SLI4 PCI config " 6987 "registers.\n"); 6988 goto out; 6989 } 6990 lpfc_sli4_bar0_register_memmap(phba, if_type); 6991 } 6992 6993 if (pci_resource_start(pdev, 2)) { 6994 /* 6995 * Map SLI4 if type 0 HBA Control Register base to a kernel 6996 * virtual address and setup the registers.
*/ 6998 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6999 bar1map_len = pci_resource_len(pdev, 2); 7000 phba->sli4_hba.ctrl_regs_memmap_p = 7001 ioremap(phba->pci_bar1_map, bar1map_len); 7002 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 7003 dev_printk(KERN_ERR, &pdev->dev, 7004 "ioremap failed for SLI4 HBA control registers.\n"); 7005 goto out_iounmap_conf; 7006 } 7007 lpfc_sli4_bar1_register_memmap(phba); 7008 } 7009 7010 if (pci_resource_start(pdev, 4)) { 7011 /* 7012 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7013 * virtual address and setup the registers. 7014 */ 7015 phba->pci_bar2_map = pci_resource_start(pdev, 4); 7016 bar2map_len = pci_resource_len(pdev, 4); 7017 phba->sli4_hba.drbl_regs_memmap_p = 7018 ioremap(phba->pci_bar2_map, bar2map_len); 7019 if (!phba->sli4_hba.drbl_regs_memmap_p) { 7020 dev_printk(KERN_ERR, &pdev->dev, 7021 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7022 goto out_iounmap_ctrl; 7023 } 7024 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 7025 if (error) 7026 goto out_iounmap_all; 7027 } 7028 7029 return 0; 7030 7031out_iounmap_all: 7032 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7033out_iounmap_ctrl: 7034 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7035out_iounmap_conf: 7036 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7037out: 7038 return error; 7039} 7040 7041/** 7042 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 7043 * @phba: pointer to lpfc hba data structure. 7044 * 7045 * This routine is invoked to unset the PCI device memory space for device 7046 * with SLI-4 interface spec. 7047 **/ 7048static void 7049lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 7050{ 7051 struct pci_dev *pdev; 7052 7053 /* Obtain PCI device reference */ 7054 if (!phba->pcidev) 7055 return; 7056 else 7057 pdev = phba->pcidev; 7058 7059 /* Free coherent DMA memory allocated */ 7060 7061 /* Unmap I/O memory space */ 7062 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7063 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7064 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7065 7066 return; 7067} 7068 7069/** 7070 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 7071 * @phba: pointer to lpfc hba data structure. 7072 * 7073 * This routine is invoked to enable the MSI-X interrupt vectors to device 7074 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 7075 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 7076 * invoked, enables either all or nothing, depending on the current 7077 * availability of PCI vector resources. The device driver is responsible 7078 * for calling the individual request_irq() to register each MSI-X vector 7079 * with an interrupt handler, which is done in this function. Note that 7080 * later, when the device is unloading, the driver should always call free_irq() 7081 * on all MSI-X vectors it has done request_irq() on before calling 7082 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device 7083 * will be left with MSI-X enabled, leaking its vectors.
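 *
 * The matching teardown must mirror this routine, as done by
 * lpfc_sli_disable_msix() below:
 *
 *	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
 *		free_irq(phba->msix_entries[i].vector, phba);
 *	pci_disable_msix(phba->pcidev);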
7084 * 7085 * Return codes 7086 * 0 - successful 7087 * other values - error 7088 **/ 7089static int 7090lpfc_sli_enable_msix(struct lpfc_hba *phba) 7091{ 7092 int rc, i; 7093 LPFC_MBOXQ_t *pmb; 7094 7095 /* Set up MSI-X multi-message vectors */ 7096 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7097 phba->msix_entries[i].entry = i; 7098 7099 /* Configure MSI-X capability structure */ 7100 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 7101 ARRAY_SIZE(phba->msix_entries)); 7102 if (rc) { 7103 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7104 "0420 PCI enable MSI-X failed (%d)\n", rc); 7105 goto msi_fail_out; 7106 } 7107 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7108 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7109 "0477 MSI-X entry[%d]: vector=x%x " 7110 "message=%d\n", i, 7111 phba->msix_entries[i].vector, 7112 phba->msix_entries[i].entry); 7113 /* 7114 * Assign MSI-X vectors to interrupt handlers 7115 */ 7116 7117 /* vector-0 is associated to slow-path handler */ 7118 rc = request_irq(phba->msix_entries[0].vector, 7119 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 7120 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7121 if (rc) { 7122 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7123 "0421 MSI-X slow-path request_irq failed " 7124 "(%d)\n", rc); 7125 goto msi_fail_out; 7126 } 7127 7128 /* vector-1 is associated to fast-path handler */ 7129 rc = request_irq(phba->msix_entries[1].vector, 7130 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 7131 LPFC_FP_DRIVER_HANDLER_NAME, phba); 7132 7133 if (rc) { 7134 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7135 "0429 MSI-X fast-path request_irq failed " 7136 "(%d)\n", rc); 7137 goto irq_fail_out; 7138 } 7139 7140 /* 7141 * Configure HBA MSI-X attention conditions to messages 7142 */ 7143 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7144 7145 if (!pmb) { 7146 rc = -ENOMEM; 7147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7148 "0474 Unable to allocate memory for issuing " 7149 "MBOX_CONFIG_MSI command\n"); 7150 goto mem_fail_out; 7151 } 7152 rc = lpfc_config_msi(phba, pmb); 7153 if (rc) 7154 goto mbx_fail_out; 7155 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7156 if (rc != MBX_SUCCESS) { 7157 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 7158 "0351 Config MSI mailbox command failed, " 7159 "mbxCmd x%x, mbxStatus x%x\n", 7160 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 7161 goto mbx_fail_out; 7162 } 7163 7164 /* Free memory allocated for mailbox command */ 7165 mempool_free(pmb, phba->mbox_mem_pool); 7166 return rc; 7167 7168mbx_fail_out: 7169 /* Free memory allocated for mailbox command */ 7170 mempool_free(pmb, phba->mbox_mem_pool); 7171 7172mem_fail_out: 7173 /* free the irq already requested */ 7174 free_irq(phba->msix_entries[1].vector, phba); 7175 7176irq_fail_out: 7177 /* free the irq already requested */ 7178 free_irq(phba->msix_entries[0].vector, phba); 7179 7180msi_fail_out: 7181 /* Unconfigure MSI-X capability structure */ 7182 pci_disable_msix(phba->pcidev); 7183 return rc; 7184} 7185 7186/** 7187 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 7188 * @phba: pointer to lpfc hba data structure. 7189 * 7190 * This routine is invoked to release the MSI-X vectors and then disable the 7191 * MSI-X interrupt mode to device with SLI-3 interface spec. 
7192 **/
7193static void
7194lpfc_sli_disable_msix(struct lpfc_hba *phba)
7195{
7196 int i;
7197 
7198 /* Free up MSI-X multi-message vectors */
7199 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7200 free_irq(phba->msix_entries[i].vector, phba);
7201 /* Disable MSI-X */
7202 pci_disable_msix(phba->pcidev);
7203 
7204 return;
7205}
7206 
7207/**
7208 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7209 * @phba: pointer to lpfc hba data structure.
7210 *
7211 * This routine is invoked to enable the MSI interrupt mode to a device with
7212 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7213 * enable the MSI vector. The device driver is responsible for calling
7214 * request_irq() to register the MSI vector with an interrupt handler, which
7215 * is done in this function.
7216 *
7217 * Return codes
7218 * 0 - successful
7219 * other values - error
7220 */
7221static int
7222lpfc_sli_enable_msi(struct lpfc_hba *phba)
7223{
7224 int rc;
7225 
7226 rc = pci_enable_msi(phba->pcidev);
7227 if (!rc)
7228 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7229 "0462 PCI enable MSI mode success.\n");
7230 else {
7231 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7232 "0471 PCI enable MSI mode failed (%d)\n", rc);
7233 return rc;
7234 }
7235 
7236 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7237 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7238 if (rc) {
7239 pci_disable_msi(phba->pcidev);
7240 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7241 "0478 MSI request_irq failed (%d)\n", rc);
7242 }
7243 return rc;
7244}
7245 
7246/**
7247 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7248 * @phba: pointer to lpfc hba data structure.
7249 *
7250 * This routine is invoked to disable the MSI interrupt mode to a device with
7251 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
7252 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7253 * results in a BUG_ON() and the device will be left with MSI enabled,
7254 * leaking its vector.
7255 */
7256static void
7257lpfc_sli_disable_msi(struct lpfc_hba *phba)
7258{
7259 free_irq(phba->pcidev->irq, phba);
7260 pci_disable_msi(phba->pcidev);
7261 return;
7262}
7263 
7264/**
7265 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7266 * @phba: pointer to lpfc hba data structure.
7267 *
7268 * This routine is invoked to enable device interrupt and associate the
7269 * driver's interrupt handler(s) with interrupt vector(s) for a device with
7270 * SLI-3 interface spec. Depending on the interrupt mode configured for the
7271 * driver, the driver will try to fall back from the configured interrupt
7272 * mode to an interrupt mode which is supported by the platform, kernel, and
7273 * device, in the order of:
7274 * MSI-X -> MSI -> IRQ.
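 *
 * Illustrative caller pattern (editor's sketch mirroring the probe path
 * later in this file, not additional driver code):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;
 *	phba->intr_mode = intr_mode;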
7275 *
7276 * Return codes
7277 * 0 - successful
7278 * other values - error
7279 **/
7280static uint32_t
7281lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7282{
7283 uint32_t intr_mode = LPFC_INTR_ERROR;
7284 int retval;
7285 
7286 if (cfg_mode == 2) {
7287 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7288 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7289 if (!retval) {
7290 /* Now, try to enable MSI-X interrupt mode */
7291 retval = lpfc_sli_enable_msix(phba);
7292 if (!retval) {
7293 /* Indicate initialization to MSI-X mode */
7294 phba->intr_type = MSIX;
7295 intr_mode = 2;
7296 }
7297 }
7298 }
7299 
7300 /* Fallback to MSI if MSI-X initialization failed */
7301 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7302 retval = lpfc_sli_enable_msi(phba);
7303 if (!retval) {
7304 /* Indicate initialization to MSI mode */
7305 phba->intr_type = MSI;
7306 intr_mode = 1;
7307 }
7308 }
7309 
7310 /* Fallback to INTx if both MSI-X/MSI initialization failed */
7311 if (phba->intr_type == NONE) {
7312 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7313 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7314 if (!retval) {
7315 /* Indicate initialization to INTx mode */
7316 phba->intr_type = INTx;
7317 intr_mode = 0;
7318 }
7319 }
7320 return intr_mode;
7321}
7322 
7323/**
7324 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7325 * @phba: pointer to lpfc hba data structure.
7326 *
7327 * This routine is invoked to disable device interrupt and disassociate the
7328 * driver's interrupt handler(s) from interrupt vector(s) to device with
7329 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7330 * release the interrupt vector(s) for the message signaled interrupt.
7331 **/
7332static void
7333lpfc_sli_disable_intr(struct lpfc_hba *phba)
7334{
7335 /* Disable the currently initialized interrupt mode */
7336 if (phba->intr_type == MSIX)
7337 lpfc_sli_disable_msix(phba);
7338 else if (phba->intr_type == MSI)
7339 lpfc_sli_disable_msi(phba);
7340 else if (phba->intr_type == INTx)
7341 free_irq(phba->pcidev->irq, phba);
7342 
7343 /* Reset interrupt management states */
7344 phba->intr_type = NONE;
7345 phba->sli.slistat.sli_intr = 0;
7346 
7347 return;
7348}
7349 
7350/**
7351 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7352 * @phba: pointer to lpfc hba data structure.
7353 *
7354 * This routine is invoked to enable the MSI-X interrupt vectors to a device
7355 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7356 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7357 * enables either all or nothing, depending on the current availability of
7358 * PCI vector resources. The device driver is responsible for calling the
7359 * individual request_irq() to register each MSI-X vector with an interrupt
7360 * handler, which is done in this function. Note that later, when the device
7361 * is unloading, the driver should always call free_irq() on all MSI-X
7362 * vectors it has done request_irq() on before calling pci_disable_msix().
7363 * Failure to do so results in a BUG_ON() and the device will be left with
7364 * MSI-X enabled, leaking its vectors.
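 *
 * When fewer vectors are available than requested, pci_enable_msix()
 * returns the number actually available, and this routine retries with
 * that count. A sketch of the idiom with generic names (assumption:
 * nvec starts at the configured EQ count):
 *
 *	rc = pci_enable_msix(pdev, entries, nvec);
 *	while (rc > 1) {
 *		nvec = rc;
 *		rc = pci_enable_msix(pdev, entries, nvec);
 *	}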
7365 *
7366 * Return codes
7367 * 0 - successful
7368 * other values - error
7369 **/
7370static int
7371lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7372{
7373 int vectors, rc, index;
7374 
7375 /* Set up MSI-X multi-message vectors */
7376 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7377 phba->sli4_hba.msix_entries[index].entry = index;
7378 
7379 /* Configure MSI-X capability structure */
7380 vectors = phba->sli4_hba.cfg_eqn;
7381enable_msix_vectors:
7382 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7383 vectors);
7384 if (rc > 1) {
7385 vectors = rc;
7386 goto enable_msix_vectors;
7387 } else if (rc) {
7388 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7389 "0484 PCI enable MSI-X failed (%d)\n", rc);
7390 goto msi_fail_out;
7391 }
7392 
7393 /* Log MSI-X vector assignment */
7394 for (index = 0; index < vectors; index++)
7395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7396 "0489 MSI-X entry[%d]: vector=x%x "
7397 "message=%d\n", index,
7398 phba->sli4_hba.msix_entries[index].vector,
7399 phba->sli4_hba.msix_entries[index].entry);
7400 /*
7401 * Assign MSI-X vectors to interrupt handlers
7402 */
7403 
7404 /* The first vector must be associated with the slow-path handler for MQ */
7405 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7406 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7407 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7408 if (rc) {
7409 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7410 "0485 MSI-X slow-path request_irq failed "
7411 "(%d)\n", rc);
7412 goto msi_fail_out;
7413 }
7414 
7415 /* The rest of the vector(s) are associated to fast-path handler(s) */
7416 for (index = 1; index < vectors; index++) {
7417 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7418 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7419 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7420 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7421 LPFC_FP_DRIVER_HANDLER_NAME,
7422 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7423 if (rc) {
7424 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7425 "0486 MSI-X fast-path (%d) "
7426 "request_irq failed (%d)\n", index, rc);
7427 goto cfg_fail_out;
7428 }
7429 }
7430 phba->sli4_hba.msix_vec_nr = vectors;
7431 
7432 return rc;
7433 
7434cfg_fail_out:
7435 /* free the irq already requested */
7436 for (--index; index >= 1; index--)
7437 free_irq(phba->sli4_hba.msix_entries[index].vector,
7438 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7439 
7440 /* free the irq already requested */
7441 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7442 
7443msi_fail_out:
7444 /* Unconfigure MSI-X capability structure */
7445 pci_disable_msix(phba->pcidev);
7446 return rc;
7447}
7448 
7449/**
7450 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7451 * @phba: pointer to lpfc hba data structure.
7452 *
7453 * This routine is invoked to release the MSI-X vectors and then disable the
7454 * MSI-X interrupt mode to a device with SLI-4 interface spec.
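 *
 * Note that free_irq() must be passed the same dev_id cookie that was
 * given to request_irq(): vector 0 was requested with @phba, and each
 * fast-path vector with its per-EQ handle (sketch of the pairing):
 *
 *	free_irq(msix_entries[0].vector, phba);
 *	free_irq(msix_entries[i].vector, &phba->sli4_hba.fcp_eq_hdl[i - 1]);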
7455 **/
7456static void
7457lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7458{
7459 int index;
7460 
7461 /* Free up MSI-X multi-message vectors */
7462 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7463 
7464 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7465 free_irq(phba->sli4_hba.msix_entries[index].vector,
7466 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7467 
7468 /* Disable MSI-X */
7469 pci_disable_msix(phba->pcidev);
7470 
7471 return;
7472}
7473 
7474/**
7475 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7476 * @phba: pointer to lpfc hba data structure.
7477 *
7478 * This routine is invoked to enable the MSI interrupt mode to a device with
7479 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7480 * to enable the MSI vector. The device driver is responsible for calling
7481 * request_irq() to register the MSI vector with an interrupt handler,
7482 * which is done in this function.
7483 *
7484 * Return codes
7485 * 0 - successful
7486 * other values - error
7487 **/
7488static int
7489lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7490{
7491 int rc, index;
7492 
7493 rc = pci_enable_msi(phba->pcidev);
7494 if (!rc)
7495 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7496 "0487 PCI enable MSI mode success.\n");
7497 else {
7498 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7499 "0488 PCI enable MSI mode failed (%d)\n", rc);
7500 return rc;
7501 }
7502 
7503 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7504 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7505 if (rc) {
7506 pci_disable_msi(phba->pcidev);
7507 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7508 "0490 MSI request_irq failed (%d)\n", rc);
7509 return rc;
7510 }
7511 
7512 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7513 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7514 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7515 }
7516 
7517 return 0;
7518}
7519 
7520/**
7521 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7522 * @phba: pointer to lpfc hba data structure.
7523 *
7524 * This routine is invoked to disable the MSI interrupt mode to a device with
7525 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7526 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7527 * results in a BUG_ON() and the device will be left with MSI enabled,
7528 * leaking its vector.
7529 **/
7530static void
7531lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7532{
7533 free_irq(phba->pcidev->irq, phba);
7534 pci_disable_msi(phba->pcidev);
7535 return;
7536}
7537 
7538/**
7539 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7540 * @phba: pointer to lpfc hba data structure.
7541 *
7542 * This routine is invoked to enable device interrupt and associate the
7543 * driver's interrupt handler(s) with interrupt vector(s) for a device with
7544 * SLI-4 interface spec. Depending on the interrupt mode configured for the
7545 * driver, the driver will try to fall back from the configured interrupt
7546 * mode to an interrupt mode which is supported by the platform, kernel,
7547 * and device, in the order of:
7548 * MSI-X -> MSI -> IRQ.
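 *
 * The cfg_mode argument and the returned intr_mode share one encoding
 * (inferred from the code below): 2 = MSI-X, 1 = MSI, 0 = INTx, with
 * LPFC_INTR_ERROR returned when no mode could be enabled, e.g.:
 *
 *	intr_mode = lpfc_sli4_enable_intr(phba, 2);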
7549 *
7550 * Return codes
7551 * 0 - successful
7552 * other values - error
7553 **/
7554static uint32_t
7555lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7556{
7557 uint32_t intr_mode = LPFC_INTR_ERROR;
7558 int retval, index;
7559 
7560 if (cfg_mode == 2) {
7561 /* Preparation before conf_msi mbox cmd */
7562 retval = 0;
7563 if (!retval) {
7564 /* Now, try to enable MSI-X interrupt mode */
7565 retval = lpfc_sli4_enable_msix(phba);
7566 if (!retval) {
7567 /* Indicate initialization to MSI-X mode */
7568 phba->intr_type = MSIX;
7569 intr_mode = 2;
7570 }
7571 }
7572 }
7573 
7574 /* Fallback to MSI if MSI-X initialization failed */
7575 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7576 retval = lpfc_sli4_enable_msi(phba);
7577 if (!retval) {
7578 /* Indicate initialization to MSI mode */
7579 phba->intr_type = MSI;
7580 intr_mode = 1;
7581 }
7582 }
7583 
7584 /* Fallback to INTx if both MSI-X/MSI initialization failed */
7585 if (phba->intr_type == NONE) {
7586 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7587 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7588 if (!retval) {
7589 /* Indicate initialization to INTx mode */
7590 phba->intr_type = INTx;
7591 intr_mode = 0;
7592 for (index = 0; index < phba->cfg_fcp_eq_count;
7593 index++) {
7594 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7595 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7596 }
7597 }
7598 }
7599 return intr_mode;
7600}
7601 
7602/**
7603 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7604 * @phba: pointer to lpfc hba data structure.
7605 *
7606 * This routine is invoked to disable device interrupt and disassociate
7607 * the driver's interrupt handler(s) from interrupt vector(s) to device
7608 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7609 * will release the interrupt vector(s) for the message signaled interrupt.
7610 **/
7611static void
7612lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7613{
7614 /* Disable the currently initialized interrupt mode */
7615 if (phba->intr_type == MSIX)
7616 lpfc_sli4_disable_msix(phba);
7617 else if (phba->intr_type == MSI)
7618 lpfc_sli4_disable_msi(phba);
7619 else if (phba->intr_type == INTx)
7620 free_irq(phba->pcidev->irq, phba);
7621 
7622 /* Reset interrupt management states */
7623 phba->intr_type = NONE;
7624 phba->sli.slistat.sli_intr = 0;
7625 
7626 return;
7627}
7628 
7629/**
7630 * lpfc_unset_hba - Unset SLI3 hba device initialization
7631 * @phba: pointer to lpfc hba data structure.
7632 *
7633 * This routine is invoked to unset the HBA device initialization steps for
7634 * a device with SLI-3 interface spec.
7635 **/
7636static void
7637lpfc_unset_hba(struct lpfc_hba *phba)
7638{
7639 struct lpfc_vport *vport = phba->pport;
7640 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7641 
7642 spin_lock_irq(shost->host_lock);
7643 vport->load_flag |= FC_UNLOADING;
7644 spin_unlock_irq(shost->host_lock);
7645 
7646 lpfc_stop_hba_timers(phba);
7647 
7648 phba->pport->work_port_events = 0;
7649 
7650 lpfc_sli_hba_down(phba);
7651 
7652 lpfc_sli_brdrestart(phba);
7653 
7654 lpfc_sli_disable_intr(phba);
7655 
7656 return;
7657}
7658 
7659/**
7660 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7661 * @phba: pointer to lpfc hba data structure.
7662 *
7663 * This routine is invoked to unset the HBA device initialization steps for
7664 * a device with SLI-4 interface spec.
7665 **/
7666static void
7667lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7668{
7669 struct lpfc_vport *vport = phba->pport;
7670 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7671 
7672 spin_lock_irq(shost->host_lock);
7673 vport->load_flag |= FC_UNLOADING;
7674 spin_unlock_irq(shost->host_lock);
7675 
7676 phba->pport->work_port_events = 0;
7677 
7678 /* Stop the SLI4 device port */
7679 lpfc_stop_port(phba);
7680 
7681 lpfc_sli4_disable_intr(phba);
7682 
7683 /* Reset SLI4 HBA FCoE function */
7684 lpfc_pci_function_reset(phba);
7685 
7686 return;
7687}
7688 
7689/**
7690 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
7691 * @phba: Pointer to HBA context object.
7692 *
7693 * This function is called in the SLI4 code path to wait for completion
7694 * of the device's busy XRI exchanges. It checks for busy XRI exchanges
7695 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
7696 * that, it checks every 30 seconds, logging an error message on each
7697 * pass, and waits indefinitely. Only when all busy XRI exchanges have
7698 * completed shall the driver unload proceed with invoking the function
7699 * reset ioctl mailbox command to the CNA and the rest of the driver
7700 * unload resource release.
7701 **/
7702static void
7703lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7704{
7705 int wait_time = 0;
7706 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7707 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7708 
7709 while (!fcp_xri_cmpl || !els_xri_cmpl) {
7710 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7711 if (!fcp_xri_cmpl)
7712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7713 "2877 FCP XRI exchange busy "
7714 "wait time: %d seconds.\n",
7715 wait_time/1000);
7716 if (!els_xri_cmpl)
7717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7718 "2878 ELS XRI exchange busy "
7719 "wait time: %d seconds.\n",
7720 wait_time/1000);
7721 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7722 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7723 } else {
7724 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7725 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7726 }
7727 fcp_xri_cmpl =
7728 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7729 els_xri_cmpl =
7730 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7731 }
7732}
7733 
7734/**
7735 * lpfc_sli4_hba_unset - Unset the fcoe hba
7736 * @phba: Pointer to HBA context object.
7737 *
7738 * This function is called in the SLI4 code path to reset the HBA's FCoE
7739 * function. The caller is not required to hold any lock. This routine
7740 * issues the PCI function reset mailbox command to reset the FCoE function.
7741 * At the end of the function, it calls the lpfc_hba_down_post function to
7742 * free any pending commands.
7743 **/
7744static void
7745lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7746{
7747 int wait_cnt = 0;
7748 LPFC_MBOXQ_t *mboxq;
7749 
7750 lpfc_stop_hba_timers(phba);
7751 phba->sli4_hba.intr_enable = 0;
7752 
7753 /*
7754 * Gracefully wait out the potential current outstanding asynchronous
7755 * mailbox command.
7756 */ 7757 7758 /* First, block any pending async mailbox command from posted */ 7759 spin_lock_irq(&phba->hbalock); 7760 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7761 spin_unlock_irq(&phba->hbalock); 7762 /* Now, trying to wait it out if we can */ 7763 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7764 msleep(10); 7765 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 7766 break; 7767 } 7768 /* Forcefully release the outstanding mailbox command if timed out */ 7769 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7770 spin_lock_irq(&phba->hbalock); 7771 mboxq = phba->sli.mbox_active; 7772 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7773 __lpfc_mbox_cmpl_put(phba, mboxq); 7774 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7775 phba->sli.mbox_active = NULL; 7776 spin_unlock_irq(&phba->hbalock); 7777 } 7778 7779 /* Abort all iocbs associated with the hba */ 7780 lpfc_sli_hba_iocb_abort(phba); 7781 7782 /* Wait for completion of device XRI exchange busy */ 7783 lpfc_sli4_xri_exchange_busy_wait(phba); 7784 7785 /* Disable PCI subsystem interrupt */ 7786 lpfc_sli4_disable_intr(phba); 7787 7788 /* Stop kthread signal shall trigger work_done one more time */ 7789 kthread_stop(phba->worker_thread); 7790 7791 /* Reset SLI4 HBA FCoE function */ 7792 lpfc_pci_function_reset(phba); 7793 7794 /* Stop the SLI4 device port */ 7795 phba->pport->work_port_events = 0; 7796} 7797 7798 /** 7799 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 7800 * @phba: Pointer to HBA context object. 7801 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 7802 * 7803 * This function is called in the SLI4 code path to read the port's 7804 * sli4 capabilities. 7805 * 7806 * This function may be be called from any context that can block-wait 7807 * for the completion. The expectation is that this routine is called 7808 * typically from probe_one or from the online routine. 
7809 **/
7810int
7811lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7812{
7813 int rc;
7814 struct lpfc_mqe *mqe;
7815 struct lpfc_pc_sli4_params *sli4_params;
7816 uint32_t mbox_tmo;
7817 
7818 rc = 0;
7819 mqe = &mboxq->u.mqe;
7820 
7821 /* Read the port's SLI4 Parameters port capabilities */
7822 lpfc_pc_sli4_params(mboxq);
7823 if (!phba->sli4_hba.intr_enable)
7824 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7825 else {
7826 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7827 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7828 }
7829 
7830 if (unlikely(rc))
7831 return 1;
7832 
7833 sli4_params = &phba->sli4_hba.pc_sli4_params;
7834 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7835 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7836 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7837 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7838 &mqe->un.sli4_params);
7839 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7840 &mqe->un.sli4_params);
7841 sli4_params->proto_types = mqe->un.sli4_params.word3;
7842 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7843 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7844 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7845 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7846 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7847 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7848 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7849 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7850 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7851 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7852 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7853 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7854 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7855 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7856 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7857 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7858 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7859 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7860 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7861 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7862 return rc;
7863}
7864 
7865/**
7866 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
7867 * @phba: Pointer to HBA context object.
7868 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7869 *
7870 * This function is called in the SLI4 code path to read the port's
7871 * sli4 capabilities.
7872 *
7873 * This function may be called from any context that can block-wait
7874 * for the completion. The expectation is that this routine is called
7875 * typically from probe_one or from the online routine.
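 *
 * A hedged call-site sketch (hypothetical, not taken from this file):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mboxq) {
 *		if (lpfc_get_sli4_parameters(phba, mboxq))
 *			rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}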
7876 **/
7877int
7878lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7879{
7880 int rc;
7881 struct lpfc_mqe *mqe = &mboxq->u.mqe;
7882 struct lpfc_pc_sli4_params *sli4_params;
7883 int length;
7884 struct lpfc_sli4_parameters *mbx_sli4_parameters;
7885 
7886 /* Read the port's SLI4 Config Parameters */
7887 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7888 sizeof(struct lpfc_sli4_cfg_mhdr));
7889 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7890 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
7891 length, LPFC_SLI4_MBX_EMBED);
7892 if (!phba->sli4_hba.intr_enable)
7893 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7894 else
7895 rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
7896 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
7897 if (unlikely(rc))
7898 return rc;
7899 sli4_params = &phba->sli4_hba.pc_sli4_params;
7900 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
7901 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
7902 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
7903 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
7904 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
7905 mbx_sli4_parameters);
7906 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
7907 mbx_sli4_parameters);
7908 if (bf_get(cfg_phwq, mbx_sli4_parameters))
7909 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
7910 else
7911 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
7912 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
7913 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
7914 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
7915 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
7916 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
7917 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
7918 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
7919 mbx_sli4_parameters);
7920 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
7921 mbx_sli4_parameters);
7922 return 0;
7923}
7924 
7925/**
7926 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7927 * @pdev: pointer to PCI device
7928 * @pid: pointer to PCI device identifier
7929 *
7930 * This routine is to be called to attach a device with SLI-3 interface spec
7931 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7932 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7933 * information of the device and driver to see if the driver states that it
7934 * can support this kind of device. If the match is successful, the driver core
7935 * invokes this routine. If this routine determines it can claim the HBA, it
7936 * does all the initialization that it needs to do to handle the HBA properly.
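 *
 * Probe dispatch sketch (assumed wiring; the generic entry point picks
 * the -s3 or -s4 variant based on the device's SLI interface type):
 *
 *	static struct pci_driver lpfc_driver = {
 *		.name = LPFC_DRIVER_NAME,
 *		.probe = lpfc_pci_probe_one,
 *		.remove = __devexit_p(lpfc_pci_remove_one),
 *	};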
7937 * 7938 * Return code 7939 * 0 - driver can claim the device 7940 * negative value - driver can not claim the device 7941 **/ 7942static int __devinit 7943lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 7944{ 7945 struct lpfc_hba *phba; 7946 struct lpfc_vport *vport = NULL; 7947 struct Scsi_Host *shost = NULL; 7948 int error; 7949 uint32_t cfg_mode, intr_mode; 7950 7951 /* Allocate memory for HBA structure */ 7952 phba = lpfc_hba_alloc(pdev); 7953 if (!phba) 7954 return -ENOMEM; 7955 7956 /* Perform generic PCI device enabling operation */ 7957 error = lpfc_enable_pci_dev(phba); 7958 if (error) { 7959 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7960 "1401 Failed to enable pci device.\n"); 7961 goto out_free_phba; 7962 } 7963 7964 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 7965 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 7966 if (error) 7967 goto out_disable_pci_dev; 7968 7969 /* Set up SLI-3 specific device PCI memory space */ 7970 error = lpfc_sli_pci_mem_setup(phba); 7971 if (error) { 7972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7973 "1402 Failed to set up pci memory space.\n"); 7974 goto out_disable_pci_dev; 7975 } 7976 7977 /* Set up phase-1 common device driver resources */ 7978 error = lpfc_setup_driver_resource_phase1(phba); 7979 if (error) { 7980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7981 "1403 Failed to set up driver resource.\n"); 7982 goto out_unset_pci_mem_s3; 7983 } 7984 7985 /* Set up SLI-3 specific device driver resources */ 7986 error = lpfc_sli_driver_resource_setup(phba); 7987 if (error) { 7988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7989 "1404 Failed to set up driver resource.\n"); 7990 goto out_unset_pci_mem_s3; 7991 } 7992 7993 /* Initialize and populate the iocb list per host */ 7994 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 7995 if (error) { 7996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7997 "1405 Failed to initialize iocb list.\n"); 7998 goto out_unset_driver_resource_s3; 7999 } 8000 8001 /* Set up common device driver resources */ 8002 error = lpfc_setup_driver_resource_phase2(phba); 8003 if (error) { 8004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8005 "1406 Failed to set up driver resource.\n"); 8006 goto out_free_iocb_list; 8007 } 8008 8009 /* Create SCSI host to the physical port */ 8010 error = lpfc_create_shost(phba); 8011 if (error) { 8012 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8013 "1407 Failed to create scsi host.\n"); 8014 goto out_unset_driver_resource; 8015 } 8016 8017 /* Configure sysfs attributes */ 8018 vport = phba->pport; 8019 error = lpfc_alloc_sysfs_attr(vport); 8020 if (error) { 8021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8022 "1476 Failed to allocate sysfs attr\n"); 8023 goto out_destroy_shost; 8024 } 8025 8026 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8027 /* Now, trying to enable interrupt and bring up the device */ 8028 cfg_mode = phba->cfg_use_msi; 8029 while (true) { 8030 /* Put device to a known state before enabling interrupt */ 8031 lpfc_stop_port(phba); 8032 /* Configure and enable interrupt */ 8033 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 8034 if (intr_mode == LPFC_INTR_ERROR) { 8035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8036 "0431 Failed to enable interrupt.\n"); 8037 error = -ENODEV; 8038 goto out_free_sysfs_attr; 8039 } 8040 /* SLI-3 HBA setup */ 8041 if (lpfc_sli_hba_setup(phba)) { 8042 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8043 "1477 Failed to set up hba\n"); 8044 error = -ENODEV; 8045 goto 
out_remove_device;
8046 }
8047 
8048 /* Wait 50ms for the interrupts of previous mailbox commands */
8049 msleep(50);
8050 /* Check active interrupts on message signaled interrupts */
8051 if (intr_mode == 0 ||
8052 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8053 /* Log the current active interrupt mode */
8054 phba->intr_mode = intr_mode;
8055 lpfc_log_intr_mode(phba, intr_mode);
8056 break;
8057 } else {
8058 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8059 "0447 Configure interrupt mode (%d) "
8060 "failed active interrupt test.\n",
8061 intr_mode);
8062 /* Disable the current interrupt mode */
8063 lpfc_sli_disable_intr(phba);
8064 /* Try next level of interrupt mode */
8065 cfg_mode = --intr_mode;
8066 }
8067 }
8068 
8069 /* Perform post initialization setup */
8070 lpfc_post_init_setup(phba);
8071 
8072 /* Check if there are static vports to be created. */
8073 lpfc_create_static_vport(phba);
8074 
8075 return 0;
8076 
8077out_remove_device:
8078 lpfc_unset_hba(phba);
8079out_free_sysfs_attr:
8080 lpfc_free_sysfs_attr(vport);
8081out_destroy_shost:
8082 lpfc_destroy_shost(phba);
8083out_unset_driver_resource:
8084 lpfc_unset_driver_resource_phase2(phba);
8085out_free_iocb_list:
8086 lpfc_free_iocb_list(phba);
8087out_unset_driver_resource_s3:
8088 lpfc_sli_driver_resource_unset(phba);
8089out_unset_pci_mem_s3:
8090 lpfc_sli_pci_mem_unset(phba);
8091out_disable_pci_dev:
8092 lpfc_disable_pci_dev(phba);
8093 if (shost)
8094 scsi_host_put(shost);
8095out_free_phba:
8096 lpfc_hba_free(phba);
8097 return error;
8098}
8099 
8100/**
8101 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8102 * @pdev: pointer to PCI device
8103 *
8104 * This routine is to be called to detach a device with SLI-3 interface
8105 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
8106 * is removed from the PCI bus, it performs all the necessary cleanup for the
8107 * HBA device to be removed from the PCI subsystem properly.
8108 **/
8109static void __devexit
8110lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8111{
8112 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8114 struct lpfc_vport **vports;
8115 struct lpfc_hba *phba = vport->phba;
8116 int i;
8117 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8118 
8119 spin_lock_irq(&phba->hbalock);
8120 vport->load_flag |= FC_UNLOADING;
8121 spin_unlock_irq(&phba->hbalock);
8122 
8123 lpfc_free_sysfs_attr(vport);
8124 
8125 /* Release all the vports against this physical port */
8126 vports = lpfc_create_vport_work_array(phba);
8127 if (vports != NULL)
8128 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8129 fc_vport_terminate(vports[i]->fc_vport);
8130 lpfc_destroy_vport_work_array(phba, vports);
8131 
8132 /* Remove FC host and then SCSI host with the physical port */
8133 fc_remove_host(shost);
8134 scsi_remove_host(shost);
8135 lpfc_cleanup(vport);
8136 
8137 /*
8138 * Bring down the SLI Layer. This step disables all interrupts,
8139 * clears the rings, discards all mailbox commands, and resets
8140 * the HBA.
8141 */ 8142 8143 /* HBA interrupt will be disabled after this call */ 8144 lpfc_sli_hba_down(phba); 8145 /* Stop kthread signal shall trigger work_done one more time */ 8146 kthread_stop(phba->worker_thread); 8147 /* Final cleanup of txcmplq and reset the HBA */ 8148 lpfc_sli_brdrestart(phba); 8149 8150 lpfc_stop_hba_timers(phba); 8151 spin_lock_irq(&phba->hbalock); 8152 list_del_init(&vport->listentry); 8153 spin_unlock_irq(&phba->hbalock); 8154 8155 lpfc_debugfs_terminate(vport); 8156 8157 /* Disable interrupt */ 8158 lpfc_sli_disable_intr(phba); 8159 8160 pci_set_drvdata(pdev, NULL); 8161 scsi_host_put(shost); 8162 8163 /* 8164 * Call scsi_free before mem_free since scsi bufs are released to their 8165 * corresponding pools here. 8166 */ 8167 lpfc_scsi_free(phba); 8168 lpfc_mem_free_all(phba); 8169 8170 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 8171 phba->hbqslimp.virt, phba->hbqslimp.phys); 8172 8173 /* Free resources associated with SLI2 interface */ 8174 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 8175 phba->slim2p.virt, phba->slim2p.phys); 8176 8177 /* unmap adapter SLIM and Control Registers */ 8178 iounmap(phba->ctrl_regs_memmap_p); 8179 iounmap(phba->slim_memmap_p); 8180 8181 lpfc_hba_free(phba); 8182 8183 pci_release_selected_regions(pdev, bars); 8184 pci_disable_device(pdev); 8185} 8186 8187/** 8188 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 8189 * @pdev: pointer to PCI device 8190 * @msg: power management message 8191 * 8192 * This routine is to be called from the kernel's PCI subsystem to support 8193 * system Power Management (PM) to device with SLI-3 interface spec. When 8194 * PM invokes this method, it quiesces the device by stopping the driver's 8195 * worker thread for the device, turning off device's interrupt and DMA, 8196 * and bring the device offline. Note that as the driver implements the 8197 * minimum PM requirements to a power-aware driver's PM support for the 8198 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8199 * to the suspend() method call will be treated as SUSPEND and the driver will 8200 * fully reinitialize its device during resume() method call, the driver will 8201 * set device to PCI_D3hot state in PCI config space instead of setting it 8202 * according to the @msg provided by the PM. 8203 * 8204 * Return code 8205 * 0 - driver suspended the device 8206 * Error otherwise 8207 **/ 8208static int 8209lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 8210{ 8211 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8212 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8213 8214 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8215 "0473 PCI device Power Management suspend.\n"); 8216 8217 /* Bring down the device */ 8218 lpfc_offline_prep(phba); 8219 lpfc_offline(phba); 8220 kthread_stop(phba->worker_thread); 8221 8222 /* Disable interrupt from device */ 8223 lpfc_sli_disable_intr(phba); 8224 8225 /* Save device state to PCI config space */ 8226 pci_save_state(pdev); 8227 pci_set_power_state(pdev, PCI_D3hot); 8228 8229 return 0; 8230} 8231 8232/** 8233 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 8234 * @pdev: pointer to PCI device 8235 * 8236 * This routine is to be called from the kernel's PCI subsystem to support 8237 * system Power Management (PM) to device with SLI-3 interface spec. 
8238 * invokes this method, it restores the device's PCI config space state and
8239 * fully reinitializes the device and brings it online. Note that, because
8240 * the driver implements only the minimum PM requirements for a power-aware
8241 * driver's suspend/resume support, all the possible PM messages (SUSPEND,
8242 * HIBERNATE, FREEZE) to the suspend() method call will be treated as
8243 * SUSPEND, the driver will fully reinitialize its device during the
8244 * resume() method call, and the device will be set to PCI_D0 directly in
8245 * PCI config space before restoring the state.
8246 *
8247 * Return code
8248 * 0 - driver resumed the device
8249 * Error otherwise
8250 **/
8251static int
8252lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8253{
8254 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8255 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8256 uint32_t intr_mode;
8257 int error;
8258 
8259 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8260 "0452 PCI device Power Management resume.\n");
8261 
8262 /* Restore device state from PCI config space */
8263 pci_set_power_state(pdev, PCI_D0);
8264 pci_restore_state(pdev);
8265 
8266 /*
8267 * As the new kernel behavior of pci_restore_state() API call clears
8268 * device saved_state flag, need to save the restored state again.
8269 */
8270 pci_save_state(pdev);
8271 
8272 if (pdev->is_busmaster)
8273 pci_set_master(pdev);
8274 
8275 /* Startup the kernel thread for this host adapter. */
8276 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8277 "lpfc_worker_%d", phba->brd_no);
8278 if (IS_ERR(phba->worker_thread)) {
8279 error = PTR_ERR(phba->worker_thread);
8280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8281 "0434 PM resume failed to start worker "
8282 "thread: error=x%x.\n", error);
8283 return error;
8284 }
8285 
8286 /* Configure and enable interrupt */
8287 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8288 if (intr_mode == LPFC_INTR_ERROR) {
8289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8290 "0430 PM resume Failed to enable interrupt\n");
8291 return -EIO;
8292 } else
8293 phba->intr_mode = intr_mode;
8294 
8295 /* Restart HBA and bring it online */
8296 lpfc_sli_brdrestart(phba);
8297 lpfc_online(phba);
8298 
8299 /* Log the current active interrupt mode */
8300 lpfc_log_intr_mode(phba, phba->intr_mode);
8301 
8302 return 0;
8303}
8304 
8305/**
8306 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8307 * @phba: pointer to lpfc hba data structure.
8308 *
8309 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8310 * aborts all the outstanding SCSI I/Os to the pci device.
8311 **/
8312static void
8313lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8314{
8315 struct lpfc_sli *psli = &phba->sli;
8316 struct lpfc_sli_ring *pring;
8317 
8318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8319 "2723 PCI channel I/O abort preparing for recovery\n");
8320 
8321 /*
8322 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
8323 * and let the SCSI mid-layer retry them to recover.
8324 */
8325 pring = &psli->ring[psli->fcp_ring];
8326 lpfc_sli_abort_iocb_ring(phba, pring);
8327}
8328 
8329/**
8330 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8331 * @phba: pointer to lpfc hba data structure.
8332 *
8333 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8334 * disables the device interrupt and pci device, and aborts the internal FCP
8335 * pending I/Os.
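 *
 * These prep routines run from the PCI error-recovery callbacks; the
 * overall flow, per the kernel's pci_error_handlers contract (sketch):
 *
 *	.error_detected()  prep for recover, reset, or permanent failure
 *	.slot_reset()      re-enable the device and restore its state
 *	.resume()          bring the HBA back online, traffic restarts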
8336 **/ 8337static void 8338lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 8339{ 8340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8341 "2710 PCI channel disable preparing for reset\n"); 8342 8343 /* Block any management I/Os to the device */ 8344 lpfc_block_mgmt_io(phba); 8345 8346 /* Block all SCSI devices' I/Os on the host */ 8347 lpfc_scsi_dev_block(phba); 8348 8349 /* stop all timers */ 8350 lpfc_stop_hba_timers(phba); 8351 8352 /* Disable interrupt and pci device */ 8353 lpfc_sli_disable_intr(phba); 8354 pci_disable_device(phba->pcidev); 8355 8356 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8357 lpfc_sli_flush_fcp_rings(phba); 8358} 8359 8360/** 8361 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 8362 * @phba: pointer to lpfc hba data structure. 8363 * 8364 * This routine is called to prepare the SLI3 device for PCI slot permanently 8365 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8366 * pending I/Os. 8367 **/ 8368static void 8369lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8370{ 8371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8372 "2711 PCI channel permanent disable for failure\n"); 8373 /* Block all SCSI devices' I/Os on the host */ 8374 lpfc_scsi_dev_block(phba); 8375 8376 /* stop all timers */ 8377 lpfc_stop_hba_timers(phba); 8378 8379 /* Clean up all driver's outstanding SCSI I/Os */ 8380 lpfc_sli_flush_fcp_rings(phba); 8381} 8382 8383/** 8384 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 8385 * @pdev: pointer to PCI device. 8386 * @state: the current PCI connection state. 8387 * 8388 * This routine is called from the PCI subsystem for I/O error handling to 8389 * device with SLI-3 interface spec. This function is called by the PCI 8390 * subsystem after a PCI bus error affecting this device has been detected. 8391 * When this function is invoked, it will need to stop all the I/Os and 8392 * interrupt(s) to the device. Once that is done, it will return 8393 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 8394 * as desired. 8395 * 8396 * Return codes 8397 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 8398 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8399 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8400 **/ 8401static pci_ers_result_t 8402lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 8403{ 8404 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8405 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8406 8407 switch (state) { 8408 case pci_channel_io_normal: 8409 /* Non-fatal error, prepare for recovery */ 8410 lpfc_sli_prep_dev_for_recover(phba); 8411 return PCI_ERS_RESULT_CAN_RECOVER; 8412 case pci_channel_io_frozen: 8413 /* Fatal error, prepare for slot reset */ 8414 lpfc_sli_prep_dev_for_reset(phba); 8415 return PCI_ERS_RESULT_NEED_RESET; 8416 case pci_channel_io_perm_failure: 8417 /* Permanent failure, prepare for device down */ 8418 lpfc_sli_prep_dev_for_perm_failure(phba); 8419 return PCI_ERS_RESULT_DISCONNECT; 8420 default: 8421 /* Unknown state, prepare and request slot reset */ 8422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8423 "0472 Unknown PCI error state: x%x\n", state); 8424 lpfc_sli_prep_dev_for_reset(phba); 8425 return PCI_ERS_RESULT_NEED_RESET; 8426 } 8427} 8428 8429/** 8430 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8431 * @pdev: pointer to PCI device. 
8432 * 8433 * This routine is called from the PCI subsystem for error handling to 8434 * device with SLI-3 interface spec. This is called after PCI bus has been 8435 * reset to restart the PCI card from scratch, as if from a cold-boot. 8436 * During the PCI subsystem error recovery, after driver returns 8437 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8438 * recovery and then call this routine before calling the .resume method 8439 * to recover the device. This function will initialize the HBA device, 8440 * enable the interrupt, but it will just put the HBA to offline state 8441 * without passing any I/O traffic. 8442 * 8443 * Return codes 8444 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8445 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8446 */ 8447static pci_ers_result_t 8448lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8449{ 8450 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8451 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8452 struct lpfc_sli *psli = &phba->sli; 8453 uint32_t intr_mode; 8454 8455 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8456 if (pci_enable_device_mem(pdev)) { 8457 printk(KERN_ERR "lpfc: Cannot re-enable " 8458 "PCI device after reset.\n"); 8459 return PCI_ERS_RESULT_DISCONNECT; 8460 } 8461 8462 pci_restore_state(pdev); 8463 8464 /* 8465 * As the new kernel behavior of pci_restore_state() API call clears 8466 * device saved_state flag, need to save the restored state again. 8467 */ 8468 pci_save_state(pdev); 8469 8470 if (pdev->is_busmaster) 8471 pci_set_master(pdev); 8472 8473 spin_lock_irq(&phba->hbalock); 8474 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8475 spin_unlock_irq(&phba->hbalock); 8476 8477 /* Configure and enable interrupt */ 8478 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8479 if (intr_mode == LPFC_INTR_ERROR) { 8480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8481 "0427 Cannot re-enable interrupt after " 8482 "slot reset.\n"); 8483 return PCI_ERS_RESULT_DISCONNECT; 8484 } else 8485 phba->intr_mode = intr_mode; 8486 8487 /* Take device offline, it will perform cleanup */ 8488 lpfc_offline_prep(phba); 8489 lpfc_offline(phba); 8490 lpfc_sli_brdrestart(phba); 8491 8492 /* Log the current active interrupt mode */ 8493 lpfc_log_intr_mode(phba, phba->intr_mode); 8494 8495 return PCI_ERS_RESULT_RECOVERED; 8496} 8497 8498/** 8499 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 8500 * @pdev: pointer to PCI device 8501 * 8502 * This routine is called from the PCI subsystem for error handling to device 8503 * with SLI-3 interface spec. It is called when kernel error recovery tells 8504 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8505 * error recovery. After this call, traffic can start to flow from this device 8506 * again. 8507 */ 8508static void 8509lpfc_io_resume_s3(struct pci_dev *pdev) 8510{ 8511 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8512 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8513 8514 /* Bring device online, it will be no-op for non-fatal error resume */ 8515 lpfc_online(phba); 8516 8517 /* Clean up Advanced Error Reporting (AER) if needed */ 8518 if (phba->hba_flag & HBA_AER_ENABLED) 8519 pci_cleanup_aer_uncorrect_error_status(pdev); 8520} 8521 8522/** 8523 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 8524 * @phba: pointer to lpfc hba data structure. 
8525 * 8526 * returns the number of ELS/CT IOCBs to reserve 8527 **/ 8528int 8529lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8530{ 8531 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8532 8533 if (phba->sli_rev == LPFC_SLI_REV4) { 8534 if (max_xri <= 100) 8535 return 10; 8536 else if (max_xri <= 256) 8537 return 25; 8538 else if (max_xri <= 512) 8539 return 50; 8540 else if (max_xri <= 1024) 8541 return 100; 8542 else 8543 return 150; 8544 } else 8545 return 0; 8546} 8547 8548/** 8549 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8550 * @pdev: pointer to PCI device 8551 * @pid: pointer to PCI device identifier 8552 * 8553 * This routine is called from the kernel's PCI subsystem to device with 8554 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8555 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8556 * information of the device and driver to see if the driver state that it 8557 * can support this kind of device. If the match is successful, the driver 8558 * core invokes this routine. If this routine determines it can claim the HBA, 8559 * it does all the initialization that it needs to do to handle the HBA 8560 * properly. 8561 * 8562 * Return code 8563 * 0 - driver can claim the device 8564 * negative value - driver can not claim the device 8565 **/ 8566static int __devinit 8567lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 8568{ 8569 struct lpfc_hba *phba; 8570 struct lpfc_vport *vport = NULL; 8571 struct Scsi_Host *shost = NULL; 8572 int error; 8573 uint32_t cfg_mode, intr_mode; 8574 int mcnt; 8575 8576 /* Allocate memory for HBA structure */ 8577 phba = lpfc_hba_alloc(pdev); 8578 if (!phba) 8579 return -ENOMEM; 8580 8581 /* Perform generic PCI device enabling operation */ 8582 error = lpfc_enable_pci_dev(phba); 8583 if (error) { 8584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8585 "1409 Failed to enable pci device.\n"); 8586 goto out_free_phba; 8587 } 8588 8589 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 8590 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 8591 if (error) 8592 goto out_disable_pci_dev; 8593 8594 /* Set up SLI-4 specific device PCI memory space */ 8595 error = lpfc_sli4_pci_mem_setup(phba); 8596 if (error) { 8597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8598 "1410 Failed to set up pci memory space.\n"); 8599 goto out_disable_pci_dev; 8600 } 8601 8602 /* Set up phase-1 common device driver resources */ 8603 error = lpfc_setup_driver_resource_phase1(phba); 8604 if (error) { 8605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8606 "1411 Failed to set up driver resource.\n"); 8607 goto out_unset_pci_mem_s4; 8608 } 8609 8610 /* Set up SLI-4 Specific device driver resources */ 8611 error = lpfc_sli4_driver_resource_setup(phba); 8612 if (error) { 8613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8614 "1412 Failed to set up driver resource.\n"); 8615 goto out_unset_pci_mem_s4; 8616 } 8617 8618 /* Initialize and populate the iocb list per host */ 8619 8620 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8621 "2821 initialize iocb list %d.\n", 8622 phba->cfg_iocb_cnt*1024); 8623 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 8624 8625 if (error) { 8626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8627 "1413 Failed to initialize iocb list.\n"); 8628 goto out_unset_driver_resource_s4; 8629 } 8630 8631 INIT_LIST_HEAD(&phba->active_rrq_list); 8632 8633 /* Set up common device driver resources */ 8634 error = 
lpfc_setup_driver_resource_phase2(phba); 8635 if (error) { 8636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8637 "1414 Failed to set up driver resource.\n"); 8638 goto out_free_iocb_list; 8639 } 8640 8641 /* Create SCSI host to the physical port */ 8642 error = lpfc_create_shost(phba); 8643 if (error) { 8644 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8645 "1415 Failed to create scsi host.\n"); 8646 goto out_unset_driver_resource; 8647 } 8648 8649 /* Configure sysfs attributes */ 8650 vport = phba->pport; 8651 error = lpfc_alloc_sysfs_attr(vport); 8652 if (error) { 8653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8654 "1416 Failed to allocate sysfs attr\n"); 8655 goto out_destroy_shost; 8656 } 8657 8658 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8659 /* Now, trying to enable interrupt and bring up the device */ 8660 cfg_mode = phba->cfg_use_msi; 8661 while (true) { 8662 /* Put device to a known state before enabling interrupt */ 8663 lpfc_stop_port(phba); 8664 /* Configure and enable interrupt */ 8665 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 8666 if (intr_mode == LPFC_INTR_ERROR) { 8667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8668 "0426 Failed to enable interrupt.\n"); 8669 error = -ENODEV; 8670 goto out_free_sysfs_attr; 8671 } 8672 /* Default to single FCP EQ for non-MSI-X */ 8673 if (phba->intr_type != MSIX) 8674 phba->cfg_fcp_eq_count = 1; 8675 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 8676 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 8677 /* Set up SLI-4 HBA */ 8678 if (lpfc_sli4_hba_setup(phba)) { 8679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8680 "1421 Failed to set up hba\n"); 8681 error = -ENODEV; 8682 goto out_disable_intr; 8683 } 8684 8685 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 8686 if (intr_mode != 0) 8687 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 8688 LPFC_ACT_INTR_CNT); 8689 8690 /* Check active interrupts received only for MSI/MSI-X */ 8691 if (intr_mode == 0 || 8692 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 8693 /* Log the current active interrupt mode */ 8694 phba->intr_mode = intr_mode; 8695 lpfc_log_intr_mode(phba, intr_mode); 8696 break; 8697 } 8698 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8699 "0451 Configure interrupt mode (%d) " 8700 "failed active interrupt test.\n", 8701 intr_mode); 8702 /* Unset the previous SLI-4 HBA setup. */ 8703 /* 8704 * TODO: Is this operation compatible with IF TYPE 2 8705 * devices? All port state is deleted and cleared. 8706 */ 8707 lpfc_sli4_unset_hba(phba); 8708 /* Try next level of interrupt mode */ 8709 cfg_mode = --intr_mode; 8710 } 8711 8712 /* Perform post initialization setup */ 8713 lpfc_post_init_setup(phba); 8714 8715 /* Check if there are static vports to be created. 
*/ 8716 lpfc_create_static_vport(phba); 8717 8718 return 0; 8719 8720out_disable_intr: 8721 lpfc_sli4_disable_intr(phba); 8722out_free_sysfs_attr: 8723 lpfc_free_sysfs_attr(vport); 8724out_destroy_shost: 8725 lpfc_destroy_shost(phba); 8726out_unset_driver_resource: 8727 lpfc_unset_driver_resource_phase2(phba); 8728out_free_iocb_list: 8729 lpfc_free_iocb_list(phba); 8730out_unset_driver_resource_s4: 8731 lpfc_sli4_driver_resource_unset(phba); 8732out_unset_pci_mem_s4: 8733 lpfc_sli4_pci_mem_unset(phba); 8734out_disable_pci_dev: 8735 lpfc_disable_pci_dev(phba); 8736 if (shost) 8737 scsi_host_put(shost); 8738out_free_phba: 8739 lpfc_hba_free(phba); 8740 return error; 8741} 8742 8743/** 8744 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 8745 * @pdev: pointer to PCI device 8746 * 8747 * This routine is called from the kernel's PCI subsystem to device with 8748 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8749 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8750 * device to be removed from the PCI subsystem properly. 8751 **/ 8752static void __devexit 8753lpfc_pci_remove_one_s4(struct pci_dev *pdev) 8754{ 8755 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8756 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8757 struct lpfc_vport **vports; 8758 struct lpfc_hba *phba = vport->phba; 8759 int i; 8760 8761 /* Mark the device unloading flag */ 8762 spin_lock_irq(&phba->hbalock); 8763 vport->load_flag |= FC_UNLOADING; 8764 spin_unlock_irq(&phba->hbalock); 8765 8766 /* Free the HBA sysfs attributes */ 8767 lpfc_free_sysfs_attr(vport); 8768 8769 /* Release all the vports against this physical port */ 8770 vports = lpfc_create_vport_work_array(phba); 8771 if (vports != NULL) 8772 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8773 fc_vport_terminate(vports[i]->fc_vport); 8774 lpfc_destroy_vport_work_array(phba, vports); 8775 8776 /* Remove FC host and then SCSI host with the physical port */ 8777 fc_remove_host(shost); 8778 scsi_remove_host(shost); 8779 8780 /* Perform cleanup on the physical port */ 8781 lpfc_cleanup(vport); 8782 8783 /* 8784 * Bring down the SLI Layer. This step disables all interrupts, 8785 * clears the rings, discards all mailbox commands, and resets 8786 * the HBA FCoE function. 8787 */ 8788 lpfc_debugfs_terminate(vport); 8789 lpfc_sli4_hba_unset(phba); 8790 8791 spin_lock_irq(&phba->hbalock); 8792 list_del_init(&vport->listentry); 8793 spin_unlock_irq(&phba->hbalock); 8794 8795 /* Perform scsi free before driver resource_unset since scsi 8796 * buffers are released to their corresponding pools here. 8797 */ 8798 lpfc_scsi_free(phba); 8799 lpfc_sli4_driver_resource_unset(phba); 8800 8801 /* Unmap adapter Control and Doorbell registers */ 8802 lpfc_sli4_pci_mem_unset(phba); 8803 8804 /* Release PCI resources and disable device's PCI function */ 8805 scsi_host_put(shost); 8806 lpfc_disable_pci_dev(phba); 8807 8808 /* Finally, free the driver's device data structure */ 8809 lpfc_hba_free(phba); 8810 8811 return; 8812} 8813 8814/** 8815 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 8816 * @pdev: pointer to PCI device 8817 * @msg: power management message 8818 * 8819 * This routine is called from the kernel's PCI subsystem to support system 8820 * Power Management (PM) to device with SLI-4 interface spec. 
When PM invokes 8821 * this method, it quiesces the device by stopping the driver's worker 8822 * thread for the device, turning off the device's interrupts and DMA, and 8823 * bringing the device offline. Note that the driver implements only the minimum 8824 * PM requirements for a power-aware driver's suspend/resume support: all 8825 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() 8826 * method call are treated as SUSPEND and the driver will fully 8827 * reinitialize its device during the resume() method call. The driver therefore sets the 8828 * device to the PCI_D3hot state in PCI config space instead of setting it 8829 * according to the @msg provided by the PM. 8830 * 8831 * Return code 8832 * 0 - driver suspended the device 8833 * Error otherwise 8834 **/ 8835static int 8836lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 8837{ 8838 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8839 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8840 8841 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8842 "2843 PCI device Power Management suspend.\n"); 8843 8844 /* Bring down the device */ 8845 lpfc_offline_prep(phba); 8846 lpfc_offline(phba); 8847 kthread_stop(phba->worker_thread); 8848 8849 /* Disable interrupt from device */ 8850 lpfc_sli4_disable_intr(phba); 8851 8852 /* Save device state to PCI config space */ 8853 pci_save_state(pdev); 8854 pci_set_power_state(pdev, PCI_D3hot); 8855 8856 return 0; 8857} 8858 8859/** 8860 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 8861 * @pdev: pointer to PCI device 8862 * 8863 * This routine is called from the kernel's PCI subsystem to support system 8864 * Power Management (PM) on a device with SLI-4 interface spec. When PM invokes 8865 * this method, it restores the device's PCI config space state and fully 8866 * reinitializes the device and brings it online. Note that the driver 8867 * implements only the minimum PM requirements for a power-aware driver's 8868 * suspend/resume support: all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8869 * passed to the suspend() method call are treated as SUSPEND and the driver 8870 * will fully reinitialize its device during the resume() method call. The device 8871 * will therefore be set to PCI_D0 directly in PCI config space before restoring the 8872 * state. 8873 * 8874 * Return code 8875 * 0 - driver resumed the device 8876 * Error otherwise 8877 **/ 8878static int 8879lpfc_pci_resume_one_s4(struct pci_dev *pdev) 8880{ 8881 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8882 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8883 uint32_t intr_mode; 8884 int error; 8885 8886 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8887 "0292 PCI device Power Management resume.\n"); 8888 8889 /* Restore device state from PCI config space */ 8890 pci_set_power_state(pdev, PCI_D0); 8891 pci_restore_state(pdev); 8892 8893 /* 8894 * As the new kernel behavior of the pci_restore_state() API call clears 8895 * the device's saved_state flag, the restored state needs to be saved again. 8896 */ 8897 pci_save_state(pdev); 8898 8899 if (pdev->is_busmaster) 8900 pci_set_master(pdev); 8901 8902 /* Startup the kernel thread for this host adapter.
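 * The worker thread was stopped by the suspend method, so it has to be
 * restarted here before interrupts are re-enabled and the port is brought
 * back online.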
*/ 8903 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8904 "lpfc_worker_%d", phba->brd_no); 8905 if (IS_ERR(phba->worker_thread)) { 8906 error = PTR_ERR(phba->worker_thread); 8907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8908 "0293 PM resume failed to start worker " 8909 "thread: error=x%x.\n", error); 8910 return error; 8911 } 8912 8913 /* Configure and enable interrupt */ 8914 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8915 if (intr_mode == LPFC_INTR_ERROR) { 8916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8917 "0294 PM resume Failed to enable interrupt\n"); 8918 return -EIO; 8919 } else 8920 phba->intr_mode = intr_mode; 8921 8922 /* Restart HBA and bring it online */ 8923 lpfc_sli_brdrestart(phba); 8924 lpfc_online(phba); 8925 8926 /* Log the current active interrupt mode */ 8927 lpfc_log_intr_mode(phba, phba->intr_mode); 8928 8929 return 0; 8930} 8931 8932/** 8933 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 8934 * @phba: pointer to lpfc hba data structure. 8935 * 8936 * This routine is called to prepare the SLI4 device for PCI slot recover. It 8937 * aborts all the outstanding SCSI I/Os to the PCI device. 8938 **/ 8939static void 8940lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 8941{ 8942 struct lpfc_sli *psli = &phba->sli; 8943 struct lpfc_sli_ring *pring; 8944 8945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8946 "2828 PCI channel I/O abort preparing for recovery\n"); 8947 /* 8948 * There may be errored I/Os pending on the HBA; abort all I/Os on the txcmplq 8949 * and let the SCSI mid-layer retry them to recover. 8950 */ 8951 pring = &psli->ring[psli->fcp_ring]; 8952 lpfc_sli_abort_iocb_ring(phba, pring); 8953} 8954 8955/** 8956 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 8957 * @phba: pointer to lpfc hba data structure. 8958 * 8959 * This routine is called to prepare the SLI4 device for PCI slot reset. It 8960 * disables the device interrupt and PCI device, and aborts the internal FCP 8961 * pending I/Os. 8962 **/ 8963static void 8964lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 8965{ 8966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8967 "2826 PCI channel disable preparing for reset\n"); 8968 8969 /* Block any management I/Os to the device */ 8970 lpfc_block_mgmt_io(phba); 8971 8972 /* Block all SCSI devices' I/Os on the host */ 8973 lpfc_scsi_dev_block(phba); 8974 8975 /* stop all timers */ 8976 lpfc_stop_hba_timers(phba); 8977 8978 /* Disable interrupt and PCI device */ 8979 lpfc_sli4_disable_intr(phba); 8980 pci_disable_device(phba->pcidev); 8981 8982 /* Flush all driver's outstanding SCSI I/Os as we are about to reset */ 8983 lpfc_sli_flush_fcp_rings(phba); 8984} 8985 8986/** 8987 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 8988 * @phba: pointer to lpfc hba data structure. 8989 * 8990 * This routine is called to prepare the SLI4 device for permanently disabling 8991 * the PCI slot. It blocks the SCSI transport layer traffic and flushes the FCP 8992 * pending I/Os.
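 *
 * Once this preparation is done, the error-detected handler reports
 * PCI_ERS_RESULT_DISCONNECT to the PCI core, indicating that the device
 * cannot be recovered and no further recovery callbacks should follow.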
8993 **/ 8994static void 8995lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8996{ 8997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8998 "2827 PCI channel permanent disable for failure\n"); 8999 9000 /* Block all SCSI devices' I/Os on the host */ 9001 lpfc_scsi_dev_block(phba); 9002 9003 /* stop all timers */ 9004 lpfc_stop_hba_timers(phba); 9005 9006 /* Clean up all driver's outstanding SCSI I/Os */ 9007 lpfc_sli_flush_fcp_rings(phba); 9008} 9009 9010/** 9011 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 9012 * @pdev: pointer to PCI device. 9013 * @state: the current PCI connection state. 9014 * 9015 * This routine is called from the PCI subsystem for error handling to a device 9016 * with SLI-4 interface spec. This function is called by the PCI subsystem 9017 * after a PCI bus error affecting this device has been detected. When this 9018 * function is invoked, it will need to stop all the I/Os and interrupt(s) 9019 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 9020 * for the PCI subsystem to perform proper recovery as desired. 9021 * 9022 * Return codes 9023 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9024 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9025 **/ 9026static pci_ers_result_t 9027lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 9028{ 9029 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9030 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9031 9032 switch (state) { 9033 case pci_channel_io_normal: 9034 /* Non-fatal error, prepare for recovery */ 9035 lpfc_sli4_prep_dev_for_recover(phba); 9036 return PCI_ERS_RESULT_CAN_RECOVER; 9037 case pci_channel_io_frozen: 9038 /* Fatal error, prepare for slot reset */ 9039 lpfc_sli4_prep_dev_for_reset(phba); 9040 return PCI_ERS_RESULT_NEED_RESET; 9041 case pci_channel_io_perm_failure: 9042 /* Permanent failure, prepare for device down */ 9043 lpfc_sli4_prep_dev_for_perm_failure(phba); 9044 return PCI_ERS_RESULT_DISCONNECT; 9045 default: 9046 /* Unknown state, prepare and request slot reset */ 9047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9048 "2825 Unknown PCI error state: x%x\n", state); 9049 lpfc_sli4_prep_dev_for_reset(phba); 9050 return PCI_ERS_RESULT_NEED_RESET; 9051 } 9052} 9053 9054/** 9055 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch 9056 * @pdev: pointer to PCI device. 9057 * 9058 * This routine is called from the PCI subsystem for error handling to a device 9059 * with SLI-4 interface spec. It is called after PCI bus has been reset to 9060 * restart the PCI card from scratch, as if from a cold-boot. During the 9061 * PCI subsystem error recovery, after the driver returns 9062 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 9063 * recovery and then call this routine before calling the .resume method to 9064 * recover the device. This function will initialize the HBA device and enable 9065 * the interrupt, but it will just put the HBA into the offline state without 9066 * passing any I/O traffic.
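 * The actual SLI-4 function reset is deferred to lpfc_io_resume_s4(): it
 * is driven by a mailbox command that requires DMA to be operational,
 * which is why this routine only clears LPFC_SLI_ACTIVE and re-enables
 * the interrupt here.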
9067 * 9068 * Return codes 9069 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9070 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9071 */ 9072static pci_ers_result_t 9073lpfc_io_slot_reset_s4(struct pci_dev *pdev) 9074{ 9075 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9076 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9077 struct lpfc_sli *psli = &phba->sli; 9078 uint32_t intr_mode; 9079 9080 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 9081 if (pci_enable_device_mem(pdev)) { 9082 printk(KERN_ERR "lpfc: Cannot re-enable " 9083 "PCI device after reset.\n"); 9084 return PCI_ERS_RESULT_DISCONNECT; 9085 } 9086 9087 pci_restore_state(pdev); 9088 if (pdev->is_busmaster) 9089 pci_set_master(pdev); 9090 9091 spin_lock_irq(&phba->hbalock); 9092 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 9093 spin_unlock_irq(&phba->hbalock); 9094 9095 /* Configure and enable interrupt */ 9096 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 9097 if (intr_mode == LPFC_INTR_ERROR) { 9098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9099 "2824 Cannot re-enable interrupt after " 9100 "slot reset.\n"); 9101 return PCI_ERS_RESULT_DISCONNECT; 9102 } else 9103 phba->intr_mode = intr_mode; 9104 9105 /* Log the current active interrupt mode */ 9106 lpfc_log_intr_mode(phba, phba->intr_mode); 9107 9108 return PCI_ERS_RESULT_RECOVERED; 9109} 9110 9111/** 9112 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 9113 * @pdev: pointer to PCI device 9114 * 9115 * This routine is called from the PCI subsystem for error handling to a device 9116 * with SLI-4 interface spec. It is called when kernel error recovery tells 9117 * the lpfc driver that it is OK to resume normal PCI operation after PCI bus 9118 * error recovery. After this call, traffic can start to flow from this device 9119 * again. 9120 **/ 9121static void 9122lpfc_io_resume_s4(struct pci_dev *pdev) 9123{ 9124 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9125 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9126 9127 /* 9128 * In case of slot reset, as the function reset is performed through a 9129 * mailbox command which needs DMA to be enabled, this operation 9130 * has to be moved to the io resume phase. Taking the device offline 9131 * will perform the necessary cleanup. 9132 */ 9133 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 9134 /* Perform device reset */ 9135 lpfc_offline_prep(phba); 9136 lpfc_offline(phba); 9137 lpfc_sli_brdrestart(phba); 9138 /* Bring the device back online */ 9139 lpfc_online(phba); 9140 } 9141 9142 /* Clean up Advanced Error Reporting (AER) if needed */ 9143 if (phba->hba_flag & HBA_AER_ENABLED) 9144 pci_cleanup_aer_uncorrect_error_status(pdev); 9145} 9146 9147/** 9148 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 9149 * @pdev: pointer to PCI device 9150 * @pid: pointer to PCI device identifier 9151 * 9152 * This routine is to be registered to the kernel's PCI subsystem. When an 9153 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 9154 * at PCI device-specific information of the device and driver to see if the 9155 * driver states that it can support this kind of device. If the match is 9156 * successful, the driver core invokes this routine. This routine dispatches 9157 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 9158 * do all the initialization that it needs to do to handle the HBA device 9159 * properly.
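 *
 * The dispatch decision is made by reading the LPFC_SLI_INTF word from
 * PCI config space: a valid SLI_INTF word whose SLI revision field is 4
 * selects the SLI-4 probe path, and everything else falls back to the
 * SLI-3 probe path.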
9160 * 9161 * Return code 9162 * 0 - driver can claim the device 9163 * negative value - driver can not claim the device 9164 **/ 9165static int __devinit 9166lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 9167{ 9168 int rc; 9169 struct lpfc_sli_intf intf; 9170 9171 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 9172 return -ENODEV; 9173 9174 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 9175 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 9176 rc = lpfc_pci_probe_one_s4(pdev, pid); 9177 else 9178 rc = lpfc_pci_probe_one_s3(pdev, pid); 9179 9180 return rc; 9181} 9182 9183/** 9184 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 9185 * @pdev: pointer to PCI device 9186 * 9187 * This routine is to be registered to the kernel's PCI subsystem. When an 9188 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 9189 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 9190 * remove routine, which will perform all the necessary cleanup for the 9191 * device to be removed from the PCI subsystem properly. 9192 **/ 9193static void __devexit 9194lpfc_pci_remove_one(struct pci_dev *pdev) 9195{ 9196 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9197 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9198 9199 switch (phba->pci_dev_grp) { 9200 case LPFC_PCI_DEV_LP: 9201 lpfc_pci_remove_one_s3(pdev); 9202 break; 9203 case LPFC_PCI_DEV_OC: 9204 lpfc_pci_remove_one_s4(pdev); 9205 break; 9206 default: 9207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9208 "1424 Invalid PCI device group: 0x%x\n", 9209 phba->pci_dev_grp); 9210 break; 9211 } 9212 return; 9213} 9214 9215/** 9216 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 9217 * @pdev: pointer to PCI device 9218 * @msg: power management message 9219 * 9220 * This routine is to be registered to the kernel's PCI subsystem to support 9221 * system Power Management (PM). When PM invokes this method, it dispatches 9222 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 9223 * suspend the device. 9224 * 9225 * Return code 9226 * 0 - driver suspended the device 9227 * Error otherwise 9228 **/ 9229static int 9230lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 9231{ 9232 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9233 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9234 int rc = -ENODEV; 9235 9236 switch (phba->pci_dev_grp) { 9237 case LPFC_PCI_DEV_LP: 9238 rc = lpfc_pci_suspend_one_s3(pdev, msg); 9239 break; 9240 case LPFC_PCI_DEV_OC: 9241 rc = lpfc_pci_suspend_one_s4(pdev, msg); 9242 break; 9243 default: 9244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9245 "1425 Invalid PCI device group: 0x%x\n", 9246 phba->pci_dev_grp); 9247 break; 9248 } 9249 return rc; 9250} 9251 9252/** 9253 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 9254 * @pdev: pointer to PCI device 9255 * 9256 * This routine is to be registered to the kernel's PCI subsystem to support 9257 * system Power Management (PM). When PM invokes this method, it dispatches 9258 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 9259 * resume the device. 
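 *
 * The SLI-3 versus SLI-4 decision is based on the device group
 * (phba->pci_dev_grp) recorded at probe time; an unrecognized group is
 * logged and the call fails with -ENODEV.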
9260 * 9261 * Return code 9262 * 0 - driver resumed the device 9263 * Error otherwise 9264 **/ 9265static int 9266lpfc_pci_resume_one(struct pci_dev *pdev) 9267{ 9268 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9269 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9270 int rc = -ENODEV; 9271 9272 switch (phba->pci_dev_grp) { 9273 case LPFC_PCI_DEV_LP: 9274 rc = lpfc_pci_resume_one_s3(pdev); 9275 break; 9276 case LPFC_PCI_DEV_OC: 9277 rc = lpfc_pci_resume_one_s4(pdev); 9278 break; 9279 default: 9280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9281 "1426 Invalid PCI device group: 0x%x\n", 9282 phba->pci_dev_grp); 9283 break; 9284 } 9285 return rc; 9286} 9287 9288/** 9289 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 9290 * @pdev: pointer to PCI device. 9291 * @state: the current PCI connection state. 9292 * 9293 * This routine is registered to the PCI subsystem for error handling. This 9294 * function is called by the PCI subsystem after a PCI bus error affecting 9295 * this device has been detected. When this routine is invoked, it dispatches 9296 * the action to the proper SLI-3 or SLI-4 device error detected handling 9297 * routine, which will perform the proper error detected operation. 9298 * 9299 * Return codes 9300 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9301 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9302 **/ 9303static pci_ers_result_t 9304lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9305{ 9306 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9307 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9308 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9309 9310 switch (phba->pci_dev_grp) { 9311 case LPFC_PCI_DEV_LP: 9312 rc = lpfc_io_error_detected_s3(pdev, state); 9313 break; 9314 case LPFC_PCI_DEV_OC: 9315 rc = lpfc_io_error_detected_s4(pdev, state); 9316 break; 9317 default: 9318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9319 "1427 Invalid PCI device group: 0x%x\n", 9320 phba->pci_dev_grp); 9321 break; 9322 } 9323 return rc; 9324} 9325 9326/** 9327 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch 9328 * @pdev: pointer to PCI device. 9329 * 9330 * This routine is registered to the PCI subsystem for error handling. This 9331 * function is called after PCI bus has been reset to restart the PCI card 9332 * from scratch, as if from a cold-boot. When this routine is invoked, it 9333 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 9334 * routine, which will perform the proper device reset.
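 *
 * The reset is likewise routed by phba->pci_dev_grp; a device group that
 * is neither SLI-3 nor SLI-4 is logged and reported back as
 * PCI_ERS_RESULT_DISCONNECT.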
9335 * 9336 * Return codes 9337 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9338 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9339 **/ 9340static pci_ers_result_t 9341lpfc_io_slot_reset(struct pci_dev *pdev) 9342{ 9343 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9344 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9345 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9346 9347 switch (phba->pci_dev_grp) { 9348 case LPFC_PCI_DEV_LP: 9349 rc = lpfc_io_slot_reset_s3(pdev); 9350 break; 9351 case LPFC_PCI_DEV_OC: 9352 rc = lpfc_io_slot_reset_s4(pdev); 9353 break; 9354 default: 9355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9356 "1428 Invalid PCI device group: 0x%x\n", 9357 phba->pci_dev_grp); 9358 break; 9359 } 9360 return rc; 9361} 9362 9363/** 9364 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 9365 * @pdev: pointer to PCI device 9366 * 9367 * This routine is registered to the PCI subsystem for error handling. It 9368 * is called when kernel error recovery tells the lpfc driver that it is 9369 * OK to resume normal PCI operation after PCI bus error recovery. When 9370 * this routine is invoked, it dispatches the action to the proper SLI-3 9371 * or SLI-4 device io_resume routine, which will resume the device operation. 9372 **/ 9373static void 9374lpfc_io_resume(struct pci_dev *pdev) 9375{ 9376 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9378 9379 switch (phba->pci_dev_grp) { 9380 case LPFC_PCI_DEV_LP: 9381 lpfc_io_resume_s3(pdev); 9382 break; 9383 case LPFC_PCI_DEV_OC: 9384 lpfc_io_resume_s4(pdev); 9385 break; 9386 default: 9387 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9388 "1429 Invalid PCI device group: 0x%x\n", 9389 phba->pci_dev_grp); 9390 break; 9391 } 9392 return; 9393} 9394 9395static struct pci_device_id lpfc_id_table[] = { 9396 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 9397 PCI_ANY_ID, PCI_ANY_ID, }, 9398 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 9399 PCI_ANY_ID, PCI_ANY_ID, }, 9400 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 9401 PCI_ANY_ID, PCI_ANY_ID, }, 9402 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 9403 PCI_ANY_ID, PCI_ANY_ID, }, 9404 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 9405 PCI_ANY_ID, PCI_ANY_ID, }, 9406 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 9407 PCI_ANY_ID, PCI_ANY_ID, }, 9408 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 9409 PCI_ANY_ID, PCI_ANY_ID, }, 9410 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 9411 PCI_ANY_ID, PCI_ANY_ID, }, 9412 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 9413 PCI_ANY_ID, PCI_ANY_ID, }, 9414 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 9415 PCI_ANY_ID, PCI_ANY_ID, }, 9416 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 9417 PCI_ANY_ID, PCI_ANY_ID, }, 9418 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 9419 PCI_ANY_ID, PCI_ANY_ID, }, 9420 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 9421 PCI_ANY_ID, PCI_ANY_ID, }, 9422 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 9423 PCI_ANY_ID, PCI_ANY_ID, }, 9424 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 9425 PCI_ANY_ID, PCI_ANY_ID, }, 9426 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 9427 PCI_ANY_ID, PCI_ANY_ID, }, 9428 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 9429 PCI_ANY_ID, PCI_ANY_ID, }, 9430 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 9431 PCI_ANY_ID, PCI_ANY_ID, }, 9432 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 9433 PCI_ANY_ID, PCI_ANY_ID, }, 9434 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 9435 
PCI_ANY_ID, PCI_ANY_ID, }, 9436 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 9437 PCI_ANY_ID, PCI_ANY_ID, }, 9438 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 9439 PCI_ANY_ID, PCI_ANY_ID, }, 9440 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 9441 PCI_ANY_ID, PCI_ANY_ID, }, 9442 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 9443 PCI_ANY_ID, PCI_ANY_ID, }, 9444 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 9445 PCI_ANY_ID, PCI_ANY_ID, }, 9446 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 9447 PCI_ANY_ID, PCI_ANY_ID, }, 9448 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 9449 PCI_ANY_ID, PCI_ANY_ID, }, 9450 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 9451 PCI_ANY_ID, PCI_ANY_ID, }, 9452 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 9453 PCI_ANY_ID, PCI_ANY_ID, }, 9454 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 9455 PCI_ANY_ID, PCI_ANY_ID, }, 9456 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 9457 PCI_ANY_ID, PCI_ANY_ID, }, 9458 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 9459 PCI_ANY_ID, PCI_ANY_ID, }, 9460 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 9461 PCI_ANY_ID, PCI_ANY_ID, }, 9462 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 9463 PCI_ANY_ID, PCI_ANY_ID, }, 9464 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 9465 PCI_ANY_ID, PCI_ANY_ID, }, 9466 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 9467 PCI_ANY_ID, PCI_ANY_ID, }, 9468 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 9469 PCI_ANY_ID, PCI_ANY_ID, }, 9470 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 9471 PCI_ANY_ID, PCI_ANY_ID, }, 9472 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 9473 PCI_ANY_ID, PCI_ANY_ID, }, 9474 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9475 PCI_ANY_ID, PCI_ANY_ID, }, 9476 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 9477 PCI_ANY_ID, PCI_ANY_ID, }, 9478 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, 9479 PCI_ANY_ID, PCI_ANY_ID, }, 9480 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 9481 PCI_ANY_ID, PCI_ANY_ID, }, 9482 { 0 } 9483}; 9484 9485MODULE_DEVICE_TABLE(pci, lpfc_id_table); 9486 9487static struct pci_error_handlers lpfc_err_handler = { 9488 .error_detected = lpfc_io_error_detected, 9489 .slot_reset = lpfc_io_slot_reset, 9490 .resume = lpfc_io_resume, 9491}; 9492 9493static struct pci_driver lpfc_driver = { 9494 .name = LPFC_DRIVER_NAME, 9495 .id_table = lpfc_id_table, 9496 .probe = lpfc_pci_probe_one, 9497 .remove = __devexit_p(lpfc_pci_remove_one), 9498 .suspend = lpfc_pci_suspend_one, 9499 .resume = lpfc_pci_resume_one, 9500 .err_handler = &lpfc_err_handler, 9501}; 9502 9503/** 9504 * lpfc_init - lpfc module initialization routine 9505 * 9506 * This routine is to be invoked when the lpfc module is loaded into the 9507 * kernel. The special kernel macro module_init() is used to indicate the 9508 * role of this routine to the kernel as lpfc module entry point. 
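 *
 * Example (illustrative only; lpfc_enable_npiv is the standard lpfc
 * module parameter controlling the NPIV vport support set up in this
 * routine):
 *
 *   # modprobe lpfc lpfc_enable_npiv=1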
9509 * 9510 * Return codes 9511 * 0 - successful 9512 * -ENOMEM - FC attach transport failed 9513 * all others - failed 9514 */ 9515static int __init 9516lpfc_init(void) 9517{ 9518 int error = 0; 9519 9520 printk(LPFC_MODULE_DESC "\n"); 9521 printk(LPFC_COPYRIGHT "\n"); 9522 9523 if (lpfc_enable_npiv) { 9524 lpfc_transport_functions.vport_create = lpfc_vport_create; 9525 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 9526 } 9527 lpfc_transport_template = 9528 fc_attach_transport(&lpfc_transport_functions); 9529 if (lpfc_transport_template == NULL) 9530 return -ENOMEM; 9531 if (lpfc_enable_npiv) { 9532 lpfc_vport_transport_template = 9533 fc_attach_transport(&lpfc_vport_transport_functions); 9534 if (lpfc_vport_transport_template == NULL) { 9535 fc_release_transport(lpfc_transport_template); 9536 return -ENOMEM; 9537 } 9538 } 9539 error = pci_register_driver(&lpfc_driver); 9540 if (error) { 9541 fc_release_transport(lpfc_transport_template); 9542 if (lpfc_enable_npiv) 9543 fc_release_transport(lpfc_vport_transport_template); 9544 } 9545 9546 return error; 9547} 9548 9549/** 9550 * lpfc_exit - lpfc module removal routine 9551 * 9552 * This routine is invoked when the lpfc module is removed from the kernel. 9553 * The special kernel macro module_exit() is used to indicate the role of 9554 * this routine to the kernel as lpfc module exit point. 9555 */ 9556static void __exit 9557lpfc_exit(void) 9558{ 9559 pci_unregister_driver(&lpfc_driver); 9560 fc_release_transport(lpfc_transport_template); 9561 if (lpfc_enable_npiv) 9562 fc_release_transport(lpfc_vport_transport_template); 9563 if (_dump_buf_data) { 9564 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 9565 "_dump_buf_data at 0x%p\n", 9566 (1L << _dump_buf_data_order), _dump_buf_data); 9567 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 9568 } 9569 9570 if (_dump_buf_dif) { 9571 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 9572 "_dump_buf_dif at 0x%p\n", 9573 (1L << _dump_buf_dif_order), _dump_buf_dif); 9574 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 9575 } 9576} 9577 9578module_init(lpfc_init); 9579module_exit(lpfc_exit); 9580MODULE_LICENSE("GPL"); 9581MODULE_DESCRIPTION(LPFC_MODULE_DESC); 9582MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 9583MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 9584