lpfc_init.c revision 999d813f227435c35b44362ee82211a1458844fc
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
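
/*
 * Editor's note (assumption, not in the original source): these
 * module-scope scratch buffers are shared by the driver's debug dump
 * paths (for example the BlockGuard data/DIF dump helpers in
 * lpfc_scsi.c).  The allocation order of each buffer is recorded so it
 * can be returned to the page allocator, and _dump_buf_lock serializes
 * access to the buffers.
 */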
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
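
	/*
	 * Light Pulse ("LC") HBAs expect the GPL license key in the
	 * reserved words of the READ_NVPARM mailbox; the key text is
	 * byte-swapped to big-endian words once (guarded by init_key)
	 * before it is copied into the command below.
	 */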
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
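
	/*
	 * Pull the VPD region out of the adapter in pieces: each DUMP
	 * mailbox below returns the next chunk along with the amount of
	 * data copied in word_cnt; a zero count, or a mailbox error,
	 * terminates the loop.
	 */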
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished, or we may have
		 * hit a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
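	/*
	 * Each WWNN byte below is split into two nibbles and rendered as
	 * a hex digit: 0x30 + n yields '0'-'9' and 0x61 + (n - 10)
	 * yields 'a'-'f'.
	 */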
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
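
	/*
	 * Validate the user-requested link speed against the link media
	 * type (LMT) bits reported by READ_CONFIG; an unsupported speed
	 * is forced back to autonegotiation below.
	 */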
	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
				"1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
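
	/*
	 * Three driver timers are armed below: the ELS ring timeout
	 * (twice the resource allocation timeout fc_ratov), the
	 * heart-beat mailbox interval, and the error-attention poll
	 * that catches error attention events between interrupts.
	 */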
	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology,
		phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;

	return 0;
}
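
/*
 * lpfc_hba_init_link() above and lpfc_hba_down_link() below are
 * reachable through the lpfc_hba data structure so the link can be
 * brought up later (or taken down) when lpfc_suppress_link_up defers
 * the initial INIT_LINK.
 */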
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
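
/*
 * The SLI4 variant below first runs the common SLI3 cleanup and then
 * moves any aborted ELS SGLs and SCSI buffers back onto their free
 * lists so they can either be released or reposted.
 */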
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
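
/*
 * Wrappers such as lpfc_hba_down_post() dispatch through per-SLI-rev
 * function pointers (the driver's API jump table) so that common code
 * does not need to test the SLI revision inline.
 */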
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expired
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}
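
/*
 * Heart-beat pacing: a heart-beat mailbox is issued at most every
 * LPFC_HB_MBOX_INTERVAL seconds, and an outstanding one is given
 * LPFC_HB_MBOX_TIMEOUT seconds to complete before the handler below
 * takes the port offline.
 */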
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer has fired and the HBA-timeout event has been
 * posted. This handler performs any periodic operations needed for the
 * device. If such a periodic event has already been attended to either in
 * the interrupt handler or by processing slow-ring or fast-ring events
 * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
 * simply resets the timer for the next timeout period. If the lpfc
 * heart-beat mailbox command is configured and there is no heart-beat
 * mailbox command outstanding, a heart-beat mailbox is issued and the timer
 * is set properly. Otherwise, if there has been a heart-beat mailbox command
 * outstanding, the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it has triggered an error attention, which
	 * could cause I/Os to be dropped by the firmware. Error out the
	 * IOCBs (I/O) on the txcmplq and let the SCSI layer retry them
	 * after re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
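
/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * Editor's addition: this kernel-doc header did not exist in the original.
 * The routine posts an FC_REG_BOARD_EVENT vendor-unique event through the
 * FC transport so a management application listening for LPFC_NL_VENDOR_ID
 * events is notified of the port internal error.
 **/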
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an error attention
		 * with HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the IOCBs (I/O) on the txcmplq and let
		 * the SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine through the API jump table function pointer in the lpfc_hba
 * struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
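
/*
 * VPD is parsed below as a stream of PCI VPD resource tags: identifier
 * string (0x82) and read-write (0x91) sections are skipped, the
 * read-only section (0x90) is scanned for the SN and V1-V4 keywords,
 * and 0x78 marks the end tag.
 */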
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
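
/*
 * Each keyword record in the read-only VPD area has the form
 * <2-byte keyword><1-byte length><data> (e.g. 'S' 'N' followed by the
 * serial number bytes), which is why every branch above consumes
 * 3 + i bytes of Length.
 */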
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars.
 * It shall be returned with the model name, maximum speed, and the host bus
 * type. The @mdp passed into this function points to an array of 80 chars.
 * When the function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
Adapter"}; 1752 break; 1753 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1754 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1755 GE = 1; 1756 break; 1757 case PCI_DEVICE_ID_ZMID: 1758 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1759 break; 1760 case PCI_DEVICE_ID_ZSMB: 1761 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1762 break; 1763 case PCI_DEVICE_ID_LP101: 1764 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1765 break; 1766 case PCI_DEVICE_ID_LP10000S: 1767 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1768 break; 1769 case PCI_DEVICE_ID_LP11000S: 1770 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1771 break; 1772 case PCI_DEVICE_ID_LPE11000S: 1773 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1774 break; 1775 case PCI_DEVICE_ID_SAT: 1776 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1777 break; 1778 case PCI_DEVICE_ID_SAT_MID: 1779 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1780 break; 1781 case PCI_DEVICE_ID_SAT_SMB: 1782 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1783 break; 1784 case PCI_DEVICE_ID_SAT_DCSP: 1785 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1786 break; 1787 case PCI_DEVICE_ID_SAT_SCSP: 1788 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1789 break; 1790 case PCI_DEVICE_ID_SAT_S: 1791 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1792 break; 1793 case PCI_DEVICE_ID_HORNET: 1794 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1795 GE = 1; 1796 break; 1797 case PCI_DEVICE_ID_PROTEUS_VF: 1798 m = (typeof(m)){"LPev12000", "PCIe IOV", 1799 "Fibre Channel Adapter"}; 1800 break; 1801 case PCI_DEVICE_ID_PROTEUS_PF: 1802 m = (typeof(m)){"LPev12000", "PCIe IOV", 1803 "Fibre Channel Adapter"}; 1804 break; 1805 case PCI_DEVICE_ID_PROTEUS_S: 1806 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1807 "Fibre Channel Adapter"}; 1808 break; 1809 case PCI_DEVICE_ID_TIGERSHARK: 1810 oneConnect = 1; 1811 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1812 break; 1813 case PCI_DEVICE_ID_TOMCAT: 1814 oneConnect = 1; 1815 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1816 break; 1817 case PCI_DEVICE_ID_FALCON: 1818 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1819 "EmulexSecure Fibre"}; 1820 break; 1821 default: 1822 m = (typeof(m)){"Unknown", "", ""}; 1823 break; 1824 } 1825 1826 if (mdp && mdp[0] == '\0') 1827 snprintf(mdp, 79,"%s", m.name); 1828 /* oneConnect hba requires special processing, they are all initiators 1829 * and we put the port number on the end 1830 */ 1831 if (descp && descp[0] == '\0') { 1832 if (oneConnect) 1833 snprintf(descp, 255, 1834 "Emulex OneConnect %s, %s Initiator, Port %s", 1835 m.name, m.function, 1836 phba->Port); 1837 else 1838 snprintf(descp, 255, 1839 "Emulex %s %d%s %s %s", 1840 m.name, max_speed, (GE) ? "GE" : "Gb", 1841 m.bus, m.function); 1842 } 1843} 1844 1845/** 1846 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1847 * @phba: pointer to lpfc hba data structure. 1848 * @pring: pointer to a IOCB ring. 1849 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1850 * 1851 * This routine posts a given number of IOCBs with the associated DMA buffer 1852 * descriptors specified by the cnt argument to the given IOCB ring. 1853 * 1854 * Return codes 1855 * The number of IOCBs NOT able to be posted to the IOCB ring. 
1856 **/ 1857int 1858lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1859{ 1860 IOCB_t *icmd; 1861 struct lpfc_iocbq *iocb; 1862 struct lpfc_dmabuf *mp1, *mp2; 1863 1864 cnt += pring->missbufcnt; 1865 1866 /* While there are buffers to post */ 1867 while (cnt > 0) { 1868 /* Allocate buffer for command iocb */ 1869 iocb = lpfc_sli_get_iocbq(phba); 1870 if (iocb == NULL) { 1871 pring->missbufcnt = cnt; 1872 return cnt; 1873 } 1874 icmd = &iocb->iocb; 1875 1876 /* 2 buffers can be posted per command */ 1877 /* Allocate buffer to post */ 1878 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1879 if (mp1) 1880 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1881 if (!mp1 || !mp1->virt) { 1882 kfree(mp1); 1883 lpfc_sli_release_iocbq(phba, iocb); 1884 pring->missbufcnt = cnt; 1885 return cnt; 1886 } 1887 1888 INIT_LIST_HEAD(&mp1->list); 1889 /* Allocate buffer to post */ 1890 if (cnt > 1) { 1891 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1892 if (mp2) 1893 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1894 &mp2->phys); 1895 if (!mp2 || !mp2->virt) { 1896 kfree(mp2); 1897 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1898 kfree(mp1); 1899 lpfc_sli_release_iocbq(phba, iocb); 1900 pring->missbufcnt = cnt; 1901 return cnt; 1902 } 1903 1904 INIT_LIST_HEAD(&mp2->list); 1905 } else { 1906 mp2 = NULL; 1907 } 1908 1909 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1910 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1911 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1912 icmd->ulpBdeCount = 1; 1913 cnt--; 1914 if (mp2) { 1915 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1916 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1917 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1918 cnt--; 1919 icmd->ulpBdeCount = 2; 1920 } 1921 1922 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1923 icmd->ulpLe = 1; 1924 1925 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 1926 IOCB_ERROR) { 1927 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1928 kfree(mp1); 1929 cnt++; 1930 if (mp2) { 1931 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1932 kfree(mp2); 1933 cnt++; 1934 } 1935 lpfc_sli_release_iocbq(phba, iocb); 1936 pring->missbufcnt = cnt; 1937 return cnt; 1938 } 1939 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1940 if (mp2) 1941 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1942 } 1943 pring->missbufcnt = 0; 1944 return 0; 1945} 1946 1947/** 1948 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 1949 * @phba: pointer to lpfc hba data structure. 1950 * 1951 * This routine posts initial receive IOCB buffers to the ELS ring. The 1952 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 1953 * set to 64 IOCBs. 1954 * 1955 * Return codes 1956 * 0 - success (currently always success) 1957 **/ 1958static int 1959lpfc_post_rcv_buf(struct lpfc_hba *phba) 1960{ 1961 struct lpfc_sli *psli = &phba->sli; 1962 1963 /* Ring 0, ELS / CT buffers */ 1964 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 1965 /* Ring 2 - FCP no buffers needed */ 1966 1967 return 0; 1968} 1969 1970#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1971 1972/** 1973 * lpfc_sha_init - Set up initial array of hash table entries 1974 * @HashResultPointer: pointer to an array as hash table. 1975 * 1976 * This routine sets up the initial values to the array of hash table entries 1977 * for the LC HBAs. 
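 *
 * The five words written by this routine are the standard SHA-1 initial
 * hash values (H0-H4 from FIPS 180-1). A minimal sketch of how the hash
 * helpers pair up (see lpfc_hba_init() below; HashWorking is the caller's
 * 80-word working array):
 *
 *	uint32_t hbainit[5];
 *
 *	lpfc_sha_init(hbainit);
 *	lpfc_sha_iterate(hbainit, HashWorking);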
1978 **/
1979static void
1980lpfc_sha_init(uint32_t * HashResultPointer)
1981{
1982	HashResultPointer[0] = 0x67452301;
1983	HashResultPointer[1] = 0xEFCDAB89;
1984	HashResultPointer[2] = 0x98BADCFE;
1985	HashResultPointer[3] = 0x10325476;
1986	HashResultPointer[4] = 0xC3D2E1F0;
1987}
1988
1989/**
1990 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1991 * @HashResultPointer: pointer to an initial/result hash table.
1992 * @HashWorkingPointer: pointer to a working hash table.
1993 *
1994 * This routine iterates an initial hash table pointed by @HashResultPointer
1995 * with the values from the working hash table pointed by @HashWorkingPointer.
1996 * The results are put back into the initial hash table and returned through
1997 * the @HashResultPointer as the result hash table.
1998 **/
1999static void
2000lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2001{
2002	int t;
2003	uint32_t TEMP;
2004	uint32_t A, B, C, D, E;
2005	t = 16;
2006	do {
2007		HashWorkingPointer[t] =
2008		    S(1,
2009		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2010								     8] ^
2011		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2012	} while (++t <= 79);
2013	t = 0;
2014	A = HashResultPointer[0];
2015	B = HashResultPointer[1];
2016	C = HashResultPointer[2];
2017	D = HashResultPointer[3];
2018	E = HashResultPointer[4];
2019
2020	do {
2021		if (t < 20) {
2022			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2023		} else if (t < 40) {
2024			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2025		} else if (t < 60) {
2026			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2027		} else {
2028			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2029		}
2030		TEMP += S(5, A) + E + HashWorkingPointer[t];
2031		E = D;
2032		D = C;
2033		C = S(30, B);
2034		B = A;
2035		A = TEMP;
2036	} while (++t <= 79);
2037
2038	HashResultPointer[0] += A;
2039	HashResultPointer[1] += B;
2040	HashResultPointer[2] += C;
2041	HashResultPointer[3] += D;
2042	HashResultPointer[4] += E;
2043
2044}
2045
2046/**
2047 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2048 * @RandomChallenge: pointer to the entry of host challenge random number array.
2049 * @HashWorking: pointer to the entry of the working hash array.
2050 *
2051 * This routine calculates the working hash array referred by @HashWorking
2052 * from the challenge random numbers associated with the host, referred by
2053 * @RandomChallenge. The result is put into the entry of the working hash
2054 * array and returned by reference through @HashWorking.
2055 **/
2056static void
2057lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2058{
2059	*HashWorking = (*RandomChallenge ^ *HashWorking);
2060}
2061
2062/**
2063 * lpfc_hba_init - Perform special handling for LC HBA initialization
2064 * @phba: pointer to lpfc hba data structure.
2065 * @hbainit: pointer to an array of unsigned 32-bit integers.
2066 *
2067 * This routine performs the special handling for LC HBA initialization.
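 *
 * In outline, the body below seeds an 80-word working array from the
 * adapter WWNN, XORs in seven words of the RandomData challenge via
 * lpfc_challenge_key(), and then runs one SHA-1-style compression
 * (lpfc_sha_init()/lpfc_sha_iterate()) to produce the five-word @hbainit
 * digest handed back to the caller.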
2068 **/
2069void
2070lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2071{
2072	int t;
2073	uint32_t *HashWorking;
2074	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2075
2076	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2077	if (!HashWorking)
2078		return;
2079
2080	HashWorking[0] = HashWorking[78] = *pwwnn++;
2081	HashWorking[1] = HashWorking[79] = *pwwnn;
2082
2083	for (t = 0; t < 7; t++)
2084		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2085
2086	lpfc_sha_init(hbainit);
2087	lpfc_sha_iterate(hbainit, HashWorking);
2088	kfree(HashWorking);
2089}
2090
2091/**
2092 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2093 * @vport: pointer to a virtual N_Port data structure.
2094 *
2095 * This routine performs the necessary cleanups before deleting the @vport.
2096 * It invokes the discovery state machine to perform necessary state
2097 * transitions and to release the ndlps associated with the @vport. Note,
2098 * the physical port is treated as @vport 0.
2099 **/
2100void
2101lpfc_cleanup(struct lpfc_vport *vport)
2102{
2103	struct lpfc_hba *phba = vport->phba;
2104	struct lpfc_nodelist *ndlp, *next_ndlp;
2105	int i = 0;
2106
2107	if (phba->link_state > LPFC_LINK_DOWN)
2108		lpfc_port_link_failure(vport);
2109
2110	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2111		if (!NLP_CHK_NODE_ACT(ndlp)) {
2112			ndlp = lpfc_enable_node(vport, ndlp,
2113						NLP_STE_UNUSED_NODE);
2114			if (!ndlp)
2115				continue;
2116			spin_lock_irq(&phba->ndlp_lock);
2117			NLP_SET_FREE_REQ(ndlp);
2118			spin_unlock_irq(&phba->ndlp_lock);
2119			/* Trigger the release of the ndlp memory */
2120			lpfc_nlp_put(ndlp);
2121			continue;
2122		}
2123		spin_lock_irq(&phba->ndlp_lock);
2124		if (NLP_CHK_FREE_REQ(ndlp)) {
2125			/* The ndlp should not be in memory free mode already */
2126			spin_unlock_irq(&phba->ndlp_lock);
2127			continue;
2128		} else
2129			/* Indicate request for freeing ndlp memory */
2130			NLP_SET_FREE_REQ(ndlp);
2131		spin_unlock_irq(&phba->ndlp_lock);
2132
2133		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2134		    ndlp->nlp_DID == Fabric_DID) {
2135			/* Just free up ndlp with Fabric_DID for vports */
2136			lpfc_nlp_put(ndlp);
2137			continue;
2138		}
2139
2140		if (ndlp->nlp_type & NLP_FABRIC)
2141			lpfc_disc_state_machine(vport, ndlp, NULL,
2142					NLP_EVT_DEVICE_RECOVERY);
2143
2144		lpfc_disc_state_machine(vport, ndlp, NULL,
2145					NLP_EVT_DEVICE_RM);
2146
2147	}
2148
2149	/* At this point, ALL ndlp's should be gone
2150	 * because of the previous NLP_EVT_DEVICE_RM.
2151	 * Let's wait for this to happen, if needed.
2152	 */
2153	while (!list_empty(&vport->fc_nodes)) {
2154		if (i++ > 3000) {
2155			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2156				"0233 Nodelist not empty\n");
2157			list_for_each_entry_safe(ndlp, next_ndlp,
2158						&vport->fc_nodes, nlp_listp) {
2159				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2160						LOG_NODE,
2161						"0282 did:x%x ndlp:x%p "
2162						"usgmap:x%x refcnt:%d\n",
2163						ndlp->nlp_DID, (void *)ndlp,
2164						ndlp->nlp_usg_map,
2165						atomic_read(
2166							&ndlp->kref.refcount));
2167			}
2168			break;
2169		}
2170
2171		/* Wait for any activity on ndlps to settle */
2172		msleep(10);
2173	}
2174}
2175
2176/**
2177 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2178 * @vport: pointer to a virtual N_Port data structure.
2179 *
2180 * This routine stops all the timers associated with a @vport. This function
2181 * is invoked before disabling or deleting a @vport. Note that the physical
2182 * port is treated as @vport 0.
2183 **/
2184void
2185lpfc_stop_vport_timers(struct lpfc_vport *vport)
2186{
2187	del_timer_sync(&vport->els_tmofunc);
2188	del_timer_sync(&vport->fc_fdmitmo);
2189	lpfc_can_disctmo(vport);
2190	return;
2191}
2192
2193/**
2194 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2195 * @phba: pointer to lpfc hba data structure.
2196 *
2197 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2198 * caller of this routine should already hold the host lock.
2199 **/
2200void
2201__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2202{
2203	/* Clear pending FCF rediscovery wait and failover in progress flags */
2204	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2205				FCF_DEAD_DISC |
2206				FCF_ACVL_DISC);
2207	/* Now, try to stop the timer */
2208	del_timer(&phba->fcf.redisc_wait);
2209}
2210
2211/**
2212 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2213 * @phba: pointer to lpfc hba data structure.
2214 *
2215 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2216 * checks whether the FCF rediscovery wait timer is pending with the host
2217 * lock held before proceeding with disabling the timer and clearing the
2218 * wait timer pending flag.
2219 **/
2220void
2221lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2222{
2223	spin_lock_irq(&phba->hbalock);
2224	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2225		/* FCF rediscovery timer already fired or stopped */
2226		spin_unlock_irq(&phba->hbalock);
2227		return;
2228	}
2229	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2230	spin_unlock_irq(&phba->hbalock);
2231}
2232
2233/**
2234 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2235 * @phba: pointer to lpfc hba data structure.
2236 *
2237 * This routine stops all the timers associated with a HBA. This function is
2238 * invoked before either putting a HBA offline or unloading the driver.
2239 **/
2240void
2241lpfc_stop_hba_timers(struct lpfc_hba *phba)
2242{
2243	lpfc_stop_vport_timers(phba->pport);
2244	del_timer_sync(&phba->sli.mbox_tmo);
2245	del_timer_sync(&phba->fabric_block_timer);
2246	del_timer_sync(&phba->eratt_poll);
2247	del_timer_sync(&phba->hb_tmofunc);
2248	phba->hb_outstanding = 0;
2249
2250	switch (phba->pci_dev_grp) {
2251	case LPFC_PCI_DEV_LP:
2252		/* Stop any LightPulse device specific driver timers */
2253		del_timer_sync(&phba->fcp_poll_timer);
2254		break;
2255	case LPFC_PCI_DEV_OC:
2256		/* Stop any OneConnect device specific driver timers */
2257		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2258		break;
2259	default:
2260		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2261			"0297 Invalid device group (x%x)\n",
2262			phba->pci_dev_grp);
2263		break;
2264	}
2265	return;
2266}
2267
2268/**
2269 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2270 * @phba: pointer to lpfc hba data structure.
2271 *
2272 * This routine marks a HBA's management interface as blocked. Once the HBA's
2273 * management interface is marked as blocked, all user space access to
2274 * the HBA, whether from the sysfs interface or the libdfc interface, will
2275 * be blocked. The HBA is set to block the management interface when the
2276 * driver prepares the HBA interface for online or offline.
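 *
 * Typical bracketing, as used by lpfc_online() below (a sketch, not an
 * additional call site):
 *
 *	lpfc_block_mgmt_io(phba);
 *	(bring the HBA interface up or down)
 *	lpfc_unblock_mgmt_io(phba);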
2277 **/ 2278static void 2279lpfc_block_mgmt_io(struct lpfc_hba * phba) 2280{ 2281 unsigned long iflag; 2282 2283 spin_lock_irqsave(&phba->hbalock, iflag); 2284 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2285 spin_unlock_irqrestore(&phba->hbalock, iflag); 2286} 2287 2288/** 2289 * lpfc_online - Initialize and bring a HBA online 2290 * @phba: pointer to lpfc hba data structure. 2291 * 2292 * This routine initializes the HBA and brings a HBA online. During this 2293 * process, the management interface is blocked to prevent user space access 2294 * to the HBA interfering with the driver initialization. 2295 * 2296 * Return codes 2297 * 0 - successful 2298 * 1 - failed 2299 **/ 2300int 2301lpfc_online(struct lpfc_hba *phba) 2302{ 2303 struct lpfc_vport *vport; 2304 struct lpfc_vport **vports; 2305 int i; 2306 2307 if (!phba) 2308 return 0; 2309 vport = phba->pport; 2310 2311 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2312 return 0; 2313 2314 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2315 "0458 Bring Adapter online\n"); 2316 2317 lpfc_block_mgmt_io(phba); 2318 2319 if (!lpfc_sli_queue_setup(phba)) { 2320 lpfc_unblock_mgmt_io(phba); 2321 return 1; 2322 } 2323 2324 if (phba->sli_rev == LPFC_SLI_REV4) { 2325 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2326 lpfc_unblock_mgmt_io(phba); 2327 return 1; 2328 } 2329 } else { 2330 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2331 lpfc_unblock_mgmt_io(phba); 2332 return 1; 2333 } 2334 } 2335 2336 vports = lpfc_create_vport_work_array(phba); 2337 if (vports != NULL) 2338 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2339 struct Scsi_Host *shost; 2340 shost = lpfc_shost_from_vport(vports[i]); 2341 spin_lock_irq(shost->host_lock); 2342 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2343 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2344 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2345 if (phba->sli_rev == LPFC_SLI_REV4) 2346 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2347 spin_unlock_irq(shost->host_lock); 2348 } 2349 lpfc_destroy_vport_work_array(phba, vports); 2350 2351 lpfc_unblock_mgmt_io(phba); 2352 return 0; 2353} 2354 2355/** 2356 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2357 * @phba: pointer to lpfc hba data structure. 2358 * 2359 * This routine marks a HBA's management interface as not blocked. Once the 2360 * HBA's management interface is marked as not blocked, all the user space 2361 * access to the HBA, whether they are from sysfs interface or libdfc 2362 * interface will be allowed. The HBA is set to block the management interface 2363 * when the driver prepares the HBA interface for online or offline and then 2364 * set to unblock the management interface afterwards. 2365 **/ 2366void 2367lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2368{ 2369 unsigned long iflag; 2370 2371 spin_lock_irqsave(&phba->hbalock, iflag); 2372 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2373 spin_unlock_irqrestore(&phba->hbalock, iflag); 2374} 2375 2376/** 2377 * lpfc_offline_prep - Prepare a HBA to be brought offline 2378 * @phba: pointer to lpfc hba data structure. 2379 * 2380 * This routine is invoked to prepare a HBA to be brought offline. It performs 2381 * unregistration login to all the nodes on all vports and flushes the mailbox 2382 * queue to make it ready to be brought offline. 
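 *
 * The canonical ordering of the offline/online helpers, taken from
 * lpfc_reset_hba() later in this file:
 *
 *	lpfc_offline_prep(phba);
 *	lpfc_offline(phba);
 *	lpfc_sli_brdrestart(phba);
 *	lpfc_online(phba);
 *	lpfc_unblock_mgmt_io(phba);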
2383 **/ 2384void 2385lpfc_offline_prep(struct lpfc_hba * phba) 2386{ 2387 struct lpfc_vport *vport = phba->pport; 2388 struct lpfc_nodelist *ndlp, *next_ndlp; 2389 struct lpfc_vport **vports; 2390 struct Scsi_Host *shost; 2391 int i; 2392 2393 if (vport->fc_flag & FC_OFFLINE_MODE) 2394 return; 2395 2396 lpfc_block_mgmt_io(phba); 2397 2398 lpfc_linkdown(phba); 2399 2400 /* Issue an unreg_login to all nodes on all vports */ 2401 vports = lpfc_create_vport_work_array(phba); 2402 if (vports != NULL) { 2403 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2404 if (vports[i]->load_flag & FC_UNLOADING) 2405 continue; 2406 shost = lpfc_shost_from_vport(vports[i]); 2407 spin_lock_irq(shost->host_lock); 2408 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2409 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2410 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2411 spin_unlock_irq(shost->host_lock); 2412 2413 shost = lpfc_shost_from_vport(vports[i]); 2414 list_for_each_entry_safe(ndlp, next_ndlp, 2415 &vports[i]->fc_nodes, 2416 nlp_listp) { 2417 if (!NLP_CHK_NODE_ACT(ndlp)) 2418 continue; 2419 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2420 continue; 2421 if (ndlp->nlp_type & NLP_FABRIC) { 2422 lpfc_disc_state_machine(vports[i], ndlp, 2423 NULL, NLP_EVT_DEVICE_RECOVERY); 2424 lpfc_disc_state_machine(vports[i], ndlp, 2425 NULL, NLP_EVT_DEVICE_RM); 2426 } 2427 spin_lock_irq(shost->host_lock); 2428 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2429 spin_unlock_irq(shost->host_lock); 2430 lpfc_unreg_rpi(vports[i], ndlp); 2431 } 2432 } 2433 } 2434 lpfc_destroy_vport_work_array(phba, vports); 2435 2436 lpfc_sli_mbox_sys_shutdown(phba); 2437} 2438 2439/** 2440 * lpfc_offline - Bring a HBA offline 2441 * @phba: pointer to lpfc hba data structure. 2442 * 2443 * This routine actually brings a HBA offline. It stops all the timers 2444 * associated with the HBA, brings down the SLI layer, and eventually 2445 * marks the HBA as in offline state for the upper layer protocol. 2446 **/ 2447void 2448lpfc_offline(struct lpfc_hba *phba) 2449{ 2450 struct Scsi_Host *shost; 2451 struct lpfc_vport **vports; 2452 int i; 2453 2454 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2455 return; 2456 2457 /* stop port and all timers associated with this hba */ 2458 lpfc_stop_port(phba); 2459 vports = lpfc_create_vport_work_array(phba); 2460 if (vports != NULL) 2461 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2462 lpfc_stop_vport_timers(vports[i]); 2463 lpfc_destroy_vport_work_array(phba, vports); 2464 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2465 "0460 Bring Adapter offline\n"); 2466 /* Bring down the SLI Layer and cleanup. The HBA is offline 2467 now. */ 2468 lpfc_sli_hba_down(phba); 2469 spin_lock_irq(&phba->hbalock); 2470 phba->work_ha = 0; 2471 spin_unlock_irq(&phba->hbalock); 2472 vports = lpfc_create_vport_work_array(phba); 2473 if (vports != NULL) 2474 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2475 shost = lpfc_shost_from_vport(vports[i]); 2476 spin_lock_irq(shost->host_lock); 2477 vports[i]->work_port_events = 0; 2478 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2479 spin_unlock_irq(shost->host_lock); 2480 } 2481 lpfc_destroy_vport_work_array(phba, vports); 2482} 2483 2484/** 2485 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2486 * @phba: pointer to lpfc hba data structure. 2487 * 2488 * This routine is to free all the SCSI buffers and IOCBs from the driver 2489 * list back to kernel. 
It is called from lpfc_pci_remove_one to free 2490 * the internal resources before the device is removed from the system. 2491 * 2492 * Return codes 2493 * 0 - successful (for now, it always returns 0) 2494 **/ 2495static int 2496lpfc_scsi_free(struct lpfc_hba *phba) 2497{ 2498 struct lpfc_scsi_buf *sb, *sb_next; 2499 struct lpfc_iocbq *io, *io_next; 2500 2501 spin_lock_irq(&phba->hbalock); 2502 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2503 spin_lock(&phba->scsi_buf_list_lock); 2504 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2505 list_del(&sb->list); 2506 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2507 sb->dma_handle); 2508 kfree(sb); 2509 phba->total_scsi_bufs--; 2510 } 2511 spin_unlock(&phba->scsi_buf_list_lock); 2512 2513 /* Release all the lpfc_iocbq entries maintained by this host. */ 2514 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2515 list_del(&io->list); 2516 kfree(io); 2517 phba->total_iocbq_bufs--; 2518 } 2519 spin_unlock_irq(&phba->hbalock); 2520 return 0; 2521} 2522 2523/** 2524 * lpfc_create_port - Create an FC port 2525 * @phba: pointer to lpfc hba data structure. 2526 * @instance: a unique integer ID to this FC port. 2527 * @dev: pointer to the device data structure. 2528 * 2529 * This routine creates a FC port for the upper layer protocol. The FC port 2530 * can be created on top of either a physical port or a virtual port provided 2531 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2532 * and associates the FC port created before adding the shost into the SCSI 2533 * layer. 2534 * 2535 * Return codes 2536 * @vport - pointer to the virtual N_Port data structure. 2537 * NULL - port create failed. 2538 **/ 2539struct lpfc_vport * 2540lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2541{ 2542 struct lpfc_vport *vport; 2543 struct Scsi_Host *shost; 2544 int error = 0; 2545 2546 if (dev != &phba->pcidev->dev) 2547 shost = scsi_host_alloc(&lpfc_vport_template, 2548 sizeof(struct lpfc_vport)); 2549 else 2550 shost = scsi_host_alloc(&lpfc_template, 2551 sizeof(struct lpfc_vport)); 2552 if (!shost) 2553 goto out; 2554 2555 vport = (struct lpfc_vport *) shost->hostdata; 2556 vport->phba = phba; 2557 vport->load_flag |= FC_LOADING; 2558 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2559 vport->fc_rscn_flush = 0; 2560 2561 lpfc_get_vport_cfgparam(vport); 2562 shost->unique_id = instance; 2563 shost->max_id = LPFC_MAX_TARGET; 2564 shost->max_lun = vport->cfg_max_luns; 2565 shost->this_id = -1; 2566 shost->max_cmd_len = 16; 2567 if (phba->sli_rev == LPFC_SLI_REV4) { 2568 shost->dma_boundary = 2569 phba->sli4_hba.pc_sli4_params.sge_supp_len; 2570 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2571 } 2572 2573 /* 2574 * Set initial can_queue value since 0 is no longer supported and 2575 * scsi_add_host will fail. This will be adjusted later based on the 2576 * max xri value determined in hba setup. 2577 */ 2578 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2579 if (dev != &phba->pcidev->dev) { 2580 shost->transportt = lpfc_vport_transport_template; 2581 vport->port_type = LPFC_NPIV_PORT; 2582 } else { 2583 shost->transportt = lpfc_transport_template; 2584 vport->port_type = LPFC_PHYSICAL_PORT; 2585 } 2586 2587 /* Initialize all internally managed lists. 
*/ 2588 INIT_LIST_HEAD(&vport->fc_nodes); 2589 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2590 spin_lock_init(&vport->work_port_lock); 2591 2592 init_timer(&vport->fc_disctmo); 2593 vport->fc_disctmo.function = lpfc_disc_timeout; 2594 vport->fc_disctmo.data = (unsigned long)vport; 2595 2596 init_timer(&vport->fc_fdmitmo); 2597 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2598 vport->fc_fdmitmo.data = (unsigned long)vport; 2599 2600 init_timer(&vport->els_tmofunc); 2601 vport->els_tmofunc.function = lpfc_els_timeout; 2602 vport->els_tmofunc.data = (unsigned long)vport; 2603 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 2604 phba->menlo_flag |= HBA_MENLO_SUPPORT; 2605 /* check for menlo minimum sg count */ 2606 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) { 2607 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 2608 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2609 } 2610 } 2611 2612 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2613 if (error) 2614 goto out_put_shost; 2615 2616 spin_lock_irq(&phba->hbalock); 2617 list_add_tail(&vport->listentry, &phba->port_list); 2618 spin_unlock_irq(&phba->hbalock); 2619 return vport; 2620 2621out_put_shost: 2622 scsi_host_put(shost); 2623out: 2624 return NULL; 2625} 2626 2627/** 2628 * destroy_port - destroy an FC port 2629 * @vport: pointer to an lpfc virtual N_Port data structure. 2630 * 2631 * This routine destroys a FC port from the upper layer protocol. All the 2632 * resources associated with the port are released. 2633 **/ 2634void 2635destroy_port(struct lpfc_vport *vport) 2636{ 2637 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2638 struct lpfc_hba *phba = vport->phba; 2639 2640 lpfc_debugfs_terminate(vport); 2641 fc_remove_host(shost); 2642 scsi_remove_host(shost); 2643 2644 spin_lock_irq(&phba->hbalock); 2645 list_del_init(&vport->listentry); 2646 spin_unlock_irq(&phba->hbalock); 2647 2648 lpfc_cleanup(vport); 2649 return; 2650} 2651 2652/** 2653 * lpfc_get_instance - Get a unique integer ID 2654 * 2655 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2656 * uses the kernel idr facility to perform the task. 2657 * 2658 * Return codes: 2659 * instance - a unique integer ID allocated as the new instance. 2660 * -1 - lpfc get instance failed. 2661 **/ 2662int 2663lpfc_get_instance(void) 2664{ 2665 int instance = 0; 2666 2667 /* Assign an unused number */ 2668 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2669 return -1; 2670 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2671 return -1; 2672 return instance; 2673} 2674 2675/** 2676 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2677 * @shost: pointer to SCSI host data structure. 2678 * @time: elapsed time of the scan in jiffies. 2679 * 2680 * This routine is called by the SCSI layer with a SCSI host to determine 2681 * whether the scan host is finished. 2682 * 2683 * Note: there is no scan_start function as adapter initialization will have 2684 * asynchronously kicked off the link initialization. 2685 * 2686 * Return codes 2687 * 0 - SCSI host scan is not over yet. 2688 * 1 - SCSI host scan is over. 
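 *
 * The SCSI midlayer polls this handler periodically (roughly every 10 ms)
 * with the elapsed scan time in jiffies, so the cutoffs below (30 seconds
 * overall, 15 seconds with the link down) bound how long host registration
 * can hold up initialization.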
2689 **/
2690int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2691{
2692	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2693	struct lpfc_hba *phba = vport->phba;
2694	int stat = 0;
2695
2696	spin_lock_irq(shost->host_lock);
2697
2698	if (vport->load_flag & FC_UNLOADING) {
2699		stat = 1;
2700		goto finished;
2701	}
2702	if (time >= 30 * HZ) {
2703		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2704				"0461 Scanning longer than 30 "
2705				"seconds. Continuing initialization\n");
2706		stat = 1;
2707		goto finished;
2708	}
2709	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2710		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2711				"0465 Link down longer than 15 "
2712				"seconds. Continuing initialization\n");
2713		stat = 1;
2714		goto finished;
2715	}
2716
2717	if (vport->port_state != LPFC_VPORT_READY)
2718		goto finished;
2719	if (vport->num_disc_nodes || vport->fc_prli_sent)
2720		goto finished;
2721	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2722		goto finished;
2723	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2724		goto finished;
2725
2726	stat = 1;
2727
2728finished:
2729	spin_unlock_irq(shost->host_lock);
2730	return stat;
2731}
2732
2733/**
2734 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2735 * @shost: pointer to SCSI host data structure.
2736 *
2737 * This routine initializes a given SCSI host's attributes on a FC port. The
2738 * SCSI host can be either on top of a physical port or a virtual port.
2739 **/
2740void lpfc_host_attrib_init(struct Scsi_Host *shost)
2741{
2742	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2743	struct lpfc_hba *phba = vport->phba;
2744	/*
2745	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2746	 */
2747
2748	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2749	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2750	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2751
2752	memset(fc_host_supported_fc4s(shost), 0,
2753	       sizeof(fc_host_supported_fc4s(shost)));
2754	fc_host_supported_fc4s(shost)[2] = 1;
2755	fc_host_supported_fc4s(shost)[7] = 1;
2756
2757	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2758				 sizeof fc_host_symbolic_name(shost));
2759
2760	fc_host_supported_speeds(shost) = 0;
2761	if (phba->lmt & LMT_10Gb)
2762		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2763	if (phba->lmt & LMT_8Gb)
2764		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2765	if (phba->lmt & LMT_4Gb)
2766		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2767	if (phba->lmt & LMT_2Gb)
2768		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2769	if (phba->lmt & LMT_1Gb)
2770		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2771
2772	fc_host_maxframe_size(shost) =
2773		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2774		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2775
2776	/* This value is also unchanging */
2777	memset(fc_host_active_fc4s(shost), 0,
2778	       sizeof(fc_host_active_fc4s(shost)));
2779	fc_host_active_fc4s(shost)[2] = 1;
2780	fc_host_active_fc4s(shost)[7] = 1;
2781
2782	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2783	spin_lock_irq(shost->host_lock);
2784	vport->load_flag &= ~FC_LOADING;
2785	spin_unlock_irq(shost->host_lock);
2786}
2787
2788/**
2789 * lpfc_stop_port_s3 - Stop SLI3 device port
2790 * @phba: pointer to lpfc hba data structure.
2791 *
2792 * This routine is invoked to stop an SLI3 device port; it stops the device
2793 * from generating interrupts and stops the device driver's timers for the
2794 * device.
2795 **/
2796static void
2797lpfc_stop_port_s3(struct lpfc_hba *phba)
2798{
2799	/* Clear all interrupt enable conditions */
2800	writel(0, phba->HCregaddr);
2801	readl(phba->HCregaddr); /* flush */
2802	/* Clear all pending interrupts */
2803	writel(0xffffffff, phba->HAregaddr);
2804	readl(phba->HAregaddr); /* flush */
2805
2806	/* Reset some HBA SLI setup states */
2807	lpfc_stop_hba_timers(phba);
2808	phba->pport->work_port_events = 0;
2809}
2810
2811/**
2812 * lpfc_stop_port_s4 - Stop SLI4 device port
2813 * @phba: pointer to lpfc hba data structure.
2814 *
2815 * This routine is invoked to stop an SLI4 device port; it stops the device
2816 * from generating interrupts and stops the device driver's timers for the
2817 * device.
2818 **/
2819static void
2820lpfc_stop_port_s4(struct lpfc_hba *phba)
2821{
2822	/* Reset some HBA SLI4 setup states */
2823	lpfc_stop_hba_timers(phba);
2824	phba->pport->work_port_events = 0;
2825	phba->sli4_hba.intr_enable = 0;
2826}
2827
2828/**
2829 * lpfc_stop_port - Wrapper function for stopping hba port
2830 * @phba: Pointer to HBA context object.
2831 *
2832 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2833 * the API jump table function pointer from the lpfc_hba struct.
2834 **/
2835void
2836lpfc_stop_port(struct lpfc_hba *phba)
2837{
2838	phba->lpfc_stop_port(phba);
2839}
2840
2841/**
2842 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2843 * @phba: pointer to lpfc hba data structure.
2844 *
2845 * This routine is invoked to remove the driver default fcf record from
2846 * the port. This routine currently acts on FCF Index 0.
2847 *
2848 **/
2849void
2850lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2851{
2852	int rc = 0;
2853	LPFC_MBOXQ_t *mboxq;
2854	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2855	uint32_t mbox_tmo, req_len;
2856	uint32_t shdr_status, shdr_add_status;
2857
2858	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2859	if (!mboxq) {
2860		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2861			"2020 Failed to allocate mbox for ADD_FCF cmd\n");
2862		return;
2863	}
2864
2865	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2866		  sizeof(struct lpfc_sli4_cfg_mhdr);
2867	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2868			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2869			      req_len, LPFC_SLI4_MBX_EMBED);
2870	/*
2871	 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2872	 * supports multiple FCF indices.
2873	 */
2874	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2875	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2876	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2877	       phba->fcf.current_rec.fcf_indx);
2878
2879	if (!phba->sli4_hba.intr_enable)
2880		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2881	else {
2882		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2883		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2884	}
2885	/* The IOCTL status is embedded in the mailbox subheader.
 */
2886	shdr_status = bf_get(lpfc_mbox_hdr_status,
2887			     &del_fcf_record->header.cfg_shdr.response);
2888	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2889				 &del_fcf_record->header.cfg_shdr.response);
2890	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2891		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2892				"2516 DEL FCF of default FCF Index failed "
2893				"mbx status x%x, status x%x add_status x%x\n",
2894				rc, shdr_status, shdr_add_status);
2895	}
2896	if (rc != MBX_TIMEOUT)
2897		mempool_free(mboxq, phba->mbox_mem_pool);
2898}
2899
2900/**
2901 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2902 * @phba: Pointer to hba for which this call is being executed.
2903 *
2904 * This routine starts the timer waiting for the FCF rediscovery to complete.
2905 **/
2906void
2907lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2908{
2909	unsigned long fcf_redisc_wait_tmo =
2910		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2911	/* Start fcf rediscovery wait period timer */
2912	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2913	spin_lock_irq(&phba->hbalock);
2914	/* Allow action to new fcf asynchronous event */
2915	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2916	/* Mark the FCF rediscovery pending state */
2917	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2918	spin_unlock_irq(&phba->hbalock);
2919}
2920
2921/**
2922 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2923 * @ptr: Map to lpfc_hba data structure pointer.
2924 *
2925 * This routine is invoked when the wait for FCF table rediscovery has
2926 * timed out. If new FCF record(s) has (have) been discovered during the
2927 * wait period, a new FCF event shall be added to the FCOE async event
2928 * list, and then the worker thread shall be woken up for processing from
2929 * the worker thread context.
2930 **/
2931void
2932lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2933{
2934	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2935
2936	/* Don't send FCF rediscovery event if timer cancelled */
2937	spin_lock_irq(&phba->hbalock);
2938	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2939		spin_unlock_irq(&phba->hbalock);
2940		return;
2941	}
2942	/* Clear FCF rediscovery timer pending flag */
2943	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2944	/* FCF rediscovery event to worker thread */
2945	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2946	spin_unlock_irq(&phba->hbalock);
2947	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2948			"2776 FCF rediscover wait timer expired, post "
2949			"a worker thread event for FCF table scan\n");
2950	/* wake up worker thread */
2951	lpfc_worker_wake_up(phba);
2952}
2953
2954/**
2955 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2956 * @phba: pointer to lpfc hba data structure.
2957 *
2958 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2959 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2960 * was successful and the firmware supports FCoE. Any other return indicates
2961 * an error. It is assumed that this function will be called before interrupts
2962 * are enabled.
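 *
 * Expected use during SLI4 bring-up (a sketch only; the actual call sites
 * live in the hba setup path and may propagate the error differently):
 *
 *	if (lpfc_sli4_fw_cfg_check(phba))
 *		return -ENODEV;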
2963 **/ 2964static int 2965lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) 2966{ 2967 int rc = 0; 2968 LPFC_MBOXQ_t *mboxq; 2969 struct lpfc_mbx_query_fw_cfg *query_fw_cfg; 2970 uint32_t length; 2971 uint32_t shdr_status, shdr_add_status; 2972 2973 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2974 if (!mboxq) { 2975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2976 "2621 Failed to allocate mbox for " 2977 "query firmware config cmd\n"); 2978 return -ENOMEM; 2979 } 2980 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; 2981 length = (sizeof(struct lpfc_mbx_query_fw_cfg) - 2982 sizeof(struct lpfc_sli4_cfg_mhdr)); 2983 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 2984 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 2985 length, LPFC_SLI4_MBX_EMBED); 2986 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2987 /* The IOCTL status is embedded in the mailbox subheader. */ 2988 shdr_status = bf_get(lpfc_mbox_hdr_status, 2989 &query_fw_cfg->header.cfg_shdr.response); 2990 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 2991 &query_fw_cfg->header.cfg_shdr.response); 2992 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 2993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2994 "2622 Query Firmware Config failed " 2995 "mbx status x%x, status x%x add_status x%x\n", 2996 rc, shdr_status, shdr_add_status); 2997 return -EINVAL; 2998 } 2999 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { 3000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3001 "2623 FCoE Function not supported by firmware. " 3002 "Function mode = %08x\n", 3003 query_fw_cfg->function_mode); 3004 return -EINVAL; 3005 } 3006 if (rc != MBX_TIMEOUT) 3007 mempool_free(mboxq, phba->mbox_mem_pool); 3008 return 0; 3009} 3010 3011/** 3012 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3013 * @phba: pointer to lpfc hba data structure. 3014 * @acqe_link: pointer to the async link completion queue entry. 3015 * 3016 * This routine is to parse the SLI4 link-attention link fault code and 3017 * translate it into the base driver's read link attention mailbox command 3018 * status. 3019 * 3020 * Return: Link-attention status in terms of base driver's coding. 3021 **/ 3022static uint16_t 3023lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3024 struct lpfc_acqe_link *acqe_link) 3025{ 3026 uint16_t latt_fault; 3027 3028 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3029 case LPFC_ASYNC_LINK_FAULT_NONE: 3030 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3031 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3032 latt_fault = 0; 3033 break; 3034 default: 3035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3036 "0398 Invalid link fault code: x%x\n", 3037 bf_get(lpfc_acqe_link_fault, acqe_link)); 3038 latt_fault = MBXERR_ERROR; 3039 break; 3040 } 3041 return latt_fault; 3042} 3043 3044/** 3045 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3046 * @phba: pointer to lpfc hba data structure. 3047 * @acqe_link: pointer to the async link completion queue entry. 3048 * 3049 * This routine is to parse the SLI4 link attention type and translate it 3050 * into the base driver's link attention type coding. 3051 * 3052 * Return: Link attention type in terms of base driver's coding. 
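 *
 * The translation implemented by the switch below:
 *
 *	LPFC_ASYNC_LINK_STATUS_DOWN          -> AT_LINK_DOWN
 *	LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN  -> AT_LINK_DOWN
 *	LPFC_ASYNC_LINK_STATUS_UP            -> AT_RESERVED (wait for logical up)
 *	LPFC_ASYNC_LINK_STATUS_LOGICAL_UP    -> AT_LINK_UP
 *	anything else                        -> AT_RESERVED (logged as invalid)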
3053 **/ 3054static uint8_t 3055lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3056 struct lpfc_acqe_link *acqe_link) 3057{ 3058 uint8_t att_type; 3059 3060 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3061 case LPFC_ASYNC_LINK_STATUS_DOWN: 3062 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3063 att_type = AT_LINK_DOWN; 3064 break; 3065 case LPFC_ASYNC_LINK_STATUS_UP: 3066 /* Ignore physical link up events - wait for logical link up */ 3067 att_type = AT_RESERVED; 3068 break; 3069 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3070 att_type = AT_LINK_UP; 3071 break; 3072 default: 3073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3074 "0399 Invalid link attention type: x%x\n", 3075 bf_get(lpfc_acqe_link_status, acqe_link)); 3076 att_type = AT_RESERVED; 3077 break; 3078 } 3079 return att_type; 3080} 3081 3082/** 3083 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3084 * @phba: pointer to lpfc hba data structure. 3085 * @acqe_link: pointer to the async link completion queue entry. 3086 * 3087 * This routine is to parse the SLI4 link-attention link speed and translate 3088 * it into the base driver's link-attention link speed coding. 3089 * 3090 * Return: Link-attention link speed in terms of base driver's coding. 3091 **/ 3092static uint8_t 3093lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3094 struct lpfc_acqe_link *acqe_link) 3095{ 3096 uint8_t link_speed; 3097 3098 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3099 case LPFC_ASYNC_LINK_SPEED_ZERO: 3100 link_speed = LA_UNKNW_LINK; 3101 break; 3102 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3103 link_speed = LA_UNKNW_LINK; 3104 break; 3105 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3106 link_speed = LA_UNKNW_LINK; 3107 break; 3108 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3109 link_speed = LA_1GHZ_LINK; 3110 break; 3111 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3112 link_speed = LA_10GHZ_LINK; 3113 break; 3114 default: 3115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3116 "0483 Invalid link-attention link speed: x%x\n", 3117 bf_get(lpfc_acqe_link_speed, acqe_link)); 3118 link_speed = LA_UNKNW_LINK; 3119 break; 3120 } 3121 return link_speed; 3122} 3123 3124/** 3125 * lpfc_sli4_async_link_evt - Process the asynchronous link event 3126 * @phba: pointer to lpfc hba data structure. 3127 * @acqe_link: pointer to the async link completion queue entry. 3128 * 3129 * This routine is to handle the SLI4 asynchronous link event. 
3130 **/
3131static void
3132lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3133			 struct lpfc_acqe_link *acqe_link)
3134{
3135	struct lpfc_dmabuf *mp;
3136	LPFC_MBOXQ_t *pmb;
3137	MAILBOX_t *mb;
3138	READ_LA_VAR *la;
3139	uint8_t att_type;
3140
3141	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3142	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3143		return;
3144	phba->fcoe_eventtag = acqe_link->event_tag;
3145	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3146	if (!pmb) {
3147		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3148				"0395 The mboxq allocation failed\n");
3149		return;
3150	}
3151	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3152	if (!mp) {
3153		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3154				"0396 The lpfc_dmabuf allocation failed\n");
3155		goto out_free_pmb;
3156	}
3157	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3158	if (!mp->virt) {
3159		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3160				"0397 The mbuf allocation failed\n");
3161		goto out_free_dmabuf;
3162	}
3163
3164	/* Cleanup any outstanding ELS commands */
3165	lpfc_els_flush_all_cmd(phba);
3166
3167	/* Block ELS IOCBs until we are done processing the link event */
3168	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3169
3170	/* Update link event statistics */
3171	phba->sli.slistat.link_event++;
3172
3173	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3174	lpfc_read_la(phba, pmb, mp);
3175	pmb->vport = phba->pport;
3176
3177	/* Parse and translate status field */
3178	mb = &pmb->u.mb;
3179	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3180
3181	/* Parse and translate link attention fields */
3182	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3183	la->eventTag = acqe_link->event_tag;
3184	la->attType = att_type;
3185	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3186
3187	/* Fake the following irrelevant fields */
3188	la->topology = TOPOLOGY_PT_PT;
3189	la->granted_AL_PA = 0;
3190	la->il = 0;
3191	la->pb = 0;
3192	la->fa = 0;
3193	la->mm = 0;
3194
3195	/* Keep the link status for extra SLI4 state machine reference */
3196	phba->sli4_hba.link_state.speed =
3197				bf_get(lpfc_acqe_link_speed, acqe_link);
3198	phba->sli4_hba.link_state.duplex =
3199				bf_get(lpfc_acqe_link_duplex, acqe_link);
3200	phba->sli4_hba.link_state.status =
3201				bf_get(lpfc_acqe_link_status, acqe_link);
3202	phba->sli4_hba.link_state.physical =
3203				bf_get(lpfc_acqe_link_physical, acqe_link);
3204	phba->sli4_hba.link_state.fault =
3205				bf_get(lpfc_acqe_link_fault, acqe_link);
3206	phba->sli4_hba.link_state.logical_speed =
3207				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3208
3209	/* Invoke the lpfc_handle_latt mailbox command callback function */
3210	lpfc_mbx_cmpl_read_la(phba, pmb);
3211
3212	return;
3213
3214out_free_dmabuf:
3215	kfree(mp);
3216out_free_pmb:
3217	mempool_free(pmb, phba->mbox_mem_pool);
3218}
3219
3220/**
3221 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3222 * @vport: pointer to vport data structure.
3223 *
3224 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3225 * response to a CVL event.
3226 *
3227 * Return the pointer to the ndlp with the vport if successful, otherwise
3228 * return NULL.
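 *
 * The routine deliberately returns NULL without acting when the physical
 * port has not completed FLOGI or the vport has not passed FDISC, i.e.
 * when no virtual link has actually been instantiated yet.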
3229 **/
3230static struct lpfc_nodelist *
3231lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3232{
3233	struct lpfc_nodelist *ndlp;
3234	struct Scsi_Host *shost;
3235	struct lpfc_hba *phba;
3236
3237	if (!vport)
3238		return NULL;
3239	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3240	if (!ndlp)
3241		return NULL;
3242	phba = vport->phba;
3243	if (!phba)
3244		return NULL;
3245	if (phba->pport->port_state <= LPFC_FLOGI)
3246		return NULL;
3247	/* If virtual link is not yet instantiated ignore CVL */
3248	if (vport->port_state <= LPFC_FDISC)
3249		return NULL;
3250	shost = lpfc_shost_from_vport(vport);
3251	if (!shost)
3252		return NULL;
3253	lpfc_linkdown_port(vport);
3254	lpfc_cleanup_pending_mbox(vport);
3255	spin_lock_irq(shost->host_lock);
3256	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3257	spin_unlock_irq(shost->host_lock);
3258
3259	return ndlp;
3260}
3261
3262/**
3263 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3264 * @phba: pointer to lpfc hba data structure.
3265 *
3266 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3267 * response to a FCF dead event.
3268 **/
3269static void
3270lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3271{
3272	struct lpfc_vport **vports;
3273	int i;
3274
3275	vports = lpfc_create_vport_work_array(phba);
3276	if (vports)
3277		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3278			lpfc_sli4_perform_vport_cvl(vports[i]);
3279	lpfc_destroy_vport_work_array(phba, vports);
3280}
3281
3282/**
3283 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3284 * @phba: pointer to lpfc hba data structure.
3285 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
3286 *
3287 * This routine is to handle the SLI4 asynchronous fcoe event.
3288 **/
3289static void
3290lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3291			 struct lpfc_acqe_fcoe *acqe_fcoe)
3292{
3293	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3294	int rc;
3295	struct lpfc_vport *vport;
3296	struct lpfc_nodelist *ndlp;
3297	struct Scsi_Host *shost;
3298	int active_vlink_present;
3299	struct lpfc_vport **vports;
3300	int i;
3301
3302	phba->fc_eventTag = acqe_fcoe->event_tag;
3303	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3304	switch (event_type) {
3305	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3306	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3307		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3308			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3309					LOG_DISCOVERY,
3310					"2546 New FCF found event: "
3311					"evt_tag:x%x, fcf_index:x%x\n",
3312					acqe_fcoe->event_tag,
3313					acqe_fcoe->index);
3314		else
3315			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3316					LOG_DISCOVERY,
3317					"2788 FCF parameter modified event: "
3318					"evt_tag:x%x, fcf_index:x%x\n",
3319					acqe_fcoe->event_tag,
3320					acqe_fcoe->index);
3321		spin_lock_irq(&phba->hbalock);
3322		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3323		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3324			/*
3325			 * If the current FCF is in discovered state or
3326			 * FCF discovery is in progress, do nothing.
3327			 */
3328			spin_unlock_irq(&phba->hbalock);
3329			break;
3330		}
3331
3332		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3333			/*
3334			 * If fast FCF failover rescan event is pending,
3335			 * do nothing.
3336			 */
3337			spin_unlock_irq(&phba->hbalock);
3338			break;
3339		}
3340		spin_unlock_irq(&phba->hbalock);
3341
3342		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3343		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3344			/*
3345			 * During period of FCF discovery, read the FCF
3346			 * table record indexed by the event to update
3347			 * FCF round robin failover eligible FCF bmask.
3348			 */
3349			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3350					LOG_DISCOVERY,
3351					"2779 Read new FCF record with "
3352					"fcf_index:x%x for updating FCF "
3353					"round robin failover bmask\n",
3354					acqe_fcoe->index);
3355			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3356		}
3357
3358		/* Otherwise, scan the entire FCF table and re-discover SAN */
3359		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3360				"2770 Start FCF table scan due to new FCF "
3361				"event: evt_tag:x%x, fcf_index:x%x\n",
3362				acqe_fcoe->event_tag, acqe_fcoe->index);
3363		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3364						     LPFC_FCOE_FCF_GET_FIRST);
3365		if (rc)
3366			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3367					"2547 Issue FCF scan read FCF mailbox "
3368					"command failed 0x%x\n", rc);
3369		break;
3370
3371	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3372		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3373			"2548 FCF Table full count 0x%x tag 0x%x\n",
3374			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3375			acqe_fcoe->event_tag);
3376		break;
3377
3378	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3379		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3380			"2549 FCF disconnected from network index 0x%x"
3381			" tag 0x%x\n", acqe_fcoe->index,
3382			acqe_fcoe->event_tag);
3383		/* If the event is not for currently used fcf do nothing */
3384		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3385			break;
3386		/* We request port to rediscover the entire FCF table for
3387		 * a fast recovery from case that the current FCF record
3388		 * is no longer valid if we are not in the middle of FCF
3389		 * failover process already.
3390		 */
3391		spin_lock_irq(&phba->hbalock);
3392		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3393			spin_unlock_irq(&phba->hbalock);
3394			/* Update FLOGI FCF failover eligible FCF bmask */
3395			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3396			break;
3397		}
3398		/* Mark the fast failover process in progress */
3399		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3400		spin_unlock_irq(&phba->hbalock);
3401		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3402				"2771 Start FCF fast failover process due to "
3403				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3404				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3405		rc = lpfc_sli4_redisc_fcf_table(phba);
3406		if (rc) {
3407			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3408					LOG_DISCOVERY,
3409					"2772 Issue FCF rediscover mailbox "
3410					"command failed, fail through to FCF "
3411					"dead event\n");
3412			spin_lock_irq(&phba->hbalock);
3413			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3414			spin_unlock_irq(&phba->hbalock);
3415			/*
3416			 * Last resort will fail over by treating this
3417			 * as a link down to FCF registration.
3418			 */
3419			lpfc_sli4_fcf_dead_failthrough(phba);
3420		} else
3421			/* Handling fast FCF failover to a DEAD FCF event
3422			 * is considered equivalent to receiving CVL to all
3423			 * vports.
3424			 */
3425			lpfc_sli4_perform_all_vport_cvl(phba);
3426		break;
3427	case LPFC_FCOE_EVENT_TYPE_CVL:
3428		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3429			"2718 Clear Virtual Link Received for VPI 0x%x"
3430			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3431		vport = lpfc_find_vport_by_vpid(phba,
3432				acqe_fcoe->index - phba->vpi_base);
3433		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3434		if (!ndlp)
3435			break;
3436		active_vlink_present = 0;
3437
3438		vports = lpfc_create_vport_work_array(phba);
3439		if (vports) {
3440			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3441					i++) {
3442				if ((!(vports[i]->fc_flag &
3443					FC_VPORT_CVL_RCVD)) &&
3444					(vports[i]->port_state > LPFC_FDISC)) {
3445					active_vlink_present = 1;
3446					break;
3447				}
3448			}
3449			lpfc_destroy_vport_work_array(phba, vports);
3450		}
3451
3452		if (active_vlink_present) {
3453			/*
3454			 * If there are other active VLinks present,
3455			 * re-instantiate the Vlink using FDISC.
3456			 */
3457			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3458			shost = lpfc_shost_from_vport(vport);
3459			spin_lock_irq(shost->host_lock);
3460			ndlp->nlp_flag |= NLP_DELAY_TMO;
3461			spin_unlock_irq(shost->host_lock);
3462			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3463			vport->port_state = LPFC_FDISC;
3464		} else {
3465			/*
3466			 * Otherwise, we request port to rediscover
3467			 * the entire FCF table for a fast recovery
3468			 * from possible case that the current FCF
3469			 * is no longer valid if we are not already
3470			 * in the FCF failover process.
3471			 */
3472			spin_lock_irq(&phba->hbalock);
3473			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3474				spin_unlock_irq(&phba->hbalock);
3475				break;
3476			}
3477			/* Mark the fast failover process in progress */
3478			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3479			spin_unlock_irq(&phba->hbalock);
3480			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3481					LOG_DISCOVERY,
3482					"2773 Start FCF fast failover due "
3483					"to CVL event: evt_tag:x%x\n",
3484					acqe_fcoe->event_tag);
3485			rc = lpfc_sli4_redisc_fcf_table(phba);
3486			if (rc) {
3487				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3488						LOG_DISCOVERY,
3489						"2774 Issue FCF rediscover "
3490						"mailbox command failed, "
3491						"through to CVL event\n");
3492				spin_lock_irq(&phba->hbalock);
3493				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3494				spin_unlock_irq(&phba->hbalock);
3495				/*
3496				 * Last resort will be re-try on the
3497				 * currently registered FCF entry.
3498				 */
3499				lpfc_retry_pport_discovery(phba);
3500			}
3501		}
3502		break;
3503	default:
3504		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3505			"0288 Unknown FCoE event type 0x%x event tag "
3506			"0x%x\n", event_type, acqe_fcoe->event_tag);
3507		break;
3508	}
3509}
3510
3511/**
3512 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3513 * @phba: pointer to lpfc hba data structure.
3514 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3515 *
3516 * This routine is to handle the SLI4 asynchronous dcbx event.
3517 **/
3518static void
3519lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3520			 struct lpfc_acqe_dcbx *acqe_dcbx)
3521{
3522	phba->fc_eventTag = acqe_dcbx->event_tag;
3523	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3524			"0290 The SLI4 DCBX asynchronous event is not "
3525			"handled yet\n");
3526}
3527
3528/**
3529 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3530 * @phba: pointer to lpfc hba data structure.
3531 *
3532 * This routine is invoked by the worker thread to process all the pending
3533 * SLI4 asynchronous events.
3534 **/
3535void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3536{
3537	struct lpfc_cq_event *cq_event;
3538
3539	/* First, declare the async event has been handled */
3540	spin_lock_irq(&phba->hbalock);
3541	phba->hba_flag &= ~ASYNC_EVENT;
3542	spin_unlock_irq(&phba->hbalock);
3543	/* Now, handle all the async events */
3544	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3545		/* Get the first event from the head of the event queue */
3546		spin_lock_irq(&phba->hbalock);
3547		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3548				 cq_event, struct lpfc_cq_event, list);
3549		spin_unlock_irq(&phba->hbalock);
3550		/* Process the asynchronous event */
3551		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3552		case LPFC_TRAILER_CODE_LINK:
3553			lpfc_sli4_async_link_evt(phba,
3554						 &cq_event->cqe.acqe_link);
3555			break;
3556		case LPFC_TRAILER_CODE_FCOE:
3557			lpfc_sli4_async_fcoe_evt(phba,
3558						 &cq_event->cqe.acqe_fcoe);
3559			break;
3560		case LPFC_TRAILER_CODE_DCBX:
3561			lpfc_sli4_async_dcbx_evt(phba,
3562						 &cq_event->cqe.acqe_dcbx);
3563			break;
3564		default:
3565			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3566					"1804 Invalid asynchronous event code: "
3567					"x%x\n", bf_get(lpfc_trailer_code,
3568					&cq_event->cqe.mcqe_cmpl));
3569			break;
3570		}
3571		/* Free the completion event processed to the free pool */
3572		lpfc_sli4_cq_event_release(phba, cq_event);
3573	}
3574}
3575
3576/**
3577 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3578 * @phba: pointer to lpfc hba data structure.
3579 *
3580 * This routine is invoked by the worker thread to process FCF table
3581 * rediscovery pending completion event.
3582 **/
3583void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3584{
3585	int rc;
3586
3587	spin_lock_irq(&phba->hbalock);
3588	/* Clear FCF rediscovery timeout event */
3589	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3590	/* Clear driver fast failover FCF record flag */
3591	phba->fcf.failover_rec.flag = 0;
3592	/* Set state for FCF fast failover */
3593	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3594	spin_unlock_irq(&phba->hbalock);
3595
3596	/* Scan FCF table from the first entry to re-discover SAN */
3597	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3598			"2777 Start FCF table scan after FCF "
3599			"rediscovery quiescent period over\n");
3600	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3601	if (rc)
3602		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3603				"2747 Issue FCF scan read FCF mailbox "
3604				"command failed 0x%x\n", rc);
3605}
3606
3607/**
3608 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3609 * @phba: pointer to lpfc hba data structure.
3610 * @dev_grp: The HBA PCI-Device group number.
3611 *
3612 * This routine is invoked to set up the per HBA PCI-Device group function
3613 * API jump table entries.
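 *
 * A minimal sketch of the expected call (probe paths pass the group that
 * matches the PCI device being attached; the error label is hypothetical):
 *
 *	rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	if (rc)
 *		goto out_free_phba;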
3614 *
3615 * Return: 0 if success, otherwise -ENODEV
3616 **/
3617int
3618lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3619{
3620	int rc;
3621
3622	/* Set up lpfc PCI-device group */
3623	phba->pci_dev_grp = dev_grp;
3624
3625	/* The LPFC_PCI_DEV_OC uses SLI4 */
3626	if (dev_grp == LPFC_PCI_DEV_OC)
3627		phba->sli_rev = LPFC_SLI_REV4;
3628
3629	/* Set up device INIT API function jump table */
3630	rc = lpfc_init_api_table_setup(phba, dev_grp);
3631	if (rc)
3632		return -ENODEV;
3633	/* Set up SCSI API function jump table */
3634	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3635	if (rc)
3636		return -ENODEV;
3637	/* Set up SLI API function jump table */
3638	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3639	if (rc)
3640		return -ENODEV;
3641	/* Set up MBOX API function jump table */
3642	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3643	if (rc)
3644		return -ENODEV;
3645
3646	return 0;
3647}
3648
3649/**
3650 * lpfc_log_intr_mode - Log the active interrupt mode
3651 * @phba: pointer to lpfc hba data structure.
3652 * @intr_mode: active interrupt mode adopted.
3653 *
3654 * This routine is invoked to log the currently used active interrupt mode
3655 * to the device.
3656 **/
3657static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3658{
3659	switch (intr_mode) {
3660	case 0:
3661		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3662				"0470 Enabled INTx interrupt mode.\n");
3663		break;
3664	case 1:
3665		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3666				"0481 Enabled MSI interrupt mode.\n");
3667		break;
3668	case 2:
3669		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3670				"0480 Enabled MSI-X interrupt mode.\n");
3671		break;
3672	default:
3673		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3674				"0482 Illegal interrupt mode.\n");
3675		break;
3676	}
3677	return;
3678}
3679
3680/**
3681 * lpfc_enable_pci_dev - Enable a generic PCI device.
3682 * @phba: pointer to lpfc hba data structure.
3683 *
3684 * This routine is invoked to enable the PCI device that is common to all
3685 * PCI devices.
3686 *
3687 * Return codes
3688 * 	0 - successful
3689 * 	other values - error
3690 **/
3691static int
3692lpfc_enable_pci_dev(struct lpfc_hba *phba)
3693{
3694	struct pci_dev *pdev;
3695	int bars;
3696
3697	/* Obtain PCI device reference */
3698	if (!phba->pcidev)
3699		goto out_error;
3700	else
3701		pdev = phba->pcidev;
3702	/* Select PCI BARs */
3703	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3704	/* Enable PCI device */
3705	if (pci_enable_device_mem(pdev))
3706		goto out_error;
3707	/* Request PCI resource for the device */
3708	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3709		goto out_disable_device;
3710	/* Set up device as PCI master and save state for EEH */
3711	pci_set_master(pdev);
3712	pci_try_set_mwi(pdev);
3713	pci_save_state(pdev);
3714
3715	return 0;
3716
3717out_disable_device:
3718	pci_disable_device(pdev);
3719out_error:
3720	return -ENODEV;
3721}
3722
3723/**
3724 * lpfc_disable_pci_dev - Disable a generic PCI device.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to disable the PCI device that is common to all
3728 * PCI devices.
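 * It releases the selected memory BARs, disables the device, and clears
 * the PCI driver data pointer, mirroring lpfc_enable_pci_dev().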
3729 **/
3730static void
3731lpfc_disable_pci_dev(struct lpfc_hba *phba)
3732{
3733	struct pci_dev *pdev;
3734	int bars;
3735
3736	/* Obtain PCI device reference */
3737	if (!phba->pcidev)
3738		return;
3739	else
3740		pdev = phba->pcidev;
3741	/* Select PCI BARs */
3742	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3743	/* Release PCI resource and disable PCI device */
3744	pci_release_selected_regions(pdev, bars);
3745	pci_disable_device(pdev);
3746	/* Null out PCI private reference to driver */
3747	pci_set_drvdata(pdev, NULL);
3748
3749	return;
3750}
3751
3752/**
3753 * lpfc_reset_hba - Reset a hba
3754 * @phba: pointer to lpfc hba data structure.
3755 *
3756 * This routine is invoked to reset a hba device. It brings the HBA
3757 * offline, performs a board restart, and then brings the board back
3758 * online. The lpfc_offline calls lpfc_sli_hba_down, which will clean up
3759 * outstanding mailbox commands.
3760 **/
3761void
3762lpfc_reset_hba(struct lpfc_hba *phba)
3763{
3764	/* If resets are disabled then set error state and return. */
3765	if (!phba->cfg_enable_hba_reset) {
3766		phba->link_state = LPFC_HBA_ERROR;
3767		return;
3768	}
3769	lpfc_offline_prep(phba);
3770	lpfc_offline(phba);
3771	lpfc_sli_brdrestart(phba);
3772	lpfc_online(phba);
3773	lpfc_unblock_mgmt_io(phba);
3774}
3775
3776/**
3777 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3778 * @phba: pointer to lpfc hba data structure.
3779 *
3780 * This routine is invoked to set up the driver internal resources specific to
3781 * support the SLI-3 HBA device it is attached to.
3782 *
3783 * Return codes
3784 * 	0 - successful
3785 * 	other values - error
3786 **/
3787static int
3788lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3789{
3790	struct lpfc_sli *psli;
3791
3792	/*
3793	 * Initialize timers used by driver
3794	 */
3795
3796	/* Heartbeat timer */
3797	init_timer(&phba->hb_tmofunc);
3798	phba->hb_tmofunc.function = lpfc_hb_timeout;
3799	phba->hb_tmofunc.data = (unsigned long)phba;
3800
3801	psli = &phba->sli;
3802	/* MBOX heartbeat timer */
3803	init_timer(&psli->mbox_tmo);
3804	psli->mbox_tmo.function = lpfc_mbox_timeout;
3805	psli->mbox_tmo.data = (unsigned long) phba;
3806	/* FCP polling mode timer */
3807	init_timer(&phba->fcp_poll_timer);
3808	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3809	phba->fcp_poll_timer.data = (unsigned long) phba;
3810	/* Fabric block timer */
3811	init_timer(&phba->fabric_block_timer);
3812	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3813	phba->fabric_block_timer.data = (unsigned long) phba;
3814	/* EA polling mode timer */
3815	init_timer(&phba->eratt_poll);
3816	phba->eratt_poll.function = lpfc_poll_eratt;
3817	phba->eratt_poll.data = (unsigned long) phba;
3818
3819	/* Host attention work mask setup */
3820	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3821	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3822
3823	/* Get all the module params for configuring this host */
3824	lpfc_get_cfgparam(phba);
3825	/*
3826	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3827	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3828	 * 2 segments are added since the IOCB needs a command and response bde.
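	 * For example, with a cfg_sg_seg_cnt of 64 the pool buffer laid out
	 * below works out to:
	 *
	 *	sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
	 *		66 * sizeof(struct ulp_bde64)
	 *
	 * i.e. 64 data segments plus the command and response BDE entries.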
3829	 */
3830	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3831		sizeof(struct fcp_rsp) +
3832			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3833
3834	if (phba->cfg_enable_bg) {
3835		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3836		phba->cfg_sg_dma_buf_size +=
3837			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3838	}
3839
3840	/* Also reinitialize the host templates with new values. */
3841	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3842	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3843
3844	phba->max_vpi = LPFC_MAX_VPI;
3845	/* This will be set to correct value after config_port mbox */
3846	phba->max_vports = 0;
3847
3848	/*
3849	 * Initialize the SLI Layer to run with lpfc HBAs.
3850	 */
3851	lpfc_sli_setup(phba);
3852	lpfc_sli_queue_setup(phba);
3853
3854	/* Allocate device driver memory */
3855	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3856		return -ENOMEM;
3857
3858	return 0;
3859}
3860
3861/**
3862 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3863 * @phba: pointer to lpfc hba data structure.
3864 *
3865 * This routine is invoked to unset the driver internal resources set up
3866 * specific for supporting the SLI-3 HBA device it is attached to.
3867 **/
3868static void
3869lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3870{
3871	/* Free device driver memory allocated */
3872	lpfc_mem_free_all(phba);
3873
3874	return;
3875}
3876
3877/**
3878 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3879 * @phba: pointer to lpfc hba data structure.
3880 *
3881 * This routine is invoked to set up the driver internal resources specific to
3882 * support the SLI-4 HBA device it is attached to.
3883 *
3884 * Return codes
3885 * 	0 - successful
3886 * 	other values - error
3887 **/
3888static int
3889lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3890{
3891	struct lpfc_sli *psli;
3892	LPFC_MBOXQ_t *mboxq;
3893	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3894	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3895	struct lpfc_mqe *mqe;
3896	int longs;
3897
3898	/* Before proceeding, wait for POST done and device ready */
3899	rc = lpfc_sli4_post_status_check(phba);
3900	if (rc)
3901		return -ENODEV;
3902
3903	/*
3904	 * Initialize timers used by driver
3905	 */
3906
3907	/* Heartbeat timer */
3908	init_timer(&phba->hb_tmofunc);
3909	phba->hb_tmofunc.function = lpfc_hb_timeout;
3910	phba->hb_tmofunc.data = (unsigned long)phba;
3911
3912	psli = &phba->sli;
3913	/* MBOX heartbeat timer */
3914	init_timer(&psli->mbox_tmo);
3915	psli->mbox_tmo.function = lpfc_mbox_timeout;
3916	psli->mbox_tmo.data = (unsigned long) phba;
3917	/* Fabric block timer */
3918	init_timer(&phba->fabric_block_timer);
3919	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3920	phba->fabric_block_timer.data = (unsigned long) phba;
3921	/* EA polling mode timer */
3922	init_timer(&phba->eratt_poll);
3923	phba->eratt_poll.function = lpfc_poll_eratt;
3924	phba->eratt_poll.data = (unsigned long) phba;
3925	/* FCF rediscover timer */
3926	init_timer(&phba->fcf.redisc_wait);
3927	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3928	phba->fcf.redisc_wait.data = (unsigned long)phba;
3929
3930	/*
3931	 * We need to do a READ_CONFIG mailbox command here before
3932	 * calling lpfc_get_cfgparam. For VFs this will report the
3933	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3934	 * All of the resources allocated
3935	 * for this Port are tied to these values.
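	 * Until that mailbox completes, max_vpi below is seeded with the
	 * LPFC_MAX_VPI default and max_vports is left at 0.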
3936	 */
3937	/* Get all the module params for configuring this host */
3938	lpfc_get_cfgparam(phba);
3939	phba->max_vpi = LPFC_MAX_VPI;
3940	/* This will be set to correct value after the read_config mbox */
3941	phba->max_vports = 0;
3942
3943	/* Program the default value of vlan_id and fc_map */
3944	phba->valid_vlan = 0;
3945	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3946	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3947	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3948
3949	/*
3950	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3951	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3952	 * 2 segments are added since the IOCB needs a command and response bde.
3953	 * To ensure that the scsi sgl does not cross a 4k page boundary, the
3954	 * sgl size must be a power of 2.
3955	 */
3956	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3957		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3958	/* Feature Level 1 hardware is limited to 2 pages */
3959	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3960	     LPFC_SLI_INTF_FEATURELEVEL1_1))
3961		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3962	else
3963		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3964	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3965	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3966	     dma_buf_size = dma_buf_size << 1)
3967		;
3968	if (dma_buf_size == max_buf_size)
3969		phba->cfg_sg_seg_cnt = (dma_buf_size -
3970			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3971			(2 * sizeof(struct sli4_sge))) /
3972				sizeof(struct sli4_sge);
3973	phba->cfg_sg_dma_buf_size = dma_buf_size;
3974
3975	/* Initialize buffer queue management fields */
3976	hbq_count = lpfc_sli_hbq_count();
3977	for (i = 0; i < hbq_count; ++i)
3978		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3979	INIT_LIST_HEAD(&phba->rb_pend_list);
3980	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3981	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3982
3983	/*
3984	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3985	 */
3986	/* Initialize the Abort scsi buffer list used by driver */
3987	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3988	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3989	/* This abort list used by worker thread */
3990	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3991
3992	/*
3993	 * Initialize driver internal slow-path work queues
3994	 */
3995
3996	/* Driver internal slow-path CQ Event pool */
3997	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3998	/* Response IOCB work queue list */
3999	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4000	/* Asynchronous event CQ Event work queue list */
4001	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4002	/* Fast-path XRI aborted CQ Event work queue list */
4003	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4004	/* Slow-path XRI aborted CQ Event work queue list */
4005	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4006	/* Receive queue CQ Event work queue list */
4007	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4008
4009	/* Initialize the driver internal SLI layer lists.
*/ 4010 lpfc_sli_setup(phba); 4011 lpfc_sli_queue_setup(phba); 4012 4013 /* Allocate device driver memory */ 4014 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 4015 if (rc) 4016 return -ENOMEM; 4017 4018 /* Create the bootstrap mailbox command */ 4019 rc = lpfc_create_bootstrap_mbox(phba); 4020 if (unlikely(rc)) 4021 goto out_free_mem; 4022 4023 /* Set up the host's endian order with the device. */ 4024 rc = lpfc_setup_endian_order(phba); 4025 if (unlikely(rc)) 4026 goto out_free_bsmbx; 4027 4028 rc = lpfc_sli4_fw_cfg_check(phba); 4029 if (unlikely(rc)) 4030 goto out_free_bsmbx; 4031 4032 /* Set up the hba's configuration parameters. */ 4033 rc = lpfc_sli4_read_config(phba); 4034 if (unlikely(rc)) 4035 goto out_free_bsmbx; 4036 4037 /* Perform a function reset */ 4038 rc = lpfc_pci_function_reset(phba); 4039 if (unlikely(rc)) 4040 goto out_free_bsmbx; 4041 4042 /* Create all the SLI4 queues */ 4043 rc = lpfc_sli4_queue_create(phba); 4044 if (rc) 4045 goto out_free_bsmbx; 4046 4047 /* Create driver internal CQE event pool */ 4048 rc = lpfc_sli4_cq_event_pool_create(phba); 4049 if (rc) 4050 goto out_destroy_queue; 4051 4052 /* Initialize and populate the iocb list per host */ 4053 rc = lpfc_init_sgl_list(phba); 4054 if (rc) { 4055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4056 "1400 Failed to initialize sgl list.\n"); 4057 goto out_destroy_cq_event_pool; 4058 } 4059 rc = lpfc_init_active_sgl_array(phba); 4060 if (rc) { 4061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4062 "1430 Failed to initialize sgl list.\n"); 4063 goto out_free_sgl_list; 4064 } 4065 4066 rc = lpfc_sli4_init_rpi_hdrs(phba); 4067 if (rc) { 4068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4069 "1432 Failed to initialize rpi headers.\n"); 4070 goto out_free_active_sgl; 4071 } 4072 4073 /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4074 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4075 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4076 GFP_KERNEL); 4077 if (!phba->fcf.fcf_rr_bmask) { 4078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4079 "2759 Failed allocate memory for FCF round " 4080 "robin failover bmask\n"); 4081 goto out_remove_rpi_hdrs; 4082 } 4083 4084 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4085 phba->cfg_fcp_eq_count), GFP_KERNEL); 4086 if (!phba->sli4_hba.fcp_eq_hdl) { 4087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4088 "2572 Failed allocate memory for fast-path " 4089 "per-EQ handle array\n"); 4090 goto out_free_fcf_rr_bmask; 4091 } 4092 4093 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4094 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4095 if (!phba->sli4_hba.msix_entries) { 4096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4097 "2573 Failed allocate memory for msi-x " 4098 "interrupt vector entries\n"); 4099 goto out_free_fcp_eq_hdl; 4100 } 4101 4102 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4103 GFP_KERNEL); 4104 if (!mboxq) { 4105 rc = -ENOMEM; 4106 goto out_free_fcp_eq_hdl; 4107 } 4108 4109 /* Get the Supported Pages. It is always available. 
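	 * The returned page list is scanned for LPFC_SLI4_PARAMETERS; only
	 * when that page is reported does the driver go on to read the
	 * port's SLI4 parameters.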
 */
4110	lpfc_supported_pages(mboxq);
4111	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4112	if (unlikely(rc)) {
4113		rc = -EIO;
4114		mempool_free(mboxq, phba->mbox_mem_pool);
4115		goto out_free_fcp_eq_hdl;
4116	}
4117
4118	mqe = &mboxq->u.mqe;
4119	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4120	       LPFC_MAX_SUPPORTED_PAGES);
4121	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4122		switch (pn_page[i]) {
4123		case LPFC_SLI4_PARAMETERS:
4124			phba->sli4_hba.pc_sli4_params.supported = 1;
4125			break;
4126		default:
4127			break;
4128		}
4129	}
4130
4131	/* Read the port's SLI4 Parameters capabilities if supported. */
4132	if (phba->sli4_hba.pc_sli4_params.supported)
4133		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4134	mempool_free(mboxq, phba->mbox_mem_pool);
4135	if (rc) {
4136		rc = -EIO;
4137		goto out_free_fcp_eq_hdl;
4138	}
4139	return rc;
4140
4141out_free_fcp_eq_hdl:
4142	kfree(phba->sli4_hba.fcp_eq_hdl);
4143out_free_fcf_rr_bmask:
4144	kfree(phba->fcf.fcf_rr_bmask);
4145out_remove_rpi_hdrs:
4146	lpfc_sli4_remove_rpi_hdrs(phba);
4147out_free_active_sgl:
4148	lpfc_free_active_sgl(phba);
4149out_free_sgl_list:
4150	lpfc_free_sgl_list(phba);
4151out_destroy_cq_event_pool:
4152	lpfc_sli4_cq_event_pool_destroy(phba);
4153out_destroy_queue:
4154	lpfc_sli4_queue_destroy(phba);
4155out_free_bsmbx:
4156	lpfc_destroy_bootstrap_mbox(phba);
4157out_free_mem:
4158	lpfc_mem_free(phba);
4159	return rc;
4160}
4161
4162/**
4163 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4164 * @phba: pointer to lpfc hba data structure.
4165 *
4166 * This routine is invoked to unset the driver internal resources set up
4167 * specific for supporting the SLI-4 HBA device it is attached to.
4168 **/
4169static void
4170lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4171{
4172	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4173
4174	/* unregister default FCFI from the HBA */
4175	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4176
4177	/* Free the default FCF table */
4178	lpfc_sli_remove_dflt_fcf(phba);
4179
4180	/* Free memory allocated for msi-x interrupt vector entries */
4181	kfree(phba->sli4_hba.msix_entries);
4182
4183	/* Free memory allocated for fast-path work queue handles */
4184	kfree(phba->sli4_hba.fcp_eq_hdl);
4185
4186	/* Free the allocated rpi headers. */
4187	lpfc_sli4_remove_rpi_hdrs(phba);
4188	lpfc_sli4_remove_rpis(phba);
4189
4190	/* Free eligible FCF index bmask */
4191	kfree(phba->fcf.fcf_rr_bmask);
4192
4193	/* Free the ELS sgl list */
4194	lpfc_free_active_sgl(phba);
4195	lpfc_free_sgl_list(phba);
4196
4197	/* Free the SCSI sgl management array */
4198	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4199
4200	/* Free the SLI4 queues */
4201	lpfc_sli4_queue_destroy(phba);
4202
4203	/* Free the completion queue EQ event pool */
4204	lpfc_sli4_cq_event_release_all(phba);
4205	lpfc_sli4_cq_event_pool_destroy(phba);
4206
4207	/* Reset SLI4 HBA FCoE function */
4208	lpfc_pci_function_reset(phba);
4209
4210	/* Free the bsmbx region. */
4211	lpfc_destroy_bootstrap_mbox(phba);
4212
4213	/* Free the SLI Layer memory with SLI4 HBAs */
4214	lpfc_mem_free_all(phba);
4215
4216	/* Free the current connect table */
4217	list_for_each_entry_safe(conn_entry, next_conn_entry,
4218				 &phba->fcf_conn_rec_list, list) {
4219		list_del_init(&conn_entry->list);
4220		kfree(conn_entry);
4221	}
4222
4223	return;
4224}
4225
4226/**
4227 * lpfc_init_api_table_setup - Set up init api function jump table
4228 * @phba: The hba struct for which this call is being executed.
4229 * @dev_grp: The HBA PCI-Device group number.
4230 *
4231 * This routine sets up the device INIT interface API function jump table
4232 * in @phba struct.
4233 *
4234 * Returns: 0 - success, -ENODEV - failure.
4235 **/
4236int
4237lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4238{
4239	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4240	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4241	switch (dev_grp) {
4242	case LPFC_PCI_DEV_LP:
4243		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4244		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4245		phba->lpfc_stop_port = lpfc_stop_port_s3;
4246		break;
4247	case LPFC_PCI_DEV_OC:
4248		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4249		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4250		phba->lpfc_stop_port = lpfc_stop_port_s4;
4251		break;
4252	default:
4253		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4254				"1431 Invalid HBA PCI-device group: 0x%x\n",
4255				dev_grp);
4256		return -ENODEV;
4257		break;
4258	}
4259	return 0;
4260}
4261
4262/**
4263 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4264 * @phba: pointer to lpfc hba data structure.
4265 *
4266 * This routine is invoked to set up the driver internal resources before the
4267 * device specific resource setup to support the HBA device it is attached to.
4268 *
4269 * Return codes
4270 *	0 - successful
4271 *	other values - error
4272 **/
4273static int
4274lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4275{
4276	/*
4277	 * Driver resources common to all SLI revisions
4278	 */
4279	atomic_set(&phba->fast_event_count, 0);
4280	spin_lock_init(&phba->hbalock);
4281
4282	/* Initialize ndlp management spinlock */
4283	spin_lock_init(&phba->ndlp_lock);
4284
4285	INIT_LIST_HEAD(&phba->port_list);
4286	INIT_LIST_HEAD(&phba->work_list);
4287	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4288
4289	/* Initialize the wait queue head for the kernel thread */
4290	init_waitqueue_head(&phba->work_waitq);
4291
4292	/* Initialize the scsi buffer list used by driver for scsi IO */
4293	spin_lock_init(&phba->scsi_buf_list_lock);
4294	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4295
4296	/* Initialize the fabric iocb list */
4297	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4298
4299	/* Initialize list to save ELS buffers */
4300	INIT_LIST_HEAD(&phba->elsbuf);
4301
4302	/* Initialize FCF connection rec list */
4303	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4304
4305	return 0;
4306}
4307
4308/**
4309 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4310 * @phba: pointer to lpfc hba data structure.
4311 *
4312 * This routine is invoked to set up the driver internal resources after the
4313 * device specific resource setup to support the HBA device it is attached to.
4314 *
4315 * Return codes
4316 *	0 - successful
4317 *	other values - error
4318 **/
4319static int
4320lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4321{
4322	int error;
4323
4324	/* Startup the kernel thread for this host adapter. */
4325	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4326					  "lpfc_worker_%d", phba->brd_no);
4327	if (IS_ERR(phba->worker_thread)) {
4328		error = PTR_ERR(phba->worker_thread);
4329		return error;
4330	}
4331
4332	return 0;
4333}
4334
4335/**
4336 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4337 * @phba: pointer to lpfc hba data structure.
4338 *
4339 * This routine is invoked to unset the driver internal resources set up after
4340 * the device specific resource setup for supporting the HBA device it is
4341 * attached to.
4342 **/
4343static void
4344lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4345{
4346	/* Stop kernel worker thread */
4347	kthread_stop(phba->worker_thread);
4348}
4349
4350/**
4351 * lpfc_free_iocb_list - Free iocb list.
4352 * @phba: pointer to lpfc hba data structure.
4353 *
4354 * This routine is invoked to free the driver's IOCB list and memory.
4355 **/
4356static void
4357lpfc_free_iocb_list(struct lpfc_hba *phba)
4358{
4359	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4360
4361	spin_lock_irq(&phba->hbalock);
4362	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4363				 &phba->lpfc_iocb_list, list) {
4364		list_del(&iocbq_entry->list);
4365		kfree(iocbq_entry);
4366		phba->total_iocbq_bufs--;
4367	}
4368	spin_unlock_irq(&phba->hbalock);
4369
4370	return;
4371}
4372
4373/**
4374 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4375 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate for the list.
4376 *
4377 * This routine is invoked to allocate and initialize the driver's IOCB
4378 * list and set up the IOCB tag array accordingly.
4379 *
4380 * Return codes
4381 *	0 - successful
4382 *	other values - error
4383 **/
4384static int
4385lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4386{
4387	struct lpfc_iocbq *iocbq_entry = NULL;
4388	uint16_t iotag;
4389	int i;
4390
4391	/* Initialize and populate the iocb list per host. */
4392	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4393	for (i = 0; i < iocb_count; i++) {
4394		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4395		if (iocbq_entry == NULL) {
4396			printk(KERN_ERR "%s: only allocated %d iocbs of "
4397				"expected %d count. Unloading driver.\n",
4398				__func__, i, LPFC_IOCB_LIST_CNT);
4399			goto out_free_iocbq;
4400		}
4401
4402		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4403		if (iotag == 0) {
4404			kfree(iocbq_entry);
4405			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4406				"Unloading driver.\n", __func__);
4407			goto out_free_iocbq;
4408		}
4409		iocbq_entry->sli4_xritag = NO_XRI;
4410
4411		spin_lock_irq(&phba->hbalock);
4412		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4413		phba->total_iocbq_bufs++;
4414		spin_unlock_irq(&phba->hbalock);
4415	}
4416
4417	return 0;
4418
4419out_free_iocbq:
4420	lpfc_free_iocb_list(phba);
4421
4422	return -ENOMEM;
4423}
4424
4425/**
4426 * lpfc_free_sgl_list - Free sgl list.
4427 * @phba: pointer to lpfc hba data structure.
4428 *
4429 * This routine is invoked to free the driver's sgl list and memory.
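 * The entries are first spliced off lpfc_sgl_list under hbalock; each
 * entry's mbuf is then freed before the sgl pages are deregistered from
 * the HBA.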
4430 **/
4431static void
4432lpfc_free_sgl_list(struct lpfc_hba *phba)
4433{
4434	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4435	LIST_HEAD(sglq_list);
4436	int rc = 0;
4437
4438	spin_lock_irq(&phba->hbalock);
4439	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4440	spin_unlock_irq(&phba->hbalock);
4441
4442	list_for_each_entry_safe(sglq_entry, sglq_next,
4443				 &sglq_list, list) {
4444		list_del(&sglq_entry->list);
4445		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4446		kfree(sglq_entry);
4447		phba->sli4_hba.total_sglq_bufs--;
4448	}
4449	rc = lpfc_sli4_remove_all_sgl_pages(phba);
4450	if (rc) {
4451		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4452			"2005 Unable to deregister pages from HBA: %x\n", rc);
4453	}
4454	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4455}
4456
4457/**
4458 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4459 * @phba: pointer to lpfc hba data structure.
4460 *
4461 * This routine is invoked to allocate the driver's active sgl memory.
4462 * This array will hold the sglq_entry's for active IOs.
4463 **/
4464static int
4465lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4466{
4467	int size;
4468	size = sizeof(struct lpfc_sglq *);
4469	size *= phba->sli4_hba.max_cfg_param.max_xri;
4470
4471	phba->sli4_hba.lpfc_sglq_active_list =
4472		kzalloc(size, GFP_KERNEL);
4473	if (!phba->sli4_hba.lpfc_sglq_active_list)
4474		return -ENOMEM;
4475	return 0;
4476}
4477
4478/**
4479 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4480 * @phba: pointer to lpfc hba data structure.
4481 *
4482 * This routine is invoked to walk through the array of active sglq entries
4483 * and free all of the resources.
4484 * This is just a placeholder for now.
4485 **/
4486static void
4487lpfc_free_active_sgl(struct lpfc_hba *phba)
4488{
4489	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4490}
4491
4492/**
4493 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4494 * @phba: pointer to lpfc hba data structure.
4495 *
4496 * This routine is invoked to allocate and initialize the driver's sgl
4497 * list and set up the sgl xritag array accordingly.
4498 *
4499 * Return codes
4500 *	0 - successful
4501 *	other values - error
4502 **/
4503static int
4504lpfc_init_sgl_list(struct lpfc_hba *phba)
4505{
4506	struct lpfc_sglq *sglq_entry = NULL;
4507	int i;
4508	int els_xri_cnt;
4509
4510	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4511	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4512			"2400 lpfc_init_sgl_list els %d.\n",
4513			els_xri_cnt);
4514	/* Initialize and populate the sglq list per host/VF.
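	 * ELS XRIs are reserved first; whatever remains of max_xri is left
	 * for SCSI buffer allocation (see the sanity check below).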
 */
4515	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4516	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4517
4518	/* Sanity check on XRI management */
4519	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4520		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4521				"2562 No room left for SCSI XRI allocation: "
4522				"max_xri=%d, els_xri=%d\n",
4523				phba->sli4_hba.max_cfg_param.max_xri,
4524				els_xri_cnt);
4525		return -ENOMEM;
4526	}
4527
4528	/* Allocate memory for the ELS XRI management array */
4529	phba->sli4_hba.lpfc_els_sgl_array =
4530		kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4531			GFP_KERNEL);
4532
4533	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4534		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4535				"2401 Failed to allocate memory for ELS "
4536				"XRI management array of size %d.\n",
4537				els_xri_cnt);
4538		return -ENOMEM;
4539	}
4540
4541	/* Keep the SCSI XRI into the XRI management array */
4542	phba->sli4_hba.scsi_xri_max =
4543		phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4544	phba->sli4_hba.scsi_xri_cnt = 0;
4545
4546	phba->sli4_hba.lpfc_scsi_psb_array =
4547		kzalloc((sizeof(struct lpfc_scsi_buf *) *
4548			 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4549
4550	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4551		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4552				"2563 Failed to allocate memory for SCSI "
4553				"XRI management array of size %d.\n",
4554				phba->sli4_hba.scsi_xri_max);
4555		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4556		return -ENOMEM;
4557	}
4558
4559	for (i = 0; i < els_xri_cnt; i++) {
4560		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4561		if (sglq_entry == NULL) {
4562			printk(KERN_ERR "%s: only allocated %d sgls of "
4563				"expected %d count. Unloading driver.\n",
4564				__func__, i, els_xri_cnt);
4565			goto out_free_mem;
4566		}
4567
4568		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4569		if (sglq_entry->sli4_xritag == NO_XRI) {
4570			kfree(sglq_entry);
4571			printk(KERN_ERR "%s: failed to allocate XRI. "
4572				"Unloading driver.\n", __func__);
4573			goto out_free_mem;
4574		}
4575		sglq_entry->buff_type = GEN_BUFF_TYPE;
4576		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4577		if (sglq_entry->virt == NULL) {
4578			kfree(sglq_entry);
4579			printk(KERN_ERR "%s: failed to allocate mbuf. "
4580				"Unloading driver.\n", __func__);
4581			goto out_free_mem;
4582		}
4583		sglq_entry->sgl = sglq_entry->virt;
4584		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4585
4586		/* The list order is used by later block SGL registration */
4587		spin_lock_irq(&phba->hbalock);
4588		sglq_entry->state = SGL_FREED;
4589		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4590		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4591		phba->sli4_hba.total_sglq_bufs++;
4592		spin_unlock_irq(&phba->hbalock);
4593	}
4594	return 0;
4595
4596out_free_mem:
4597	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4598	lpfc_free_sgl_list(phba);
4599	return -ENOMEM;
4600}
4601
4602/**
4603 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4604 * @phba: pointer to lpfc hba data structure.
4605 *
4606 * This routine is invoked to post rpi header templates to the
4607 * HBA consistent with the SLI-4 interface spec. This routine
4608 * posts a PAGE_SIZE memory region to the port to hold up to
4609 * PAGE_SIZE / 64 rpi context headers.
4610 * No locks are held here because this is an initialization routine
4611 * called only from probe or lpfc_online when interrupts are not
4612 * enabled and the driver is reinitializing the device.
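 * For example, with 4KB pages a single posting provides headers for up
 * to 64 rpis (LPFC_RPI_HDR_COUNT).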
4613 *
4614 * Return codes
4615 * 	0 - successful
4616 * 	ENOMEM - No available memory
4617 * 	EIO - The mailbox failed to complete successfully.
4618 **/
4619int
4620lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4621{
4622	int rc = 0;
4623	int longs;
4624	uint16_t rpi_count;
4625	struct lpfc_rpi_hdr *rpi_hdr;
4626
4627	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4628
4629	/*
4630	 * Provision an rpi bitmask range for discovery. The bitmask is
4631	 * sized to cover the highest rpi value, rpi_base + max_rpi - 1.
4632	 */
4633	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4634		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4635
4636	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4637	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4638					   GFP_KERNEL);
4639	if (!phba->sli4_hba.rpi_bmask)
4640		return -ENOMEM;
4641
4642	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4643	if (!rpi_hdr) {
4644		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4645				"0391 Error during rpi post operation\n");
4646		lpfc_sli4_remove_rpis(phba);
4647		rc = -ENODEV;
4648	}
4649
4650	return rc;
4651}
4652
4653/**
4654 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4655 * @phba: pointer to lpfc hba data structure.
4656 *
4657 * This routine is invoked to allocate a single 4KB memory region to
4658 * support rpis and store it in the phba. This single region
4659 * provides support for up to 64 rpis. The region is used globally
4660 * by the device.
4661 *
4662 * Returns:
4663 * 	A valid rpi hdr on success.
4664 * 	A NULL pointer on any failure.
4665 **/
4666struct lpfc_rpi_hdr *
4667lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4668{
4669	uint16_t rpi_limit, curr_rpi_range;
4670	struct lpfc_dmabuf *dmabuf;
4671	struct lpfc_rpi_hdr *rpi_hdr;
4672
4673	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4674		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4675
4676	spin_lock_irq(&phba->hbalock);
4677	curr_rpi_range = phba->sli4_hba.next_rpi;
4678	spin_unlock_irq(&phba->hbalock);
4679
4680	/*
4681	 * The port has a limited number of rpis. The increment here
4682	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4683	 * and to allow the full max_rpi range per port.
4684	 */
4685	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4686		return NULL;
4687
4688	/*
4689	 * First allocate the protocol header region for the port. The
4690	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4691	 */
4692	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4693	if (!dmabuf)
4694		return NULL;
4695
4696	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4697					  LPFC_HDR_TEMPLATE_SIZE,
4698					  &dmabuf->phys,
4699					  GFP_KERNEL);
4700	if (!dmabuf->virt) {
4701		rpi_hdr = NULL;
4702		goto err_free_dmabuf;
4703	}
4704
4705	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4706	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4707		rpi_hdr = NULL;
4708		goto err_free_coherent;
4709	}
4710
4711	/* Save the rpi header data for cleanup later. */
4712	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4713	if (!rpi_hdr)
4714		goto err_free_coherent;
4715
4716	rpi_hdr->dmabuf = dmabuf;
4717	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4718	rpi_hdr->page_count = 1;
4719	spin_lock_irq(&phba->hbalock);
4720	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4721	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4722
4723	/*
4724	 * The next_rpi stores the next modulo-64 rpi value to post
4725	 * in any subsequent rpi memory region postings.
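	 * For example, a first posting starting at the rpi base leaves
	 * next_rpi advanced by LPFC_RPI_HDR_COUNT, so the following header
	 * region begins on the next 64-rpi boundary.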
4726 */ 4727 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4728 spin_unlock_irq(&phba->hbalock); 4729 return rpi_hdr; 4730 4731 err_free_coherent: 4732 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4733 dmabuf->virt, dmabuf->phys); 4734 err_free_dmabuf: 4735 kfree(dmabuf); 4736 return NULL; 4737} 4738 4739/** 4740 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4741 * @phba: pointer to lpfc hba data structure. 4742 * 4743 * This routine is invoked to remove all memory resources allocated 4744 * to support rpis. This routine presumes the caller has released all 4745 * rpis consumed by fabric or port logins and is prepared to have 4746 * the header pages removed. 4747 **/ 4748void 4749lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4750{ 4751 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4752 4753 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4754 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4755 list_del(&rpi_hdr->list); 4756 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4757 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4758 kfree(rpi_hdr->dmabuf); 4759 kfree(rpi_hdr); 4760 } 4761 4762 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4763 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4764} 4765 4766/** 4767 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4768 * @pdev: pointer to pci device data structure. 4769 * 4770 * This routine is invoked to allocate the driver hba data structure for an 4771 * HBA device. If the allocation is successful, the phba reference to the 4772 * PCI device data structure is set. 4773 * 4774 * Return codes 4775 * pointer to @phba - successful 4776 * NULL - error 4777 **/ 4778static struct lpfc_hba * 4779lpfc_hba_alloc(struct pci_dev *pdev) 4780{ 4781 struct lpfc_hba *phba; 4782 4783 /* Allocate memory for HBA structure */ 4784 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4785 if (!phba) { 4786 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 4787 return NULL; 4788 } 4789 4790 /* Set reference to PCI device in HBA structure */ 4791 phba->pcidev = pdev; 4792 4793 /* Assign an unused board number */ 4794 phba->brd_no = lpfc_get_instance(); 4795 if (phba->brd_no < 0) { 4796 kfree(phba); 4797 return NULL; 4798 } 4799 4800 spin_lock_init(&phba->ct_ev_lock); 4801 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4802 4803 return phba; 4804} 4805 4806/** 4807 * lpfc_hba_free - Free driver hba data structure with a device. 4808 * @phba: pointer to lpfc hba data structure. 4809 * 4810 * This routine is invoked to free the driver hba data structure with an 4811 * HBA device. 4812 **/ 4813static void 4814lpfc_hba_free(struct lpfc_hba *phba) 4815{ 4816 /* Release the driver assigned board number */ 4817 idr_remove(&lpfc_hba_index, phba->brd_no); 4818 4819 kfree(phba); 4820 return; 4821} 4822 4823/** 4824 * lpfc_create_shost - Create hba physical port with associated scsi host. 4825 * @phba: pointer to lpfc hba data structure. 4826 * 4827 * This routine is invoked to create HBA physical port and associate a SCSI 4828 * host with it. 
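 * The resulting shost is stored as the PCI driver data so that later
 * stages can retrieve it again with pci_get_drvdata().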
4829 * 4830 * Return codes 4831 * 0 - successful 4832 * other values - error 4833 **/ 4834static int 4835lpfc_create_shost(struct lpfc_hba *phba) 4836{ 4837 struct lpfc_vport *vport; 4838 struct Scsi_Host *shost; 4839 4840 /* Initialize HBA FC structure */ 4841 phba->fc_edtov = FF_DEF_EDTOV; 4842 phba->fc_ratov = FF_DEF_RATOV; 4843 phba->fc_altov = FF_DEF_ALTOV; 4844 phba->fc_arbtov = FF_DEF_ARBTOV; 4845 4846 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4847 if (!vport) 4848 return -ENODEV; 4849 4850 shost = lpfc_shost_from_vport(vport); 4851 phba->pport = vport; 4852 lpfc_debugfs_initialize(vport); 4853 /* Put reference to SCSI host to driver's device private data */ 4854 pci_set_drvdata(phba->pcidev, shost); 4855 4856 return 0; 4857} 4858 4859/** 4860 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 4861 * @phba: pointer to lpfc hba data structure. 4862 * 4863 * This routine is invoked to destroy HBA physical port and the associated 4864 * SCSI host. 4865 **/ 4866static void 4867lpfc_destroy_shost(struct lpfc_hba *phba) 4868{ 4869 struct lpfc_vport *vport = phba->pport; 4870 4871 /* Destroy physical port that associated with the SCSI host */ 4872 destroy_port(vport); 4873 4874 return; 4875} 4876 4877/** 4878 * lpfc_setup_bg - Setup Block guard structures and debug areas. 4879 * @phba: pointer to lpfc hba data structure. 4880 * @shost: the shost to be used to detect Block guard settings. 4881 * 4882 * This routine sets up the local Block guard protocol settings for @shost. 4883 * This routine also allocates memory for debugging bg buffers. 4884 **/ 4885static void 4886lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 4887{ 4888 int pagecnt = 10; 4889 if (lpfc_prot_mask && lpfc_prot_guard) { 4890 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4891 "1478 Registering BlockGuard with the " 4892 "SCSI layer\n"); 4893 scsi_host_set_prot(shost, lpfc_prot_mask); 4894 scsi_host_set_guard(shost, lpfc_prot_guard); 4895 } 4896 if (!_dump_buf_data) { 4897 while (pagecnt) { 4898 spin_lock_init(&_dump_buf_lock); 4899 _dump_buf_data = 4900 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4901 if (_dump_buf_data) { 4902 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4903 "9043 BLKGRD: allocated %d pages for " 4904 "_dump_buf_data at 0x%p\n", 4905 (1 << pagecnt), _dump_buf_data); 4906 _dump_buf_data_order = pagecnt; 4907 memset(_dump_buf_data, 0, 4908 ((1 << PAGE_SHIFT) << pagecnt)); 4909 break; 4910 } else 4911 --pagecnt; 4912 } 4913 if (!_dump_buf_data_order) 4914 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4915 "9044 BLKGRD: ERROR unable to allocate " 4916 "memory for hexdump\n"); 4917 } else 4918 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4919 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 4920 "\n", _dump_buf_data); 4921 if (!_dump_buf_dif) { 4922 while (pagecnt) { 4923 _dump_buf_dif = 4924 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4925 if (_dump_buf_dif) { 4926 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4927 "9046 BLKGRD: allocated %d pages for " 4928 "_dump_buf_dif at 0x%p\n", 4929 (1 << pagecnt), _dump_buf_dif); 4930 _dump_buf_dif_order = pagecnt; 4931 memset(_dump_buf_dif, 0, 4932 ((1 << PAGE_SHIFT) << pagecnt)); 4933 break; 4934 } else 4935 --pagecnt; 4936 } 4937 if (!_dump_buf_dif_order) 4938 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4939 "9047 BLKGRD: ERROR unable to allocate " 4940 "memory for hexdump\n"); 4941 } else 4942 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4943 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 4944 _dump_buf_dif); 
4945} 4946 4947/** 4948 * lpfc_post_init_setup - Perform necessary device post initialization setup. 4949 * @phba: pointer to lpfc hba data structure. 4950 * 4951 * This routine is invoked to perform all the necessary post initialization 4952 * setup for the device. 4953 **/ 4954static void 4955lpfc_post_init_setup(struct lpfc_hba *phba) 4956{ 4957 struct Scsi_Host *shost; 4958 struct lpfc_adapter_event_header adapter_event; 4959 4960 /* Get the default values for Model Name and Description */ 4961 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 4962 4963 /* 4964 * hba setup may have changed the hba_queue_depth so we need to 4965 * adjust the value of can_queue. 4966 */ 4967 shost = pci_get_drvdata(phba->pcidev); 4968 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4969 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4970 lpfc_setup_bg(phba, shost); 4971 4972 lpfc_host_attrib_init(shost); 4973 4974 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 4975 spin_lock_irq(shost->host_lock); 4976 lpfc_poll_start_timer(phba); 4977 spin_unlock_irq(shost->host_lock); 4978 } 4979 4980 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4981 "0428 Perform SCSI scan\n"); 4982 /* Send board arrival event to upper layer */ 4983 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 4984 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 4985 fc_host_post_vendor_event(shost, fc_get_event_number(), 4986 sizeof(adapter_event), 4987 (char *) &adapter_event, 4988 LPFC_NL_VENDOR_ID); 4989 return; 4990} 4991 4992/** 4993 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 4994 * @phba: pointer to lpfc hba data structure. 4995 * 4996 * This routine is invoked to set up the PCI device memory space for device 4997 * with SLI-3 interface spec. 4998 * 4999 * Return codes 5000 * 0 - successful 5001 * other values - error 5002 **/ 5003static int 5004lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5005{ 5006 struct pci_dev *pdev; 5007 unsigned long bar0map_len, bar2map_len; 5008 int i, hbq_count; 5009 void *ptr; 5010 int error = -ENODEV; 5011 5012 /* Obtain PCI device reference */ 5013 if (!phba->pcidev) 5014 return error; 5015 else 5016 pdev = phba->pcidev; 5017 5018 /* Set the device DMA mask size */ 5019 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5020 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5021 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5022 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5023 return error; 5024 } 5025 } 5026 5027 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5028 * required by each mapping. 5029 */ 5030 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5031 bar0map_len = pci_resource_len(pdev, 0); 5032 5033 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5034 bar2map_len = pci_resource_len(pdev, 2); 5035 5036 /* Map HBA SLIM to a kernel virtual address. */ 5037 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5038 if (!phba->slim_memmap_p) { 5039 dev_printk(KERN_ERR, &pdev->dev, 5040 "ioremap failed for SLIM memory.\n"); 5041 goto out; 5042 } 5043 5044 /* Map HBA Control Registers to a kernel virtual address. 
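	 * The HA/CA/HS/HC register addresses computed further below are all
	 * offsets from this BAR2 mapping.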
*/ 5045 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5046 if (!phba->ctrl_regs_memmap_p) { 5047 dev_printk(KERN_ERR, &pdev->dev, 5048 "ioremap failed for HBA control registers.\n"); 5049 goto out_iounmap_slim; 5050 } 5051 5052 /* Allocate memory for SLI-2 structures */ 5053 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5054 SLI2_SLIM_SIZE, 5055 &phba->slim2p.phys, 5056 GFP_KERNEL); 5057 if (!phba->slim2p.virt) 5058 goto out_iounmap; 5059 5060 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5061 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5062 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5063 phba->IOCBs = (phba->slim2p.virt + 5064 offsetof(struct lpfc_sli2_slim, IOCBs)); 5065 5066 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5067 lpfc_sli_hbq_size(), 5068 &phba->hbqslimp.phys, 5069 GFP_KERNEL); 5070 if (!phba->hbqslimp.virt) 5071 goto out_free_slim; 5072 5073 hbq_count = lpfc_sli_hbq_count(); 5074 ptr = phba->hbqslimp.virt; 5075 for (i = 0; i < hbq_count; ++i) { 5076 phba->hbqs[i].hbq_virt = ptr; 5077 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5078 ptr += (lpfc_hbq_defs[i]->entry_count * 5079 sizeof(struct lpfc_hbq_entry)); 5080 } 5081 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5082 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5083 5084 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5085 5086 INIT_LIST_HEAD(&phba->rb_pend_list); 5087 5088 phba->MBslimaddr = phba->slim_memmap_p; 5089 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5090 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5091 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5092 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5093 5094 return 0; 5095 5096out_free_slim: 5097 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5098 phba->slim2p.virt, phba->slim2p.phys); 5099out_iounmap: 5100 iounmap(phba->ctrl_regs_memmap_p); 5101out_iounmap_slim: 5102 iounmap(phba->slim_memmap_p); 5103out: 5104 return error; 5105} 5106 5107/** 5108 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5109 * @phba: pointer to lpfc hba data structure. 5110 * 5111 * This routine is invoked to unset the PCI device memory space for device 5112 * with SLI-3 interface spec. 5113 **/ 5114static void 5115lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5116{ 5117 struct pci_dev *pdev; 5118 5119 /* Obtain PCI device reference */ 5120 if (!phba->pcidev) 5121 return; 5122 else 5123 pdev = phba->pcidev; 5124 5125 /* Free coherent DMA memory allocated */ 5126 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5127 phba->hbqslimp.virt, phba->hbqslimp.phys); 5128 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5129 phba->slim2p.virt, phba->slim2p.phys); 5130 5131 /* I/O memory unmap */ 5132 iounmap(phba->ctrl_regs_memmap_p); 5133 iounmap(phba->slim_memmap_p); 5134 5135 return; 5136} 5137 5138/** 5139 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5140 * @phba: pointer to lpfc hba data structure. 5141 * 5142 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5143 * done and check status. 5144 * 5145 * Return 0 if successful, otherwise -ENODEV. 
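 * The POST status register is polled every 10ms for up to 30 seconds;
 * a fatal POST error ends the wait early.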
5146 **/
5147int
5148lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5149{
5150	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5151	int i, port_error = -ENODEV;
5152
5153	if (!phba->sli4_hba.STAregaddr)
5154		return -ENODEV;
5155
5156	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5157	for (i = 0; i < 3000; i++) {
5158		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5159		/* Encountered a fatal POST error, break out */
5160		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5161			port_error = -ENODEV;
5162			break;
5163		}
5164		if (LPFC_POST_STAGE_ARMFW_READY ==
5165		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5166			port_error = 0;
5167			break;
5168		}
5169		msleep(10);
5170	}
5171
5172	if (port_error)
5173		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5174			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5175			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5176			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5177			bf_get(lpfc_hst_state_perr, &sta_reg),
5178			bf_get(lpfc_hst_state_sfi, &sta_reg),
5179			bf_get(lpfc_hst_state_nip, &sta_reg),
5180			bf_get(lpfc_hst_state_ipc, &sta_reg),
5181			bf_get(lpfc_hst_state_xrom, &sta_reg),
5182			bf_get(lpfc_hst_state_dl, &sta_reg),
5183			bf_get(lpfc_hst_state_port_status, &sta_reg));
5184
5185	/* Log device information */
5186	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5187	if (bf_get(lpfc_sli_intf_valid,
5188		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5189		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5190				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5191				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5192				bf_get(lpfc_sli_intf_sli_family,
5193				       &phba->sli4_hba.sli_intf),
5194				bf_get(lpfc_sli_intf_slirev,
5195				       &phba->sli4_hba.sli_intf),
5196				bf_get(lpfc_sli_intf_featurelevel1,
5197				       &phba->sli4_hba.sli_intf),
5198				bf_get(lpfc_sli_intf_featurelevel2,
5199				       &phba->sli4_hba.sli_intf));
5200	}
5201	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5202	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5203	/* With an unrecoverable error, log the error message and return error */
5204	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5205	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5206	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5207	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5208		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5209				"1422 HBA Unrecoverable error: "
5210				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5211				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5212				uerrlo_reg.word0, uerrhi_reg.word0,
5213				phba->sli4_hba.ue_mask_lo,
5214				phba->sli4_hba.ue_mask_hi);
5215		return -ENODEV;
5216	}
5217
5218	return port_error;
5219}
5220
5221/**
5222 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5223 * @phba: pointer to lpfc hba data structure.
5224 *
5225 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5226 * memory map.
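 * The mapping covers the unrecoverable error status and mask registers
 * (UERR/UE_MASK low and high) and the SLI interface register.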
5227 **/
5228static void
5229lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5230{
5231	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5232					LPFC_UERR_STATUS_LO;
5233	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5234					LPFC_UERR_STATUS_HI;
5235	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5236					LPFC_UE_MASK_LO;
5237	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5238					LPFC_UE_MASK_HI;
5239	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5240					LPFC_SLI_INTF;
5241}
5242
5243/**
5244 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5245 * @phba: pointer to lpfc hba data structure.
5246 *
5247 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5248 * memory map.
5249 **/
5250static void
5251lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5252{
5253
5254	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5255				    LPFC_HST_STATE;
5256	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5257				    LPFC_HST_ISR0;
5258	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5259				    LPFC_HST_IMR0;
5260	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5261				    LPFC_HST_ISCR0;
5262	return;
5263}
5264
5265/**
5266 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5267 * @phba: pointer to lpfc hba data structure.
5268 * @vf: virtual function number
5269 *
5270 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5271 * based on the given virtual function number, @vf.
5272 *
5273 * Return 0 if successful, otherwise -ENODEV.
5274 **/
5275static int
5276lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5277{
5278	if (vf > LPFC_VIR_FUNC_MAX)
5279		return -ENODEV;
5280
5281	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5282				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5283	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5284				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5285	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5286				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5287	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5288				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5289	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5290				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5291	return 0;
5292}
5293
5294/**
5295 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5296 * @phba: pointer to lpfc hba data structure.
5297 *
5298 * This routine is invoked to create the bootstrap mailbox
5299 * region consistent with the SLI-4 interface spec. This
5300 * routine allocates all memory necessary to communicate
5301 * mailbox commands to the port and sets up all alignment
5302 * needs. No locks are expected to be held when calling
5303 * this routine.
5304 *
5305 * Return codes
5306 * 	0 - successful
5307 * 	ENOMEM - could not allocate memory.
5308 **/
5309static int
5310lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5311{
5312	uint32_t bmbx_size;
5313	struct lpfc_dmabuf *dmabuf;
5314	struct dma_address *dma_address;
5315	uint32_t pa_addr;
5316	uint64_t phys_addr;
5317
5318	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5319	if (!dmabuf)
5320		return -ENOMEM;
5321
5322	/*
5323	 * The bootstrap mailbox region is comprised of 2 parts
5324	 * plus an alignment restriction of 16 bytes.
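	 * The allocation below is therefore padded by LPFC_ALIGN_16_BYTE - 1
	 * bytes so that PTR_ALIGN()/ALIGN() can round the virtual and
	 * physical addresses up to the required 16-byte boundary.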
5325	 */
5326	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5327	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5328					  bmbx_size,
5329					  &dmabuf->phys,
5330					  GFP_KERNEL);
5331	if (!dmabuf->virt) {
5332		kfree(dmabuf);
5333		return -ENOMEM;
5334	}
5335	memset(dmabuf->virt, 0, bmbx_size);
5336
5337	/*
5338	 * Initialize the bootstrap mailbox pointers now so that the register
5339	 * operations are simple later. The mailbox dma address is required
5340	 * to be 16-byte aligned. Also align the virtual memory as each
5341	 * mailbox is copied into the bmbx mailbox region before issuing the
5342	 * command to the port.
5343	 */
5344	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5345	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5346
5347	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5348					      LPFC_ALIGN_16_BYTE);
5349	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5350					  LPFC_ALIGN_16_BYTE);
5351
5352	/*
5353	 * Set the high and low physical addresses now. The SLI4 alignment
5354	 * requirement is 16 bytes and the mailbox is posted to the port
5355	 * as two 30-bit addresses. The other data is a bit marking whether
5356	 * the 30-bit address is the high or low address.
5357	 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
5358	 * clean on 32 bit machines.
5359	 */
5360	dma_address = &phba->sli4_hba.bmbx.dma_address;
5361	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5362	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5363	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5364					   LPFC_BMBX_BIT1_ADDR_HI);
5365
5366	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5367	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5368					   LPFC_BMBX_BIT1_ADDR_LO);
5369	return 0;
5370}
5371
5372/**
5373 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5374 * @phba: pointer to lpfc hba data structure.
5375 *
5376 * This routine is invoked to teardown the bootstrap mailbox
5377 * region and release all host resources. This routine requires
5378 * the caller to ensure all mailbox commands have been recovered, no
5379 * additional mailbox commands are sent, and interrupts are disabled
5380 * before calling this routine.
5381 *
5382 **/
5383static void
5384lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5385{
5386	dma_free_coherent(&phba->pcidev->dev,
5387			  phba->sli4_hba.bmbx.bmbx_size,
5388			  phba->sli4_hba.bmbx.dmabuf->virt,
5389			  phba->sli4_hba.bmbx.dmabuf->phys);
5390
5391	kfree(phba->sli4_hba.bmbx.dmabuf);
5392	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5393}
5394
5395/**
5396 * lpfc_sli4_read_config - Get the config parameters.
5397 * @phba: pointer to lpfc hba data structure.
5398 *
5399 * This routine is invoked to read the configuration parameters from the HBA.
5400 * The configuration parameters are used to set the base and maximum values
5401 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5402 * allocation for the port.
5403 *
5404 * Return codes
5405 * 	0 - successful
5406 * 	ENOMEM - No available memory
5407 * 	EIO - The mailbox failed to complete successfully.
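 * The base/count pair returned for each resource type is cached in
 * max_cfg_param and echoed in the "2003 cfg params" message on success.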
5408 **/ 5409static int 5410lpfc_sli4_read_config(struct lpfc_hba *phba) 5411{ 5412 LPFC_MBOXQ_t *pmb; 5413 struct lpfc_mbx_read_config *rd_config; 5414 uint32_t rc = 0; 5415 5416 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5417 if (!pmb) { 5418 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5419 "2011 Unable to allocate memory for issuing " 5420 "SLI_CONFIG_SPECIAL mailbox command\n"); 5421 return -ENOMEM; 5422 } 5423 5424 lpfc_read_config(phba, pmb); 5425 5426 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5427 if (rc != MBX_SUCCESS) { 5428 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5429 "2012 Mailbox failed, mbxCmd x%x " 5430 "READ_CONFIG, mbxStatus x%x\n", 5431 bf_get(lpfc_mqe_command, &pmb->u.mqe), 5432 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 5433 rc = -EIO; 5434 } else { 5435 rd_config = &pmb->u.mqe.un.rd_config; 5436 phba->sli4_hba.max_cfg_param.max_xri = 5437 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5438 phba->sli4_hba.max_cfg_param.xri_base = 5439 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 5440 phba->sli4_hba.max_cfg_param.max_vpi = 5441 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 5442 phba->sli4_hba.max_cfg_param.vpi_base = 5443 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 5444 phba->sli4_hba.max_cfg_param.max_rpi = 5445 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 5446 phba->sli4_hba.max_cfg_param.rpi_base = 5447 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 5448 phba->sli4_hba.max_cfg_param.max_vfi = 5449 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 5450 phba->sli4_hba.max_cfg_param.vfi_base = 5451 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5452 phba->sli4_hba.max_cfg_param.max_fcfi = 5453 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5454 phba->sli4_hba.max_cfg_param.fcfi_base = 5455 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); 5456 phba->sli4_hba.max_cfg_param.max_eq = 5457 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5458 phba->sli4_hba.max_cfg_param.max_rq = 5459 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 5460 phba->sli4_hba.max_cfg_param.max_wq = 5461 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 5462 phba->sli4_hba.max_cfg_param.max_cq = 5463 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 5464 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 5465 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 5466 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5467 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5468 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5469 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5470 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5471 phba->max_vports = phba->max_vpi; 5472 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5473 "2003 cfg params XRI(B:%d M:%d), " 5474 "VPI(B:%d M:%d) " 5475 "VFI(B:%d M:%d) " 5476 "RPI(B:%d M:%d) " 5477 "FCFI(B:%d M:%d)\n", 5478 phba->sli4_hba.max_cfg_param.xri_base, 5479 phba->sli4_hba.max_cfg_param.max_xri, 5480 phba->sli4_hba.max_cfg_param.vpi_base, 5481 phba->sli4_hba.max_cfg_param.max_vpi, 5482 phba->sli4_hba.max_cfg_param.vfi_base, 5483 phba->sli4_hba.max_cfg_param.max_vfi, 5484 phba->sli4_hba.max_cfg_param.rpi_base, 5485 phba->sli4_hba.max_cfg_param.max_rpi, 5486 phba->sli4_hba.max_cfg_param.fcfi_base, 5487 phba->sli4_hba.max_cfg_param.max_fcfi); 5488 } 5489 mempool_free(pmb, phba->mbox_mem_pool); 5490 5491 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5492 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) 5493 phba->cfg_hba_queue_depth = 5494 phba->sli4_hba.max_cfg_param.max_xri; 5495 return rc; 5496} 5497 5498/** 5499 * lpfc_setup_endian_order - Notify the port of the host's endian order. 5500 * @phba: pointer to lpfc hba data structure. 5501 * 5502 * This routine is invoked to set up the host-side endian order with the 5503 * HBA consistent with the SLI-4 interface spec. 5504 * 5505 * Return codes 5506 * 0 - successful 5507 * ENOMEM - No available memory 5508 * EIO - The mailbox failed to complete successfully. 5509 **/ 5510static int 5511lpfc_setup_endian_order(struct lpfc_hba *phba) 5512{ 5513 LPFC_MBOXQ_t *mboxq; 5514 uint32_t rc = 0; 5515 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 5516 HOST_ENDIAN_HIGH_WORD1}; 5517 5518 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5519 if (!mboxq) { 5520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5521 "0492 Unable to allocate memory for issuing " 5522 "SLI_CONFIG_SPECIAL mailbox command\n"); 5523 return -ENOMEM; 5524 } 5525 5526 /* 5527 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two 5528 * words to contain special data values and no other data. 5529 */ 5530 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 5531 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 5532 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5533 if (rc != MBX_SUCCESS) { 5534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5535 "0493 SLI_CONFIG_SPECIAL mailbox failed with " 5536 "status x%x\n", 5537 rc); 5538 rc = -EIO; 5539 } 5540 5541 mempool_free(mboxq, phba->mbox_mem_pool); 5542 return rc; 5543} 5544 5545/** 5546 * lpfc_sli4_queue_create - Create all the SLI4 queues 5547 * @phba: pointer to lpfc hba data structure. 5548 * 5549 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 5550 * operation. For each SLI4 queue type, the parameters such as queue entry 5551 * count (queue depth) shall be taken from the module parameter. For now, 5552 * we just use some constant number as a placeholder. 5553 * 5554 * Return codes 5555 * 0 - successful 5556 * ENOMEM - No available memory 5557 * EIO - The mailbox failed to complete successfully.
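 *
 * A minimal create/teardown pairing sketch (illustrative only):
 *
 *	rc = lpfc_sli4_queue_create(phba);
 *	if (rc)
 *		return rc;
 *	...
 *	lpfc_sli4_queue_destroy(phba);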
5558 **/ 5559static int 5560lpfc_sli4_queue_create(struct lpfc_hba *phba) 5561{ 5562 struct lpfc_queue *qdesc; 5563 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5564 int cfg_fcp_wq_count; 5565 int cfg_fcp_eq_count; 5566 5567 /* 5568 * Sanity check for configured queue parameters against the run-time 5569 * device parameters 5570 */ 5571 5572 /* Sanity check on FCP fast-path WQ parameters */ 5573 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 5574 if (cfg_fcp_wq_count > 5575 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 5576 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 5577 LPFC_SP_WQN_DEF; 5578 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 5579 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5580 "2581 Not enough WQs (%d) from " 5581 "the pci function for supporting " 5582 "FCP WQs (%d)\n", 5583 phba->sli4_hba.max_cfg_param.max_wq, 5584 phba->cfg_fcp_wq_count); 5585 goto out_error; 5586 } 5587 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5588 "2582 Not enough WQs (%d) from the pci " 5589 "function for supporting the requested " 5590 "FCP WQs (%d), the actual FCP WQs can " 5591 "be supported: %d\n", 5592 phba->sli4_hba.max_cfg_param.max_wq, 5593 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 5594 } 5595 /* The actual number of FCP work queues adopted */ 5596 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 5597 5598 /* Sanity check on FCP fast-path EQ parameters */ 5599 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 5600 if (cfg_fcp_eq_count > 5601 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 5602 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 5603 LPFC_SP_EQN_DEF; 5604 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 5605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5606 "2574 Not enough EQs (%d) from the " 5607 "pci function for supporting FCP " 5608 "EQs (%d)\n", 5609 phba->sli4_hba.max_cfg_param.max_eq, 5610 phba->cfg_fcp_eq_count); 5611 goto out_error; 5612 } 5613 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5614 "2575 Not enough EQs (%d) from the pci " 5615 "function for supporting the requested " 5616 "FCP EQs (%d), the actual FCP EQs can " 5617 "be supported: %d\n", 5618 phba->sli4_hba.max_cfg_param.max_eq, 5619 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 5620 } 5621 /* It does not make sense to have more EQs than WQs */ 5622 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5623 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5624 "2593 The FCP EQ count(%d) cannot be greater " 5625 "than the FCP WQ count(%d), limiting the " 5626 "FCP EQ count to %d\n", cfg_fcp_eq_count, 5627 phba->cfg_fcp_wq_count, 5628 phba->cfg_fcp_wq_count); 5629 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5630 } 5631 /* The actual number of FCP event queues adopted */ 5632 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 5633 /* The overall number of event queues used */ 5634 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 5635 5636 /* 5637 * Create Event Queues (EQs) 5638 */ 5639 5640 /* Get EQ depth from module parameter, fake the default for now */ 5641 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 5642 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 5643 5644 /* Create slow path event queue */ 5645 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5646 phba->sli4_hba.eq_ecount); 5647 if (!qdesc) { 5648 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5649 "0496 Failed allocate slow-path EQ\n"); 5650 goto out_error; 5651 } 5652 phba->sli4_hba.sp_eq = qdesc; 5653 5654 /* Create fast-path FCP Event Queue(s) */ 5655 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 5656
phba->cfg_fcp_eq_count), GFP_KERNEL); 5657 if (!phba->sli4_hba.fp_eq) { 5658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5659 "2576 Failed allocate memory for fast-path " 5660 "EQ record array\n"); 5661 goto out_free_sp_eq; 5662 } 5663 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5664 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5665 phba->sli4_hba.eq_ecount); 5666 if (!qdesc) { 5667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5668 "0497 Failed allocate fast-path EQ\n"); 5669 goto out_free_fp_eq; 5670 } 5671 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5672 } 5673 5674 /* 5675 * Create Complete Queues (CQs) 5676 */ 5677 5678 /* Get CQ depth from module parameter, fake the default for now */ 5679 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 5680 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 5681 5682 /* Create slow-path Mailbox Command Complete Queue */ 5683 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5684 phba->sli4_hba.cq_ecount); 5685 if (!qdesc) { 5686 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5687 "0500 Failed allocate slow-path mailbox CQ\n"); 5688 goto out_free_fp_eq; 5689 } 5690 phba->sli4_hba.mbx_cq = qdesc; 5691 5692 /* Create slow-path ELS Complete Queue */ 5693 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5694 phba->sli4_hba.cq_ecount); 5695 if (!qdesc) { 5696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5697 "0501 Failed allocate slow-path ELS CQ\n"); 5698 goto out_free_mbx_cq; 5699 } 5700 phba->sli4_hba.els_cq = qdesc; 5701 5702 5703 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5704 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5705 phba->cfg_fcp_eq_count), GFP_KERNEL); 5706 if (!phba->sli4_hba.fcp_cq) { 5707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5708 "2577 Failed allocate memory for fast-path " 5709 "CQ record array\n"); 5710 goto out_free_els_cq; 5711 } 5712 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5713 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5714 phba->sli4_hba.cq_ecount); 5715 if (!qdesc) { 5716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5717 "0499 Failed allocate fast-path FCP " 5718 "CQ (%d)\n", fcp_cqidx); 5719 goto out_free_fcp_cq; 5720 } 5721 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 5722 } 5723 5724 /* Create Mailbox Command Queue */ 5725 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 5726 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 5727 5728 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 5729 phba->sli4_hba.mq_ecount); 5730 if (!qdesc) { 5731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5732 "0505 Failed allocate slow-path MQ\n"); 5733 goto out_free_fcp_cq; 5734 } 5735 phba->sli4_hba.mbx_wq = qdesc; 5736 5737 /* 5738 * Create all the Work Queues (WQs) 5739 */ 5740 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 5741 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 5742 5743 /* Create slow-path ELS Work Queue */ 5744 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5745 phba->sli4_hba.wq_ecount); 5746 if (!qdesc) { 5747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5748 "0504 Failed allocate slow-path ELS WQ\n"); 5749 goto out_free_mbx_wq; 5750 } 5751 phba->sli4_hba.els_wq = qdesc; 5752 5753 /* Create fast-path FCP Work Queue(s) */ 5754 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 5755 phba->cfg_fcp_wq_count), GFP_KERNEL); 5756 if (!phba->sli4_hba.fcp_wq) { 5757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5758 "2578 Failed allocate memory for fast-path " 5759 "WQ record array\n"); 5760 goto 
out_free_els_wq; 5761 } 5762 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5763 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5764 phba->sli4_hba.wq_ecount); 5765 if (!qdesc) { 5766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5767 "0503 Failed allocate fast-path FCP " 5768 "WQ (%d)\n", fcp_wqidx); 5769 goto out_free_fcp_wq; 5770 } 5771 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 5772 } 5773 5774 /* 5775 * Create Receive Queue (RQ) 5776 */ 5777 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 5778 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 5779 5780 /* Create Receive Queue for header */ 5781 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5782 phba->sli4_hba.rq_ecount); 5783 if (!qdesc) { 5784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5785 "0506 Failed allocate receive HRQ\n"); 5786 goto out_free_fcp_wq; 5787 } 5788 phba->sli4_hba.hdr_rq = qdesc; 5789 5790 /* Create Receive Queue for data */ 5791 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5792 phba->sli4_hba.rq_ecount); 5793 if (!qdesc) { 5794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5795 "0507 Failed allocate receive DRQ\n"); 5796 goto out_free_hdr_rq; 5797 } 5798 phba->sli4_hba.dat_rq = qdesc; 5799 5800 return 0; 5801 5802out_free_hdr_rq: 5803 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5804 phba->sli4_hba.hdr_rq = NULL; 5805out_free_fcp_wq: 5806 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 5807 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 5808 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 5809 } 5810 kfree(phba->sli4_hba.fcp_wq); 5811out_free_els_wq: 5812 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5813 phba->sli4_hba.els_wq = NULL; 5814out_free_mbx_wq: 5815 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5816 phba->sli4_hba.mbx_wq = NULL; 5817out_free_fcp_cq: 5818 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 5819 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 5820 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5821 } 5822 kfree(phba->sli4_hba.fcp_cq); 5823out_free_els_cq: 5824 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5825 phba->sli4_hba.els_cq = NULL; 5826out_free_mbx_cq: 5827 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5828 phba->sli4_hba.mbx_cq = NULL; 5829out_free_fp_eq: 5830 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 5831 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 5832 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 5833 } 5834 kfree(phba->sli4_hba.fp_eq); 5835out_free_sp_eq: 5836 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5837 phba->sli4_hba.sp_eq = NULL; 5838out_error: 5839 return -ENOMEM; 5840} 5841 5842/** 5843 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 5844 * @phba: pointer to lpfc hba data structure. 5845 * 5846 * This routine is invoked to release all the SLI4 queues allocated for the 5847 * FCoE HBA operation.
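 *
 * Queues are released in the inverse of their dependency order: work
 * and receive queues first, then completion queues, then event queues,
 * with each pointer reset to NULL after its queue memory is freed.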
5853 **/ 5854static void 5855lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 5856{ 5857 int fcp_qidx; 5858 5859 /* Release mailbox command work queue */ 5860 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5861 phba->sli4_hba.mbx_wq = NULL; 5862 5863 /* Release ELS work queue */ 5864 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5865 phba->sli4_hba.els_wq = NULL; 5866 5867 /* Release FCP work queue */ 5868 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5869 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 5870 kfree(phba->sli4_hba.fcp_wq); 5871 phba->sli4_hba.fcp_wq = NULL; 5872 5873 /* Release unsolicited receive queue */ 5874 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5875 phba->sli4_hba.hdr_rq = NULL; 5876 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5877 phba->sli4_hba.dat_rq = NULL; 5878 5879 /* Release ELS complete queue */ 5880 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5881 phba->sli4_hba.els_cq = NULL; 5882 5883 /* Release mailbox command complete queue */ 5884 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5885 phba->sli4_hba.mbx_cq = NULL; 5886 5887 /* Release FCP response complete queue */ 5888 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5889 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 5890 kfree(phba->sli4_hba.fcp_cq); 5891 phba->sli4_hba.fcp_cq = NULL; 5892 5893 /* Release fast-path event queue */ 5894 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5895 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 5896 kfree(phba->sli4_hba.fp_eq); 5897 phba->sli4_hba.fp_eq = NULL; 5898 5899 /* Release slow-path event queue */ 5900 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5901 phba->sli4_hba.sp_eq = NULL; 5902 5903 return; 5904} 5905 5906/** 5907 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 5908 * @phba: pointer to lpfc hba data structure. 5909 * 5910 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 5911 * operation. 5912 * 5913 * Return codes 5914 * 0 - successful 5915 * ENOMEM - No available memory 5916 * EIO - The mailbox failed to complete successfully.
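 *
 * lpfc_sli4_queue_create() must have succeeded before this routine is
 * called, e.g. (sketch, error handling elided):
 *
 *	rc = lpfc_sli4_queue_create(phba);
 *	if (!rc)
 *		rc = lpfc_sli4_queue_setup(phba);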
5917 **/ 5918int 5919lpfc_sli4_queue_setup(struct lpfc_hba *phba) 5920{ 5921 int rc = -ENOMEM; 5922 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5923 int fcp_cq_index = 0; 5924 5925 /* 5926 * Set up Event Queues (EQs) 5927 */ 5928 5929 /* Set up slow-path event queue */ 5930 if (!phba->sli4_hba.sp_eq) { 5931 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5932 "0520 Slow-path EQ not allocated\n"); 5933 goto out_error; 5934 } 5935 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 5936 LPFC_SP_DEF_IMAX); 5937 if (rc) { 5938 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5939 "0521 Failed setup of slow-path EQ: " 5940 "rc = 0x%x\n", rc); 5941 goto out_error; 5942 } 5943 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5944 "2583 Slow-path EQ setup: queue-id=%d\n", 5945 phba->sli4_hba.sp_eq->queue_id); 5946 5947 /* Set up fast-path event queue */ 5948 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5949 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 5950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5951 "0522 Fast-path EQ (%d) not " 5952 "allocated\n", fcp_eqidx); 5953 goto out_destroy_fp_eq; 5954 } 5955 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 5956 phba->cfg_fcp_imax); 5957 if (rc) { 5958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5959 "0523 Failed setup of fast-path EQ " 5960 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 5961 goto out_destroy_fp_eq; 5962 } 5963 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5964 "2584 Fast-path EQ setup: " 5965 "queue[%d]-id=%d\n", fcp_eqidx, 5966 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 5967 } 5968 5969 /* 5970 * Set up Complete Queues (CQs) 5971 */ 5972 5973 /* Set up slow-path MBOX Complete Queue as the first CQ */ 5974 if (!phba->sli4_hba.mbx_cq) { 5975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5976 "0528 Mailbox CQ not allocated\n"); 5977 goto out_destroy_fp_eq; 5978 } 5979 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 5980 LPFC_MCQ, LPFC_MBOX); 5981 if (rc) { 5982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5983 "0529 Failed setup of slow-path mailbox CQ: " 5984 "rc = 0x%x\n", rc); 5985 goto out_destroy_fp_eq; 5986 } 5987 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5988 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 5989 phba->sli4_hba.mbx_cq->queue_id, 5990 phba->sli4_hba.sp_eq->queue_id); 5991 5992 /* Set up slow-path ELS Complete Queue */ 5993 if (!phba->sli4_hba.els_cq) { 5994 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5995 "0530 ELS CQ not allocated\n"); 5996 goto out_destroy_mbx_cq; 5997 } 5998 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 5999 LPFC_WCQ, LPFC_ELS); 6000 if (rc) { 6001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6002 "0531 Failed setup of slow-path ELS CQ: " 6003 "rc = 0x%x\n", rc); 6004 goto out_destroy_mbx_cq; 6005 } 6006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6007 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6008 phba->sli4_hba.els_cq->queue_id, 6009 phba->sli4_hba.sp_eq->queue_id); 6010 6011 /* Set up fast-path FCP Response Complete Queue */ 6012 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6013 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6015 "0526 Fast-path FCP CQ (%d) not " 6016 "allocated\n", fcp_cqidx); 6017 goto out_destroy_fcp_cq; 6018 } 6019 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6020 phba->sli4_hba.fp_eq[fcp_cqidx], 6021 LPFC_WCQ, LPFC_FCP); 6022 if (rc) { 6023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6024 "0527 Failed setup of fast-path FCP " 6025 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 6026 goto out_destroy_fcp_cq; 6027 } 6028 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6029 "2588 FCP CQ setup: cq[%d]-id=%d, " 6030 "parent eq[%d]-id=%d\n", 6031 fcp_cqidx, 6032 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6033 fcp_cqidx, 6034 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6035 } 6036 6037 /* 6038 * Set up all the Work Queues (WQs) 6039 */ 6040 6041 /* Set up Mailbox Command Queue */ 6042 if (!phba->sli4_hba.mbx_wq) { 6043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6044 "0538 Slow-path MQ not allocated\n"); 6045 goto out_destroy_fcp_cq; 6046 } 6047 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6048 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6049 if (rc) { 6050 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6051 "0539 Failed setup of slow-path MQ: " 6052 "rc = 0x%x\n", rc); 6053 goto out_destroy_fcp_cq; 6054 } 6055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6056 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6057 phba->sli4_hba.mbx_wq->queue_id, 6058 phba->sli4_hba.mbx_cq->queue_id); 6059 6060 /* Set up slow-path ELS Work Queue */ 6061 if (!phba->sli4_hba.els_wq) { 6062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6063 "0536 Slow-path ELS WQ not allocated\n"); 6064 goto out_destroy_mbx_wq; 6065 } 6066 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6067 phba->sli4_hba.els_cq, LPFC_ELS); 6068 if (rc) { 6069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6070 "0537 Failed setup of slow-path ELS WQ: " 6071 "rc = 0x%x\n", rc); 6072 goto out_destroy_mbx_wq; 6073 } 6074 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6075 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6076 phba->sli4_hba.els_wq->queue_id, 6077 phba->sli4_hba.els_cq->queue_id); 6078 6079 /* Set up fast-path FCP Work Queue */ 6080 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6081 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6083 "0534 Fast-path FCP WQ (%d) not " 6084 "allocated\n", fcp_wqidx); 6085 goto out_destroy_fcp_wq; 6086 } 6087 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6088 phba->sli4_hba.fcp_cq[fcp_cq_index], 6089 LPFC_FCP); 6090 if (rc) { 6091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6092 "0535 Failed setup of fast-path FCP " 6093 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6094 goto out_destroy_fcp_wq; 6095 } 6096 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6097 "2591 FCP WQ setup: wq[%d]-id=%d, " 6098 "parent cq[%d]-id=%d\n", 6099 fcp_wqidx, 6100 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6101 fcp_cq_index, 6102 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6103 /* Round robin FCP Work Queue's Completion Queue assignment */ 6104 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6105 } 6106 6107 /* 6108 * Create Receive Queue (RQ) 6109 */ 6110 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6111 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6112 "0540 Receive Queue not allocated\n"); 6113 goto out_destroy_fcp_wq; 6114 } 6115 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6116 phba->sli4_hba.els_cq, LPFC_USOL); 6117 if (rc) { 6118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6119 "0541 Failed setup of Receive Queue: " 6120 "rc = 0x%x\n", rc); 6121 goto out_destroy_fcp_wq; 6122 } 6123 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6124 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6125 "parent cq-id=%d\n", 6126 phba->sli4_hba.hdr_rq->queue_id, 6127 phba->sli4_hba.dat_rq->queue_id, 6128 phba->sli4_hba.els_cq->queue_id); 6129 return 0; 6130 6131out_destroy_fcp_wq: 6132 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--) 6133 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6134 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6135out_destroy_mbx_wq: 6136 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6137out_destroy_fcp_cq: 6138 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6139 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6140 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6141out_destroy_mbx_cq: 6142 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6143out_destroy_fp_eq: 6144 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6145 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6146 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6147out_error: 6148 return rc; 6149} 6150 6151/** 6152 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6153 * @phba: pointer to lpfc hba data structure. 6154 * 6155 * This routine is invoked to unset all the SLI4 queues used by the FCoE HBA 6156 * operation. 6162 **/ 6163void 6164lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6165{ 6166 int fcp_qidx; 6167 6168 /* Unset mailbox command work queue */ 6169 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6170 /* Unset ELS work queue */ 6171 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6172 /* Unset unsolicited receive queue */ 6173 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6174 /* Unset FCP work queue */ 6175 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6176 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6177 /* Unset mailbox command complete queue */ 6178 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6179 /* Unset ELS complete queue */ 6180 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6181 /* Unset FCP response complete queue */ 6182 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6183 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6184 /* Unset fast-path event queue */ 6185 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6186 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6187 /* Unset slow-path event queue */ 6188 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6189} 6190 6191/** 6192 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6193 * @phba: pointer to lpfc hba data structure. 6194 * 6195 * This routine is invoked to allocate and set up a pool of completion queue 6196 * events. The body of the completion queue event is a completion queue entry 6197 * (CQE). For now, this pool is used for the interrupt service routine to queue 6198 * the following HBA completion queue events for the worker thread to process: 6199 * - Mailbox asynchronous events 6200 * - Receive queue completion unsolicited events 6201 * Later, this can be used for all the slow-path events.
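 *
 * The pool is sized at four times the CQ entry count (4 * cq_ecount,
 * see the allocation loop below) so that bursts of slow-path
 * completions can be queued for the worker thread without allocating
 * memory in interrupt context.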
6202 * 6203 * Return codes 6204 * 0 - successful 6205 * -ENOMEM - No available memory 6206 **/ 6207static int 6208lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6209{ 6210 struct lpfc_cq_event *cq_event; 6211 int i; 6212 6213 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6214 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6215 if (!cq_event) 6216 goto out_pool_create_fail; 6217 list_add_tail(&cq_event->list, 6218 &phba->sli4_hba.sp_cqe_event_pool); 6219 } 6220 return 0; 6221 6222out_pool_create_fail: 6223 lpfc_sli4_cq_event_pool_destroy(phba); 6224 return -ENOMEM; 6225} 6226 6227/** 6228 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6229 * @phba: pointer to lpfc hba data structure. 6230 * 6231 * This routine is invoked to free the pool of completion queue events at 6232 * driver unload time. Note that it is the responsibility of the driver 6233 * cleanup routine to free all the outstanding completion-queue events 6234 * allocated from this pool back into the pool before invoking this routine 6235 * to destroy the pool. 6236 **/ 6237static void 6238lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6239{ 6240 struct lpfc_cq_event *cq_event, *next_cq_event; 6241 6242 list_for_each_entry_safe(cq_event, next_cq_event, 6243 &phba->sli4_hba.sp_cqe_event_pool, list) { 6244 list_del(&cq_event->list); 6245 kfree(cq_event); 6246 } 6247} 6248 6249/** 6250 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6251 * @phba: pointer to lpfc hba data structure. 6252 * 6253 * This routine is the lock-free version of the API invoked to allocate a 6254 * completion-queue event from the free pool. 6255 * 6256 * Return: Pointer to the newly allocated completion-queue event if successful 6257 * NULL otherwise. 6258 **/ 6259struct lpfc_cq_event * 6260__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6261{ 6262 struct lpfc_cq_event *cq_event = NULL; 6263 6264 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6265 struct lpfc_cq_event, list); 6266 return cq_event; 6267} 6268 6269/** 6270 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6271 * @phba: pointer to lpfc hba data structure. 6272 * 6273 * This routine is the locked version of the API invoked to allocate a 6274 * completion-queue event from the free pool. 6275 * 6276 * Return: Pointer to the newly allocated completion-queue event if successful 6277 * NULL otherwise. 6278 **/ 6279struct lpfc_cq_event * 6280lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6281{ 6282 struct lpfc_cq_event *cq_event; 6283 unsigned long iflags; 6284 6285 spin_lock_irqsave(&phba->hbalock, iflags); 6286 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6287 spin_unlock_irqrestore(&phba->hbalock, iflags); 6288 return cq_event; 6289} 6290 6291/** 6292 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6293 * @phba: pointer to lpfc hba data structure. 6294 * @cq_event: pointer to the completion queue event to be freed. 6295 * 6296 * This routine is the lock-free version of the API invoked to release a 6297 * completion-queue event back into the free pool. 6298 **/ 6299void 6300__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6301 struct lpfc_cq_event *cq_event) 6302{ 6303 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6304} 6305 6306/** 6307 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6308 * @phba: pointer to lpfc hba data structure.
6309 * @cq_event: pointer to the completion queue event to be freed. 6310 * 6311 * This routine is the locked version of the API invoked to release a 6312 * completion-queue event back into the free pool. 6313 **/ 6314void 6315lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6316 struct lpfc_cq_event *cq_event) 6317{ 6318 unsigned long iflags; 6319 spin_lock_irqsave(&phba->hbalock, iflags); 6320 __lpfc_sli4_cq_event_release(phba, cq_event); 6321 spin_unlock_irqrestore(&phba->hbalock, iflags); 6322} 6323 6324/** 6325 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 6326 * @phba: pointer to lpfc hba data structure. 6327 * 6328 * This routine is invoked to free all the pending completion-queue events 6329 * back into the free pool for device reset. 6330 **/ 6331static void 6332lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 6333{ 6334 LIST_HEAD(cqelist); 6335 struct lpfc_cq_event *cqe; 6336 unsigned long iflags; 6337 6338 /* Retrieve all the pending WCQEs from pending WCQE lists */ 6339 spin_lock_irqsave(&phba->hbalock, iflags); 6340 /* Pending FCP XRI abort events */ 6341 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 6342 &cqelist); 6343 /* Pending ELS XRI abort events */ 6344 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 6345 &cqelist); 6346 /* Pending async events */ 6347 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 6348 &cqelist); 6349 spin_unlock_irqrestore(&phba->hbalock, iflags); 6350 6351 while (!list_empty(&cqelist)) { 6352 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 6353 lpfc_sli4_cq_event_release(phba, cqe); 6354 } 6355} 6356 6357/** 6358 * lpfc_pci_function_reset - Reset pci function. 6359 * @phba: pointer to lpfc hba data structure. 6360 * 6361 * This routine is invoked to request a PCI function reset. It destroys 6362 * all resources assigned to the PCI function which originates this request. 6363 * 6364 * Return codes 6365 * 0 - successful 6366 * ENOMEM - No available memory 6367 * EIO - The mailbox failed to complete successfully. 6368 **/ 6369int 6370lpfc_pci_function_reset(struct lpfc_hba *phba) 6371{ 6372 LPFC_MBOXQ_t *mboxq; 6373 uint32_t rc = 0; 6374 uint32_t shdr_status, shdr_add_status; 6375 union lpfc_sli4_cfg_shdr *shdr; 6376 6377 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6378 if (!mboxq) { 6379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6380 "0494 Unable to allocate memory for issuing " 6381 "SLI_FUNCTION_RESET mailbox command\n"); 6382 return -ENOMEM; 6383 } 6384 6385 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 6386 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6387 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 6388 LPFC_SLI4_MBX_EMBED); 6389 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6390 shdr = (union lpfc_sli4_cfg_shdr *) 6391 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6392 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6393 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6394 if (rc != MBX_TIMEOUT) 6395 mempool_free(mboxq, phba->mbox_mem_pool); 6396 if (shdr_status || shdr_add_status || rc) { 6397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6398 "0495 SLI_FUNCTION_RESET mailbox failed with " 6399 "status x%x add_status x%x, mbx status x%x\n", 6400 shdr_status, shdr_add_status, rc); 6401 rc = -ENXIO; 6402 } 6403 return rc; 6404} 6405 6406/** 6407 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 6408 * @phba: pointer to lpfc hba data structure.
6409 * @cnt: number of nop mailbox commands to send. 6410 * 6411 * This routine is invoked to send a number @cnt of NOP mailbox commands and 6412 * wait for each command to complete. 6413 * 6414 * Return: the number of NOP mailbox commands completed. 6415 **/ 6416static int 6417lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 6418{ 6419 LPFC_MBOXQ_t *mboxq; 6420 int length, cmdsent; 6421 uint32_t mbox_tmo; 6422 uint32_t rc = 0; 6423 uint32_t shdr_status, shdr_add_status; 6424 union lpfc_sli4_cfg_shdr *shdr; 6425 6426 if (cnt == 0) { 6427 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6428 "2518 Requested to send 0 NOP mailbox cmd\n"); 6429 return cnt; 6430 } 6431 6432 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6433 if (!mboxq) { 6434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6435 "2519 Unable to allocate memory for issuing " 6436 "NOP mailbox command\n"); 6437 return 0; 6438 } 6439 6440 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 6441 length = (sizeof(struct lpfc_mbx_nop) - 6442 sizeof(struct lpfc_sli4_cfg_mhdr)); 6443 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6444 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 6445 6446 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6447 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 6448 if (!phba->sli4_hba.intr_enable) 6449 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6450 else 6451 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 6452 if (rc == MBX_TIMEOUT) 6453 break; 6454 /* Check return status */ 6455 shdr = (union lpfc_sli4_cfg_shdr *) 6456 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6457 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6458 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6459 &shdr->response); 6460 if (shdr_status || shdr_add_status || rc) { 6461 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6462 "2520 NOP mailbox command failed " 6463 "status x%x add_status x%x mbx " 6464 "status x%x\n", shdr_status, 6465 shdr_add_status, rc); 6466 break; 6467 } 6468 } 6469 6470 if (rc != MBX_TIMEOUT) 6471 mempool_free(mboxq, phba->mbox_mem_pool); 6472 6473 return cmdsent; 6474} 6475 6476/** 6477 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device 6478 * @phba: pointer to lpfc hba data structure. 6479 * @fcfi: fcf index. 6480 * 6481 * This routine is invoked to unregister an FCFI from the device. 6482 **/ 6483void 6484lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) 6485{ 6486 LPFC_MBOXQ_t *mbox; 6487 uint32_t mbox_tmo; 6488 int rc; 6489 unsigned long flags; 6490 6491 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6492 6493 if (!mbox) 6494 return; 6495 6496 lpfc_unreg_fcfi(mbox, fcfi); 6497 6498 if (!phba->sli4_hba.intr_enable) 6499 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6500 else { 6501 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6502 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6503 } 6504 if (rc != MBX_TIMEOUT) 6505 mempool_free(mbox, phba->mbox_mem_pool); 6506 if (rc != MBX_SUCCESS) 6507 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6508 "2517 Unregister FCFI command failed " 6509 "status %d, mbxStatus x%x\n", rc, 6510 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6511 else { 6512 spin_lock_irqsave(&phba->hbalock, flags); 6513 /* Mark the FCFI as no longer registered */ 6514 phba->fcf.fcf_flag &= 6515 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE); 6516 spin_unlock_irqrestore(&phba->hbalock, flags); 6517 } 6518} 6519 6520/** 6521 * lpfc_sli4_pci_mem_setup - Set up SLI4 HBA PCI memory space.
6522 * @phba: pointer to lpfc hba data structure. 6523 * 6524 * This routine is invoked to set up the PCI device memory space for device 6525 * with SLI-4 interface spec. 6526 * 6527 * Return codes 6528 * 0 - successful 6529 * other values - error 6530 **/ 6531static int 6532lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6533{ 6534 struct pci_dev *pdev; 6535 unsigned long bar0map_len, bar1map_len, bar2map_len; 6536 int error = -ENODEV; 6537 6538 /* Obtain PCI device reference */ 6539 if (!phba->pcidev) 6540 return error; 6541 else 6542 pdev = phba->pcidev; 6543 6544 /* Set the device DMA mask size */ 6545 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6546 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 6547 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6548 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 6549 return error; 6550 } 6551 } 6552 6553 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6554 * number of bytes required by each mapping. They map to the PCI BAR 6555 * regions 0 or 1, 2, and 4 of the SLI4 device. 6556 */ 6557 if (pci_resource_start(pdev, 0)) { 6558 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6559 bar0map_len = pci_resource_len(pdev, 0); 6560 } else { 6561 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6562 bar0map_len = pci_resource_len(pdev, 1); 6563 } 6564 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6565 bar1map_len = pci_resource_len(pdev, 2); 6566 6567 phba->pci_bar2_map = pci_resource_start(pdev, 4); 6568 bar2map_len = pci_resource_len(pdev, 4); 6569 6570 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6571 phba->sli4_hba.conf_regs_memmap_p = 6572 ioremap(phba->pci_bar0_map, bar0map_len); 6573 if (!phba->sli4_hba.conf_regs_memmap_p) { 6574 dev_printk(KERN_ERR, &pdev->dev, 6575 "ioremap failed for SLI4 PCI config registers.\n"); 6576 goto out; 6577 } 6578 6579 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 6580 phba->sli4_hba.ctrl_regs_memmap_p = 6581 ioremap(phba->pci_bar1_map, bar1map_len); 6582 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6583 dev_printk(KERN_ERR, &pdev->dev, 6584 "ioremap failed for SLI4 HBA control registers.\n"); 6585 goto out_iounmap_conf; 6586 } 6587 6588 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 6589 phba->sli4_hba.drbl_regs_memmap_p = 6590 ioremap(phba->pci_bar2_map, bar2map_len); 6591 if (!phba->sli4_hba.drbl_regs_memmap_p) { 6592 dev_printk(KERN_ERR, &pdev->dev, 6593 "ioremap failed for SLI4 HBA doorbell registers.\n"); 6594 goto out_iounmap_ctrl; 6595 } 6596 6597 /* Set up BAR0 PCI config space register memory map */ 6598 lpfc_sli4_bar0_register_memmap(phba); 6599 6600 /* Set up BAR1 register memory map */ 6601 lpfc_sli4_bar1_register_memmap(phba); 6602 6603 /* Set up BAR2 register memory map */ 6604 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 6605 if (error) 6606 goto out_iounmap_all; 6607 6608 return 0; 6609 6610out_iounmap_all: 6611 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6612out_iounmap_ctrl: 6613 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6614out_iounmap_conf: 6615 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6616out: 6617 return error; 6618} 6619 6620/** 6621 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 6622 * @phba: pointer to lpfc hba data structure. 6623 * 6624 * This routine is invoked to unset the PCI device memory space for device 6625 * with SLI-4 interface spec.
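 *
 * The unmap order mirrors lpfc_sli4_pci_mem_setup(): the doorbell,
 * control, and configuration register spaces mapped there are simply
 * iounmap()ed here.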
6626 **/ 6627static void 6628lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 6629{ 6630 struct pci_dev *pdev; 6631 6632 /* Obtain PCI device reference */ 6633 if (!phba->pcidev) 6634 return; 6635 else 6636 pdev = phba->pcidev; 6637 6638 /* Free coherent DMA memory allocated */ 6639 6640 /* Unmap I/O memory space */ 6641 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6642 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6643 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6644 6645 return; 6646} 6647 6648/** 6649 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6650 * @phba: pointer to lpfc hba data structure. 6651 * 6652 * This routine is invoked to enable the MSI-X interrupt vectors to device 6653 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6654 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6655 * invoked, enables either all or nothing, depending on the current 6656 * availability of PCI vector resources. The device driver is responsible 6657 * for calling the individual request_irq() to register each MSI-X vector 6658 * with an interrupt handler, which is done in this function. Note that 6659 * later when device is unloading, the driver should always call free_irq() 6660 * on all MSI-X vectors it has done request_irq() on before calling 6661 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device 6662 * will be left with MSI-X enabled and leak its vectors. 6663 * 6664 * Return codes 6665 * 0 - successful 6666 * other values - error 6667 **/ 6668static int 6669lpfc_sli_enable_msix(struct lpfc_hba *phba) 6670{ 6671 int rc, i; 6672 LPFC_MBOXQ_t *pmb; 6673 6674 /* Set up MSI-X multi-message vectors */ 6675 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6676 phba->msix_entries[i].entry = i; 6677 6678 /* Configure MSI-X capability structure */ 6679 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6680 ARRAY_SIZE(phba->msix_entries)); 6681 if (rc) { 6682 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6683 "0420 PCI enable MSI-X failed (%d)\n", rc); 6684 goto msi_fail_out; 6685 } 6686 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6687 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6688 "0477 MSI-X entry[%d]: vector=x%x " 6689 "message=%d\n", i, 6690 phba->msix_entries[i].vector, 6691 phba->msix_entries[i].entry); 6692 /* 6693 * Assign MSI-X vectors to interrupt handlers 6694 */ 6695 6696 /* vector-0 is associated to slow-path handler */ 6697 rc = request_irq(phba->msix_entries[0].vector, 6698 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6699 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6700 if (rc) { 6701 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6702 "0421 MSI-X slow-path request_irq failed " 6703 "(%d)\n", rc); 6704 goto msi_fail_out; 6705 } 6706 6707 /* vector-1 is associated to fast-path handler */ 6708 rc = request_irq(phba->msix_entries[1].vector, 6709 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6710 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6711 6712 if (rc) { 6713 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6714 "0429 MSI-X fast-path request_irq failed " 6715 "(%d)\n", rc); 6716 goto irq_fail_out; 6717 } 6718 6719 /* 6720 * Configure HBA MSI-X attention conditions to messages 6721 */ 6722 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6723 6724 if (!pmb) { 6725 rc = -ENOMEM; 6726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6727 "0474 Unable to allocate memory for issuing " 6728 "MBOX_CONFIG_MSI command\n"); 6729 goto mem_fail_out; 6730 } 6731 rc = lpfc_config_msi(phba, pmb); 6732 if (rc) 6733 goto mbx_fail_out; 6734 rc =
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6735 if (rc != MBX_SUCCESS) { 6736 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6737 "0351 Config MSI mailbox command failed, " 6738 "mbxCmd x%x, mbxStatus x%x\n", 6739 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6740 goto mbx_fail_out; 6741 } 6742 6743 /* Free memory allocated for mailbox command */ 6744 mempool_free(pmb, phba->mbox_mem_pool); 6745 return rc; 6746 6747mbx_fail_out: 6748 /* Free memory allocated for mailbox command */ 6749 mempool_free(pmb, phba->mbox_mem_pool); 6750 6751mem_fail_out: 6752 /* free the irq already requested */ 6753 free_irq(phba->msix_entries[1].vector, phba); 6754 6755irq_fail_out: 6756 /* free the irq already requested */ 6757 free_irq(phba->msix_entries[0].vector, phba); 6758 6759msi_fail_out: 6760 /* Unconfigure MSI-X capability structure */ 6761 pci_disable_msix(phba->pcidev); 6762 return rc; 6763} 6764 6765/** 6766 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6767 * @phba: pointer to lpfc hba data structure. 6768 * 6769 * This routine is invoked to release the MSI-X vectors and then disable the 6770 * MSI-X interrupt mode to device with SLI-3 interface spec. 6771 **/ 6772static void 6773lpfc_sli_disable_msix(struct lpfc_hba *phba) 6774{ 6775 int i; 6776 6777 /* Free up MSI-X multi-message vectors */ 6778 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6779 free_irq(phba->msix_entries[i].vector, phba); 6780 /* Disable MSI-X */ 6781 pci_disable_msix(phba->pcidev); 6782 6783 return; 6784} 6785 6786/** 6787 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 6788 * @phba: pointer to lpfc hba data structure. 6789 * 6790 * This routine is invoked to enable the MSI interrupt mode to device with 6791 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 6792 * enable the MSI vector. The device driver is responsible for calling 6793 * request_irq() to register the MSI vector with an interrupt handler, which 6794 * is done in this function. 6795 * 6796 * Return codes 6797 * 0 - successful 6798 * other values - error 6799 **/ 6800static int 6801lpfc_sli_enable_msi(struct lpfc_hba *phba) 6802{ 6803 int rc; 6804 6805 rc = pci_enable_msi(phba->pcidev); 6806 if (!rc) 6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6808 "0462 PCI enable MSI mode success.\n"); 6809 else { 6810 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6811 "0471 PCI enable MSI mode failed (%d)\n", rc); 6812 return rc; 6813 } 6814 6815 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6816 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6817 if (rc) { 6818 pci_disable_msi(phba->pcidev); 6819 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6820 "0478 MSI request_irq failed (%d)\n", rc); 6821 } 6822 return rc; 6823} 6824 6825/** 6826 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 6827 * @phba: pointer to lpfc hba data structure. 6828 * 6829 * This routine is invoked to disable the MSI interrupt mode to device with 6830 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has 6831 * done request_irq() on before calling pci_disable_msi(). Failure to do so 6832 * results in a BUG_ON() and the device will be left with MSI enabled and leak 6833 * its vector. 6834 **/ 6835static void 6836lpfc_sli_disable_msi(struct lpfc_hba *phba) 6837{ 6838 free_irq(phba->pcidev->irq, phba); 6839 pci_disable_msi(phba->pcidev); 6840 return; 6841} 6842 6843/** 6844 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 6845 * @phba: pointer to lpfc hba data structure. * @cfg_mode: configured interrupt mode (2 - MSI-X, 1 - MSI, 0 - IRQ/INTx).
6846 * 6847 * This routine is invoked to enable device interrupt and associate driver's 6848 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 6849 * spec. Depending on the interrupt mode configured for the driver, the driver 6850 * will try to fall back from the configured interrupt mode to an interrupt 6851 * mode which is supported by the platform, kernel, and device in the order 6852 * of: 6853 * MSI-X -> MSI -> IRQ. 6854 * 6855 * Return codes 6856 * intr_mode (0 - INTx, 1 - MSI, 2 - MSI-X) - successful 6857 * LPFC_INTR_ERROR - error 6858 **/ 6859static uint32_t 6860lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6861{ 6862 uint32_t intr_mode = LPFC_INTR_ERROR; 6863 int retval; 6864 6865 if (cfg_mode == 2) { 6866 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6867 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6868 if (!retval) { 6869 /* Now, try to enable MSI-X interrupt mode */ 6870 retval = lpfc_sli_enable_msix(phba); 6871 if (!retval) { 6872 /* Indicate initialization to MSI-X mode */ 6873 phba->intr_type = MSIX; 6874 intr_mode = 2; 6875 } 6876 } 6877 } 6878 6879 /* Fallback to MSI if MSI-X initialization failed */ 6880 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6881 retval = lpfc_sli_enable_msi(phba); 6882 if (!retval) { 6883 /* Indicate initialization to MSI mode */ 6884 phba->intr_type = MSI; 6885 intr_mode = 1; 6886 } 6887 } 6888 6889 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6890 if (phba->intr_type == NONE) { 6891 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6892 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6893 if (!retval) { 6894 /* Indicate initialization to INTx mode */ 6895 phba->intr_type = INTx; 6896 intr_mode = 0; 6897 } 6898 } 6899 return intr_mode; 6900} 6901 6902/** 6903 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 6904 * @phba: pointer to lpfc hba data structure. 6905 * 6906 * This routine is invoked to disable device interrupt and disassociate the 6907 * driver's interrupt handler(s) from interrupt vector(s) to device with 6908 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 6909 * release the interrupt vector(s) for the message signaled interrupt. 6910 **/ 6911static void 6912lpfc_sli_disable_intr(struct lpfc_hba *phba) 6913{ 6914 /* Disable the currently initialized interrupt mode */ 6915 if (phba->intr_type == MSIX) 6916 lpfc_sli_disable_msix(phba); 6917 else if (phba->intr_type == MSI) 6918 lpfc_sli_disable_msi(phba); 6919 else if (phba->intr_type == INTx) 6920 free_irq(phba->pcidev->irq, phba); 6921 6922 /* Reset interrupt management states */ 6923 phba->intr_type = NONE; 6924 phba->sli.slistat.sli_intr = 0; 6925 6926 return; 6927} 6928 6929/** 6930 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 6931 * @phba: pointer to lpfc hba data structure. 6932 * 6933 * This routine is invoked to enable the MSI-X interrupt vectors to device 6934 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 6935 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 6936 * enables either all or nothing, depending on the current availability of 6937 * PCI vector resources. The device driver is responsible for calling the 6938 * individual request_irq() to register each MSI-X vector with an interrupt 6939 * handler, which is done in this function.
Note that later when device is 6940 * unloading, the driver should always call free_irq() on all MSI-X vectors 6941 * it has done request_irq() on before calling pci_disable_msix(). Failure 6942 * to do so results in a BUG_ON() and the device will be left with MSI-X 6943 * enabled and leak its vectors. 6944 * 6945 * Return codes 6946 * 0 - successful 6947 * other values - error 6948 **/ 6949static int 6950lpfc_sli4_enable_msix(struct lpfc_hba *phba) 6951{ 6952 int rc, index; 6953 6954 /* Set up MSI-X multi-message vectors */ 6955 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6956 phba->sli4_hba.msix_entries[index].entry = index; 6957 6958 /* Configure MSI-X capability structure */ 6959 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 6960 phba->sli4_hba.cfg_eqn); 6961 if (rc) { 6962 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6963 "0484 PCI enable MSI-X failed (%d)\n", rc); 6964 goto msi_fail_out; 6965 } 6966 /* Log MSI-X vector assignment */ 6967 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6968 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6969 "0489 MSI-X entry[%d]: vector=x%x " 6970 "message=%d\n", index, 6971 phba->sli4_hba.msix_entries[index].vector, 6972 phba->sli4_hba.msix_entries[index].entry); 6973 /* 6974 * Assign MSI-X vectors to interrupt handlers 6975 */ 6976 6977 /* The first vector must be associated with the slow-path handler for MQ */ 6978 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 6979 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 6980 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6981 if (rc) { 6982 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6983 "0485 MSI-X slow-path request_irq failed " 6984 "(%d)\n", rc); 6985 goto msi_fail_out; 6986 } 6987 6988 /* The rest of the vector(s) are associated to fast-path handler(s) */ 6989 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { 6990 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 6991 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 6992 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 6993 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 6994 LPFC_FP_DRIVER_HANDLER_NAME, 6995 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6996 if (rc) { 6997 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6998 "0486 MSI-X fast-path (%d) " 6999 "request_irq failed (%d)\n", index, rc); 7000 goto cfg_fail_out; 7001 } 7002 } 7003 7004 return rc; 7005 7006cfg_fail_out: 7007 /* free the irqs already requested */ 7008 for (--index; index >= 1; index--) 7009 free_irq(phba->sli4_hba.msix_entries[index].vector, 7010 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7011 7012 /* free the irq already requested */ 7013 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7014 7015msi_fail_out: 7016 /* Unconfigure MSI-X capability structure */ 7017 pci_disable_msix(phba->pcidev); 7018 return rc; 7019} 7020 7021/** 7022 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 7023 * @phba: pointer to lpfc hba data structure. 7024 * 7025 * This routine is invoked to release the MSI-X vectors and then disable the 7026 * MSI-X interrupt mode to device with SLI-4 interface spec.
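 *
 * The free_irq() pairing mirrors lpfc_sli4_enable_msix(): vector 0 is
 * released with the phba cookie, and vectors 1 through cfg_eqn - 1 are
 * released with their per-EQ fcp_eq_hdl cookies.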
7027 **/ 7028static void 7029lpfc_sli4_disable_msix(struct lpfc_hba *phba) 7030{ 7031 int index; 7032 7033 /* Free up MSI-X multi-message vectors */ 7034 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7035 7036 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) 7037 free_irq(phba->sli4_hba.msix_entries[index].vector, 7038 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7039 /* Disable MSI-X */ 7040 pci_disable_msix(phba->pcidev); 7041 7042 return; 7043} 7044 7045/** 7046 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 7047 * @phba: pointer to lpfc hba data structure. 7048 * 7049 * This routine is invoked to enable the MSI interrupt mode to device with 7050 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 7051 * to enable the MSI vector. The device driver is responsible for calling 7052 * request_irq() to register the MSI vector with an interrupt handler, 7053 * which is done in this function. 7054 * 7055 * Return codes 7056 * 0 - successful 7057 * other values - error 7058 **/ 7059static int 7060lpfc_sli4_enable_msi(struct lpfc_hba *phba) 7061{ 7062 int rc, index; 7063 7064 rc = pci_enable_msi(phba->pcidev); 7065 if (!rc) 7066 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7067 "0487 PCI enable MSI mode success.\n"); 7068 else { 7069 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7070 "0488 PCI enable MSI mode failed (%d)\n", rc); 7071 return rc; 7072 } 7073 7074 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7075 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7076 if (rc) { 7077 pci_disable_msi(phba->pcidev); 7078 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7079 "0490 MSI request_irq failed (%d)\n", rc); return rc; 7080 } 7081 7082 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 7083 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7084 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7085 } 7086 7087 return rc; 7088} 7089 7090/** 7091 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 7092 * @phba: pointer to lpfc hba data structure. 7093 * 7094 * This routine is invoked to disable the MSI interrupt mode to device with 7095 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has 7096 * done request_irq() on before calling pci_disable_msi(). Failure to do so 7097 * results in a BUG_ON() and the device will be left with MSI enabled and leak 7098 * its vector. 7099 **/ 7100static void 7101lpfc_sli4_disable_msi(struct lpfc_hba *phba) 7102{ 7103 free_irq(phba->pcidev->irq, phba); 7104 pci_disable_msi(phba->pcidev); 7105 return; 7106} 7107 7108/** 7109 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 7110 * @phba: pointer to lpfc hba data structure. * @cfg_mode: configured interrupt mode (2 - MSI-X, 1 - MSI, 0 - IRQ/INTx). 7111 * 7112 * This routine is invoked to enable device interrupt and associate driver's 7113 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 7114 * interface spec. Depending on the interrupt mode configured for the driver, 7115 * the driver will try to fall back from the configured interrupt mode to an 7116 * interrupt mode which is supported by the platform, kernel, and device in 7117 * the order of: 7118 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-4 interface spec.
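 *
 * Unlike the SLI-3 variant above, no board restart is performed here: the
 * port events are cleared, the SLI-4 HBA is brought down, and the device
 * interrupt is released.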
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	phba->pport->work_port_events = 0;

	lpfc_sli4_hba_down(phba);

	lpfc_sli4_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Tear down the queues in the HBA */
	lpfc_sli4_queue_unset(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
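 *
 * Return codes
 * 0 - successful
 * 1 - the SLI4_PARAMS mailbox command failed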
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
	return rc;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
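 *
 * The bring-up below proceeds in a fixed order (PCI enable, PCI memory
 * space, driver resources, iocb list, SCSI host, sysfs attributes, then
 * interrupt configuration), and the error labels at the bottom of the
 * routine unwind those steps in reverse order.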
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1401 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that, as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume, all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during the resume() method call; the
 * driver therefore sets the device to PCI_D3hot state in PCI config space
 * instead of setting it according to the @msg provided by the PM.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that, as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume, all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during the resume() method
 * call; the device will thus be set to PCI_D0 directly in PCI config space
 * before restoring the state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts and stops all the on-going I/Os on the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");
	/* Prepare for bringing HBA offline */
	lpfc_offline_prep(phba);
	/* Clear sli active flag to prevent sysfs access to HBA */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	/* Stop and flush all I/Os and bring HBA offline */
	lpfc_offline(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
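 *
 * Any iocbs outstanding on the FCP ring are aborted so that the SCSI
 * layer can retry them once the link is re-established.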
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");
	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
	/*
	 * There may be I/Os dropped by the firmware. Fail the iocbs (I/Os)
	 * on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring the device online */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else
			return 150;
	} else
		return 0;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-4 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int mcnt;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1409 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba,
				    phba->sli4_hba.max_cfg_param.max_xri);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0426 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* Default to single FCP EQ for non-MSI-X */
		if (phba->intr_type != MSIX)
			phba->cfg_fcp_eq_count = 1;
		/* Set up SLI-4 HBA */
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1421 Failed to set up hba\n");
			error = -ENODEV;
			goto out_disable_intr;
		}

		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
		if (intr_mode != 0)
			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
							    LPFC_ACT_INTR_CNT);

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0451 Configure interrupt mode (%d) "
				"failed active interrupt test.\n",
				intr_mode);
		/* Unset the previous SLI-4 HBA setup */
		lpfc_sli4_unset_hba(phba);
		/* Try next level of interrupt mode */
		cfg_mode = --intr_mode;
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-4 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-4 interface spec
 * is removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
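 *
 * The teardown below releases the vports, removes the FC and SCSI hosts,
 * unsets the SLI-4 HBA (which also disables the device interrupt), and
 * then frees the remaining driver resources in roughly the reverse order
 * of lpfc_pci_probe_one_s4().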
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and
 * bringing the device offline. Note that, as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume, all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during the resume() method call; the
 * driver therefore sets the device to PCI_D3hot state in PCI config space
 * instead of setting it according to the @msg provided by the PM.
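 *
 * As with the SLI-3 variant, the worker thread is stopped and the device
 * interrupt disabled before the device state is saved to PCI config space.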
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0298 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that, as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume, all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during the resume() method call; the
 * device will thus be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
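 *
 * Nothing needs to be done here for the SLI-4 device; the routine exists
 * so that the lpfc_io_resume() dispatcher below has a target for the
 * LPFC_PCI_DEV_OC device group.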
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	return;
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver states that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
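 *
 * The dispatch, like the other routines below, is keyed off
 * phba->pci_dev_grp: LPFC_PCI_DEV_LP selects the SLI-3 routine and
 * LPFC_PCI_DEV_OC selects the SLI-4 routine.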
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
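 *
 * When lpfc_enable_npiv is set, the vport create/delete handlers are
 * plugged into the FC transport template before the transports are
 * attached and the PCI driver is registered.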
 *
 * Return codes
 * 0 - successful
 * -ENOMEM - FC attach transport failed
 * all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
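/*
 * Usage sketch (not part of the driver itself): once built, the module is
 * loaded and removed with the standard module tools, which invoke the
 * lpfc_init()/lpfc_exit() pair bound above by module_init()/module_exit():
 *
 *	# modprobe lpfc
 *	# rmmod lpfc
 */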