lpfc_init.c revision 3ef6d24cd9f473518dd7941e86cc2a5f8992eed0
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

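/*
 * Scratch buffers shared by all HBA instances, used to snapshot command
 * payload and DIF data when debugging BlockGuard errors; the *_order
 * values record the allocated page order and _dump_buf_lock serializes
 * access.
 */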
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, it will set the internal async event support flag
 * to 1; otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
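	/* Build a 12-character serial number from the low 6 bytes of the
	 * WWNN, encoding each nibble as an ASCII hex digit ('0'-'9',
	 * 'a'-'f').
	 */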
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
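				/* Issue a heartbeat mailbox command; if the
				 * allocation fails, just rearm the timer and
				 * retry on the next interval.
				 */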
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

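/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor event on the fc_host so
 * that a registered management application is notified of the port internal
 * error.
 **/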
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO)
			return;
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");
		/*
		 * On error status condition, the driver needs to wait for
		 * port ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				/* don't report event on forced debug dump */
				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
					return;
				else
					break;
			}
			/* fall through for not able to recover */
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
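 *
 * The VPD is a sequence of tagged records: tags 0x82 (identifier string)
 * and 0x91 (read-write fields) are skipped, the keyword fields of interest
 * (SN, V1-V4) are pulled from the 0x90 (read-only) record, and tag 0x78
 * terminates the data.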
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						if ((phba->sli_rev == LPFC_SLI_REV4) &&
						    (phba->sli4_hba.pport_name_sta ==
						     LPFC_SLI4_PPNAME_GET)) {
							j++;
							index++;
						} else
							phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					if ((phba->sli_rev != LPFC_SLI_REV4) ||
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_NON))
						phba->Port[j] = 0;
					continue;
				}
				else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
1814 * @descp: pointer to the data structure to hold the derived description. 1815 * 1816 * This routine retrieves HBA's description based on its registered PCI device 1817 * ID. The @descp passed into this function points to an array of 256 chars. It 1818 * shall be returned with the model name, maximum speed, and the host bus type. 1819 * The @mdp passed into this function points to an array of 80 chars. When the 1820 * function returns, the @mdp will be filled with the model name. 1821 **/ 1822static void 1823lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1824{ 1825 lpfc_vpd_t *vp; 1826 uint16_t dev_id = phba->pcidev->device; 1827 int max_speed; 1828 int GE = 0; 1829 int oneConnect = 0; /* default is not a oneConnect */ 1830 struct { 1831 char *name; 1832 char *bus; 1833 char *function; 1834 } m = {"<Unknown>", "", ""}; 1835 1836 if (mdp && mdp[0] != '\0' 1837 && descp && descp[0] != '\0') 1838 return; 1839 1840 if (phba->lmt & LMT_16Gb) 1841 max_speed = 16; 1842 else if (phba->lmt & LMT_10Gb) 1843 max_speed = 10; 1844 else if (phba->lmt & LMT_8Gb) 1845 max_speed = 8; 1846 else if (phba->lmt & LMT_4Gb) 1847 max_speed = 4; 1848 else if (phba->lmt & LMT_2Gb) 1849 max_speed = 2; 1850 else 1851 max_speed = 1; 1852 1853 vp = &phba->vpd; 1854 1855 switch (dev_id) { 1856 case PCI_DEVICE_ID_FIREFLY: 1857 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1858 break; 1859 case PCI_DEVICE_ID_SUPERFLY: 1860 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1861 m = (typeof(m)){"LP7000", "PCI", 1862 "Fibre Channel Adapter"}; 1863 else 1864 m = (typeof(m)){"LP7000E", "PCI", 1865 "Fibre Channel Adapter"}; 1866 break; 1867 case PCI_DEVICE_ID_DRAGONFLY: 1868 m = (typeof(m)){"LP8000", "PCI", 1869 "Fibre Channel Adapter"}; 1870 break; 1871 case PCI_DEVICE_ID_CENTAUR: 1872 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1873 m = (typeof(m)){"LP9002", "PCI", 1874 "Fibre Channel Adapter"}; 1875 else 1876 m = (typeof(m)){"LP9000", "PCI", 1877 "Fibre Channel Adapter"}; 1878 break; 1879 case PCI_DEVICE_ID_RFLY: 1880 m = (typeof(m)){"LP952", "PCI", 1881 "Fibre Channel Adapter"}; 1882 break; 1883 case PCI_DEVICE_ID_PEGASUS: 1884 m = (typeof(m)){"LP9802", "PCI-X", 1885 "Fibre Channel Adapter"}; 1886 break; 1887 case PCI_DEVICE_ID_THOR: 1888 m = (typeof(m)){"LP10000", "PCI-X", 1889 "Fibre Channel Adapter"}; 1890 break; 1891 case PCI_DEVICE_ID_VIPER: 1892 m = (typeof(m)){"LPX1000", "PCI-X", 1893 "Fibre Channel Adapter"}; 1894 break; 1895 case PCI_DEVICE_ID_PFLY: 1896 m = (typeof(m)){"LP982", "PCI-X", 1897 "Fibre Channel Adapter"}; 1898 break; 1899 case PCI_DEVICE_ID_TFLY: 1900 m = (typeof(m)){"LP1050", "PCI-X", 1901 "Fibre Channel Adapter"}; 1902 break; 1903 case PCI_DEVICE_ID_HELIOS: 1904 m = (typeof(m)){"LP11000", "PCI-X2", 1905 "Fibre Channel Adapter"}; 1906 break; 1907 case PCI_DEVICE_ID_HELIOS_SCSP: 1908 m = (typeof(m)){"LP11000-SP", "PCI-X2", 1909 "Fibre Channel Adapter"}; 1910 break; 1911 case PCI_DEVICE_ID_HELIOS_DCSP: 1912 m = (typeof(m)){"LP11002-SP", "PCI-X2", 1913 "Fibre Channel Adapter"}; 1914 break; 1915 case PCI_DEVICE_ID_NEPTUNE: 1916 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 1917 break; 1918 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1919 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 1920 break; 1921 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1922 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 1923 break; 1924 case PCI_DEVICE_ID_BMID: 1925 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 1926 break; 1927 case 
PCI_DEVICE_ID_BSMB: 1928 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 1929 break; 1930 case PCI_DEVICE_ID_ZEPHYR: 1931 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1932 break; 1933 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1934 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1935 break; 1936 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1937 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1938 GE = 1; 1939 break; 1940 case PCI_DEVICE_ID_ZMID: 1941 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1942 break; 1943 case PCI_DEVICE_ID_ZSMB: 1944 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1945 break; 1946 case PCI_DEVICE_ID_LP101: 1947 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1948 break; 1949 case PCI_DEVICE_ID_LP10000S: 1950 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1951 break; 1952 case PCI_DEVICE_ID_LP11000S: 1953 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1954 break; 1955 case PCI_DEVICE_ID_LPE11000S: 1956 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1957 break; 1958 case PCI_DEVICE_ID_SAT: 1959 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1960 break; 1961 case PCI_DEVICE_ID_SAT_MID: 1962 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1963 break; 1964 case PCI_DEVICE_ID_SAT_SMB: 1965 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1966 break; 1967 case PCI_DEVICE_ID_SAT_DCSP: 1968 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1969 break; 1970 case PCI_DEVICE_ID_SAT_SCSP: 1971 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1972 break; 1973 case PCI_DEVICE_ID_SAT_S: 1974 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1975 break; 1976 case PCI_DEVICE_ID_HORNET: 1977 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1978 GE = 1; 1979 break; 1980 case PCI_DEVICE_ID_PROTEUS_VF: 1981 m = (typeof(m)){"LPev12000", "PCIe IOV", 1982 "Fibre Channel Adapter"}; 1983 break; 1984 case PCI_DEVICE_ID_PROTEUS_PF: 1985 m = (typeof(m)){"LPev12000", "PCIe IOV", 1986 "Fibre Channel Adapter"}; 1987 break; 1988 case PCI_DEVICE_ID_PROTEUS_S: 1989 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1990 "Fibre Channel Adapter"}; 1991 break; 1992 case PCI_DEVICE_ID_TIGERSHARK: 1993 oneConnect = 1; 1994 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1995 break; 1996 case PCI_DEVICE_ID_TOMCAT: 1997 oneConnect = 1; 1998 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1999 break; 2000 case PCI_DEVICE_ID_FALCON: 2001 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2002 "EmulexSecure Fibre"}; 2003 break; 2004 case PCI_DEVICE_ID_BALIUS: 2005 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2006 "Fibre Channel Adapter"}; 2007 break; 2008 case PCI_DEVICE_ID_LANCER_FC: 2009 case PCI_DEVICE_ID_LANCER_FC_VF: 2010 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2011 break; 2012 case PCI_DEVICE_ID_LANCER_FCOE: 2013 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2014 oneConnect = 1; 2015 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2016 break; 2017 default: 2018 m = (typeof(m)){"Unknown", "", ""}; 2019 break; 2020 } 2021 2022 if (mdp && mdp[0] == '\0') 2023 snprintf(mdp, 79,"%s", m.name); 2024 /* 2025 * oneConnect hba requires special processing, they are all initiators 2026 * and we put the port number on the end 2027 */ 2028 if (descp && descp[0] == '\0') { 2029 if (oneConnect) 2030 snprintf(descp, 255, 2031 "Emulex OneConnect %s, %s Initiator, Port %s", 2032 m.name, m.function, 2033 phba->Port); 2034 else 2035 
snprintf(descp, 255, 2036 "Emulex %s %d%s %s %s", 2037 m.name, max_speed, (GE) ? "GE" : "Gb", 2038 m.bus, m.function); 2039 } 2040} 2041 2042/** 2043 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2044 * @phba: pointer to lpfc hba data structure. 2045 * @pring: pointer to a IOCB ring. 2046 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2047 * 2048 * This routine posts a given number of IOCBs with the associated DMA buffer 2049 * descriptors specified by the cnt argument to the given IOCB ring. 2050 * 2051 * Return codes 2052 * The number of IOCBs NOT able to be posted to the IOCB ring. 2053 **/ 2054int 2055lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2056{ 2057 IOCB_t *icmd; 2058 struct lpfc_iocbq *iocb; 2059 struct lpfc_dmabuf *mp1, *mp2; 2060 2061 cnt += pring->missbufcnt; 2062 2063 /* While there are buffers to post */ 2064 while (cnt > 0) { 2065 /* Allocate buffer for command iocb */ 2066 iocb = lpfc_sli_get_iocbq(phba); 2067 if (iocb == NULL) { 2068 pring->missbufcnt = cnt; 2069 return cnt; 2070 } 2071 icmd = &iocb->iocb; 2072 2073 /* 2 buffers can be posted per command */ 2074 /* Allocate buffer to post */ 2075 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2076 if (mp1) 2077 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2078 if (!mp1 || !mp1->virt) { 2079 kfree(mp1); 2080 lpfc_sli_release_iocbq(phba, iocb); 2081 pring->missbufcnt = cnt; 2082 return cnt; 2083 } 2084 2085 INIT_LIST_HEAD(&mp1->list); 2086 /* Allocate buffer to post */ 2087 if (cnt > 1) { 2088 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2089 if (mp2) 2090 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2091 &mp2->phys); 2092 if (!mp2 || !mp2->virt) { 2093 kfree(mp2); 2094 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2095 kfree(mp1); 2096 lpfc_sli_release_iocbq(phba, iocb); 2097 pring->missbufcnt = cnt; 2098 return cnt; 2099 } 2100 2101 INIT_LIST_HEAD(&mp2->list); 2102 } else { 2103 mp2 = NULL; 2104 } 2105 2106 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2107 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2108 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2109 icmd->ulpBdeCount = 1; 2110 cnt--; 2111 if (mp2) { 2112 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2113 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2114 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2115 cnt--; 2116 icmd->ulpBdeCount = 2; 2117 } 2118 2119 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2120 icmd->ulpLe = 1; 2121 2122 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2123 IOCB_ERROR) { 2124 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2125 kfree(mp1); 2126 cnt++; 2127 if (mp2) { 2128 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2129 kfree(mp2); 2130 cnt++; 2131 } 2132 lpfc_sli_release_iocbq(phba, iocb); 2133 pring->missbufcnt = cnt; 2134 return cnt; 2135 } 2136 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2137 if (mp2) 2138 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2139 } 2140 pring->missbufcnt = 0; 2141 return 0; 2142} 2143 2144/** 2145 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2146 * @phba: pointer to lpfc hba data structure. 2147 * 2148 * This routine posts initial receive IOCB buffers to the ELS ring. The 2149 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2150 * set to 64 IOCBs. 
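 *
 * Since lpfc_post_buffer() above packs up to two buffer descriptors into
 * each CMD_QUE_RING_BUF64_CN IOCB, posting 64 buffers consumes roughly 32
 * command IOCBs (an illustrative figure; any shortfall is carried in
 * pring->missbufcnt and retried on a later call).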
2151 * 2152 * Return codes 2153 * 0 - success (currently always success) 2154 **/ 2155static int 2156lpfc_post_rcv_buf(struct lpfc_hba *phba) 2157{ 2158 struct lpfc_sli *psli = &phba->sli; 2159 2160 /* Ring 0, ELS / CT buffers */ 2161 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2162 /* Ring 2 - FCP no buffers needed */ 2163 2164 return 0; 2165} 2166 2167#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2168 2169/** 2170 * lpfc_sha_init - Set up initial array of hash table entries 2171 * @HashResultPointer: pointer to an array as hash table. 2172 * 2173 * This routine sets up the initial values to the array of hash table entries 2174 * for the LC HBAs. 2175 **/ 2176static void 2177lpfc_sha_init(uint32_t * HashResultPointer) 2178{ 2179 HashResultPointer[0] = 0x67452301; 2180 HashResultPointer[1] = 0xEFCDAB89; 2181 HashResultPointer[2] = 0x98BADCFE; 2182 HashResultPointer[3] = 0x10325476; 2183 HashResultPointer[4] = 0xC3D2E1F0; 2184} 2185 2186/** 2187 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2188 * @HashResultPointer: pointer to an initial/result hash table. 2189 * @HashWorkingPointer: pointer to a working hash table. 2190 * 2191 * This routine iterates an initial hash table pointed to by @HashResultPointer 2192 * with the values from the working hash table pointed to by @HashWorkingPointer. 2193 * The results are put back into the initial hash table and returned through 2194 * @HashResultPointer as the result hash table. 2195 **/ 2196static void 2197lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2198{ 2199 int t; 2200 uint32_t TEMP; 2201 uint32_t A, B, C, D, E; 2202 t = 16; 2203 do { 2204 HashWorkingPointer[t] = 2205 S(1, 2206 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2207 8] ^ 2208 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2209 } while (++t <= 79); 2210 t = 0; 2211 A = HashResultPointer[0]; 2212 B = HashResultPointer[1]; 2213 C = HashResultPointer[2]; 2214 D = HashResultPointer[3]; 2215 E = HashResultPointer[4]; 2216 2217 do { 2218 if (t < 20) { 2219 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2220 } else if (t < 40) { 2221 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2222 } else if (t < 60) { 2223 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2224 } else { 2225 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2226 } 2227 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2228 E = D; 2229 D = C; 2230 C = S(30, B); 2231 B = A; 2232 A = TEMP; 2233 } while (++t <= 79); 2234 2235 HashResultPointer[0] += A; 2236 HashResultPointer[1] += B; 2237 HashResultPointer[2] += C; 2238 HashResultPointer[3] += D; 2239 HashResultPointer[4] += E; 2240 2241} 2242 2243/** 2244 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2245 * @RandomChallenge: pointer to the entry of host challenge random number array. 2246 * @HashWorking: pointer to the entry of the working hash array. 2247 * 2248 * This routine calculates the working hash array referred to by @HashWorking 2249 * from the challenge random numbers associated with the host, referred to by 2250 * @RandomChallenge. The result is put into the entry of the working hash 2251 * array and returned by reference through @HashWorking. 2252 **/ 2253static void 2254lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2255{ 2256 *HashWorking = (*RandomChallenge ^ *HashWorking); 2257} 2258 2259/** 2260 * lpfc_hba_init - Perform special handling for LC HBA initialization 2261 * @phba: pointer to lpfc hba data structure.
2262 * @hbainit: pointer to an array of unsigned 32-bit integers. 2263 * 2264 * This routine performs the special handling for LC HBA initialization. 2265 **/ 2266void 2267lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2268{ 2269 int t; 2270 uint32_t *HashWorking; 2271 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2272 2273 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2274 if (!HashWorking) 2275 return; 2276 2277 HashWorking[0] = HashWorking[78] = *pwwnn++; 2278 HashWorking[1] = HashWorking[79] = *pwwnn; 2279 2280 for (t = 0; t < 7; t++) 2281 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2282 2283 lpfc_sha_init(hbainit); 2284 lpfc_sha_iterate(hbainit, HashWorking); 2285 kfree(HashWorking); 2286} 2287 2288/** 2289 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2290 * @vport: pointer to a virtual N_Port data structure. 2291 * 2292 * This routine performs the necessary cleanups before deleting the @vport. 2293 * It invokes the discovery state machine to perform necessary state 2294 * transitions and to release the ndlps associated with the @vport. Note, 2295 * the physical port is treated as @vport 0. 2296 **/ 2297void 2298lpfc_cleanup(struct lpfc_vport *vport) 2299{ 2300 struct lpfc_hba *phba = vport->phba; 2301 struct lpfc_nodelist *ndlp, *next_ndlp; 2302 int i = 0; 2303 2304 if (phba->link_state > LPFC_LINK_DOWN) 2305 lpfc_port_link_failure(vport); 2306 2307 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2308 if (!NLP_CHK_NODE_ACT(ndlp)) { 2309 ndlp = lpfc_enable_node(vport, ndlp, 2310 NLP_STE_UNUSED_NODE); 2311 if (!ndlp) 2312 continue; 2313 spin_lock_irq(&phba->ndlp_lock); 2314 NLP_SET_FREE_REQ(ndlp); 2315 spin_unlock_irq(&phba->ndlp_lock); 2316 /* Trigger the release of the ndlp memory */ 2317 lpfc_nlp_put(ndlp); 2318 continue; 2319 } 2320 spin_lock_irq(&phba->ndlp_lock); 2321 if (NLP_CHK_FREE_REQ(ndlp)) { 2322 /* The ndlp should not be in memory free mode already */ 2323 spin_unlock_irq(&phba->ndlp_lock); 2324 continue; 2325 } else 2326 /* Indicate request for freeing ndlp memory */ 2327 NLP_SET_FREE_REQ(ndlp); 2328 spin_unlock_irq(&phba->ndlp_lock); 2329 2330 if (vport->port_type != LPFC_PHYSICAL_PORT && 2331 ndlp->nlp_DID == Fabric_DID) { 2332 /* Just free up ndlp with Fabric_DID for vports */ 2333 lpfc_nlp_put(ndlp); 2334 continue; 2335 } 2336 2337 if (ndlp->nlp_type & NLP_FABRIC) 2338 lpfc_disc_state_machine(vport, ndlp, NULL, 2339 NLP_EVT_DEVICE_RECOVERY); 2340 2341 lpfc_disc_state_machine(vport, ndlp, NULL, 2342 NLP_EVT_DEVICE_RM); 2343 2344 } 2345 2346 /* At this point, ALL ndlps should be gone 2347 * because of the previous NLP_EVT_DEVICE_RM. 2348 * Let's wait for this to happen, if needed. 2349 */ 2350 while (!list_empty(&vport->fc_nodes)) { 2351 if (i++ > 3000) { 2352 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2353 "0233 Nodelist not empty\n"); 2354 list_for_each_entry_safe(ndlp, next_ndlp, 2355 &vport->fc_nodes, nlp_listp) { 2356 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2357 LOG_NODE, 2358 "0282 did:x%x ndlp:x%p " 2359 "usgmap:x%x refcnt:%d\n", 2360 ndlp->nlp_DID, (void *)ndlp, 2361 ndlp->nlp_usg_map, 2362 atomic_read( 2363 &ndlp->kref.refcount)); 2364 } 2365 break; 2366 } 2367 2368 /* Wait for any activity on ndlps to settle */ 2369 msleep(10); 2370 } 2371 lpfc_cleanup_vports_rrqs(vport, NULL); 2372} 2373 2374/** 2375 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2376 * @vport: pointer to a virtual N_Port data structure.
2377 * 2378 * This routine stops all the timers associated with a @vport. This function 2379 * is invoked before disabling or deleting a @vport. Note that the physical 2380 * port is treated as @vport 0. 2381 **/ 2382void 2383lpfc_stop_vport_timers(struct lpfc_vport *vport) 2384{ 2385 del_timer_sync(&vport->els_tmofunc); 2386 del_timer_sync(&vport->fc_fdmitmo); 2387 del_timer_sync(&vport->delayed_disc_tmo); 2388 lpfc_can_disctmo(vport); 2389 return; 2390} 2391 2392/** 2393 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2394 * @phba: pointer to lpfc hba data structure. 2395 * 2396 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2397 * caller of this routine should already hold the host lock. 2398 **/ 2399void 2400__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2401{ 2402 /* Clear pending FCF rediscovery wait flag */ 2403 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2404 2405 /* Now, try to stop the timer */ 2406 del_timer(&phba->fcf.redisc_wait); 2407} 2408 2409/** 2410 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2411 * @phba: pointer to lpfc hba data structure. 2412 * 2413 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2414 * checks whether the FCF rediscovery wait timer is pending with the host 2415 * lock held before proceeding with disabling the timer and clearing the 2416 * wait timer pending flag. 2417 **/ 2418void 2419lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2420{ 2421 spin_lock_irq(&phba->hbalock); 2422 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2423 /* FCF rediscovery timer already fired or stopped */ 2424 spin_unlock_irq(&phba->hbalock); 2425 return; 2426 } 2427 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2428 /* Clear failover in progress flags */ 2429 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2430 spin_unlock_irq(&phba->hbalock); 2431} 2432 2433/** 2434 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2435 * @phba: pointer to lpfc hba data structure. 2436 * 2437 * This routine stops all the timers associated with a HBA. This function is 2438 * invoked before either putting a HBA offline or unloading the driver. 2439 **/ 2440void 2441lpfc_stop_hba_timers(struct lpfc_hba *phba) 2442{ 2443 lpfc_stop_vport_timers(phba->pport); 2444 del_timer_sync(&phba->sli.mbox_tmo); 2445 del_timer_sync(&phba->fabric_block_timer); 2446 del_timer_sync(&phba->eratt_poll); 2447 del_timer_sync(&phba->hb_tmofunc); 2448 if (phba->sli_rev == LPFC_SLI_REV4) { 2449 del_timer_sync(&phba->rrq_tmr); 2450 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2451 } 2452 phba->hb_outstanding = 0; 2453 2454 switch (phba->pci_dev_grp) { 2455 case LPFC_PCI_DEV_LP: 2456 /* Stop any LightPulse device specific driver timers */ 2457 del_timer_sync(&phba->fcp_poll_timer); 2458 break; 2459 case LPFC_PCI_DEV_OC: 2460 /* Stop any OneConnect device specific driver timers */ 2461 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2462 break; 2463 default: 2464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2465 "0297 Invalid device group (x%x)\n", 2466 phba->pci_dev_grp); 2467 break; 2468 } 2469 return; 2470} 2471 2472/** 2473 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2474 * @phba: pointer to lpfc hba data structure. 2475 * 2476 * This routine marks a HBA's management interface as blocked.
Once the HBA's 2477 * management interface is marked as blocked, all the user space access to 2478 * the HBA, whether they are from sysfs interface or libdfc interface will 2479 * all be blocked. The HBA is set to block the management interface when the 2480 * driver prepares the HBA interface for online or offline. 2481 **/ 2482static void 2483lpfc_block_mgmt_io(struct lpfc_hba * phba) 2484{ 2485 unsigned long iflag; 2486 uint8_t actcmd = MBX_HEARTBEAT; 2487 unsigned long timeout; 2488 2489 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2490 spin_lock_irqsave(&phba->hbalock, iflag); 2491 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2492 if (phba->sli.mbox_active) { 2493 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2494 /* Determine how long we might wait for the active mailbox 2495 * command to be gracefully completed by firmware. 2496 */ 2497 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2498 phba->sli.mbox_active) * 1000) + jiffies; 2499 } 2500 spin_unlock_irqrestore(&phba->hbalock, iflag); 2501 2502 /* Wait for the outstanding mailbox command to complete */ 2503 while (phba->sli.mbox_active) { 2504 /* Check active mailbox complete status every 2ms */ 2505 msleep(2); 2506 if (time_after(jiffies, timeout)) { 2507 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2508 "2813 Mgmt IO is Blocked %x " 2509 "- mbox cmd %x still active\n", 2510 phba->sli.sli_flag, actcmd); 2511 break; 2512 } 2513 } 2514} 2515 2516/** 2517 * lpfc_online - Initialize and bring a HBA online 2518 * @phba: pointer to lpfc hba data structure. 2519 * 2520 * This routine initializes the HBA and brings a HBA online. During this 2521 * process, the management interface is blocked to prevent user space access 2522 * to the HBA interfering with the driver initialization. 2523 * 2524 * Return codes 2525 * 0 - successful 2526 * 1 - failed 2527 **/ 2528int 2529lpfc_online(struct lpfc_hba *phba) 2530{ 2531 struct lpfc_vport *vport; 2532 struct lpfc_vport **vports; 2533 int i; 2534 2535 if (!phba) 2536 return 0; 2537 vport = phba->pport; 2538 2539 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2540 return 0; 2541 2542 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2543 "0458 Bring Adapter online\n"); 2544 2545 lpfc_block_mgmt_io(phba); 2546 2547 if (!lpfc_sli_queue_setup(phba)) { 2548 lpfc_unblock_mgmt_io(phba); 2549 return 1; 2550 } 2551 2552 if (phba->sli_rev == LPFC_SLI_REV4) { 2553 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2554 lpfc_unblock_mgmt_io(phba); 2555 return 1; 2556 } 2557 } else { 2558 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2559 lpfc_unblock_mgmt_io(phba); 2560 return 1; 2561 } 2562 } 2563 2564 vports = lpfc_create_vport_work_array(phba); 2565 if (vports != NULL) 2566 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2567 struct Scsi_Host *shost; 2568 shost = lpfc_shost_from_vport(vports[i]); 2569 spin_lock_irq(shost->host_lock); 2570 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2571 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2572 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2573 if (phba->sli_rev == LPFC_SLI_REV4) 2574 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2575 spin_unlock_irq(shost->host_lock); 2576 } 2577 lpfc_destroy_vport_work_array(phba, vports); 2578 2579 lpfc_unblock_mgmt_io(phba); 2580 return 0; 2581} 2582 2583/** 2584 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2585 * @phba: pointer to lpfc hba data structure. 2586 * 2587 * This routine marks a HBA's management interface as not blocked.
Once the 2588 * HBA's management interface is marked as not blocked, all the user space 2589 * access to the HBA, whether they are from sysfs interface or libdfc 2590 * interface will be allowed. The HBA is set to block the management interface 2591 * when the driver prepares the HBA interface for online or offline and then 2592 * set to unblock the management interface afterwards. 2593 **/ 2594void 2595lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2596{ 2597 unsigned long iflag; 2598 2599 spin_lock_irqsave(&phba->hbalock, iflag); 2600 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2601 spin_unlock_irqrestore(&phba->hbalock, iflag); 2602} 2603 2604/** 2605 * lpfc_offline_prep - Prepare a HBA to be brought offline 2606 * @phba: pointer to lpfc hba data structure. 2607 * 2608 * This routine is invoked to prepare a HBA to be brought offline. It performs 2609 * unregistration login to all the nodes on all vports and flushes the mailbox 2610 * queue to make it ready to be brought offline. 2611 **/ 2612void 2613lpfc_offline_prep(struct lpfc_hba * phba) 2614{ 2615 struct lpfc_vport *vport = phba->pport; 2616 struct lpfc_nodelist *ndlp, *next_ndlp; 2617 struct lpfc_vport **vports; 2618 struct Scsi_Host *shost; 2619 int i; 2620 2621 if (vport->fc_flag & FC_OFFLINE_MODE) 2622 return; 2623 2624 lpfc_block_mgmt_io(phba); 2625 2626 lpfc_linkdown(phba); 2627 2628 /* Issue an unreg_login to all nodes on all vports */ 2629 vports = lpfc_create_vport_work_array(phba); 2630 if (vports != NULL) { 2631 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2632 if (vports[i]->load_flag & FC_UNLOADING) 2633 continue; 2634 shost = lpfc_shost_from_vport(vports[i]); 2635 spin_lock_irq(shost->host_lock); 2636 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2637 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2638 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2639 spin_unlock_irq(shost->host_lock); 2640 2641 shost = lpfc_shost_from_vport(vports[i]); 2642 list_for_each_entry_safe(ndlp, next_ndlp, 2643 &vports[i]->fc_nodes, 2644 nlp_listp) { 2645 if (!NLP_CHK_NODE_ACT(ndlp)) 2646 continue; 2647 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2648 continue; 2649 if (ndlp->nlp_type & NLP_FABRIC) { 2650 lpfc_disc_state_machine(vports[i], ndlp, 2651 NULL, NLP_EVT_DEVICE_RECOVERY); 2652 lpfc_disc_state_machine(vports[i], ndlp, 2653 NULL, NLP_EVT_DEVICE_RM); 2654 } 2655 spin_lock_irq(shost->host_lock); 2656 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2657 spin_unlock_irq(shost->host_lock); 2658 lpfc_unreg_rpi(vports[i], ndlp); 2659 } 2660 } 2661 } 2662 lpfc_destroy_vport_work_array(phba, vports); 2663 2664 lpfc_sli_mbox_sys_shutdown(phba); 2665} 2666 2667/** 2668 * lpfc_offline - Bring a HBA offline 2669 * @phba: pointer to lpfc hba data structure. 2670 * 2671 * This routine actually brings a HBA offline. It stops all the timers 2672 * associated with the HBA, brings down the SLI layer, and eventually 2673 * marks the HBA as in offline state for the upper layer protocol. 
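 *
 * A typical shutdown path pairs the offline routines, e.g. (a minimal
 * sketch, assuming the management I/O blocking shown above):
 *
 *   lpfc_offline_prep(phba);    - unreg logins, shut down the mailbox system
 *   lpfc_offline(phba);         - stop timers, bring down the SLI layer
 *   lpfc_unblock_mgmt_io(phba); - re-allow sysfs/libdfc access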
2674 **/ 2675void 2676lpfc_offline(struct lpfc_hba *phba) 2677{ 2678 struct Scsi_Host *shost; 2679 struct lpfc_vport **vports; 2680 int i; 2681 2682 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2683 return; 2684 2685 /* stop port and all timers associated with this hba */ 2686 lpfc_stop_port(phba); 2687 vports = lpfc_create_vport_work_array(phba); 2688 if (vports != NULL) 2689 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2690 lpfc_stop_vport_timers(vports[i]); 2691 lpfc_destroy_vport_work_array(phba, vports); 2692 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2693 "0460 Bring Adapter offline\n"); 2694 /* Bring down the SLI Layer and cleanup. The HBA is offline 2695 now. */ 2696 lpfc_sli_hba_down(phba); 2697 spin_lock_irq(&phba->hbalock); 2698 phba->work_ha = 0; 2699 spin_unlock_irq(&phba->hbalock); 2700 vports = lpfc_create_vport_work_array(phba); 2701 if (vports != NULL) 2702 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2703 shost = lpfc_shost_from_vport(vports[i]); 2704 spin_lock_irq(shost->host_lock); 2705 vports[i]->work_port_events = 0; 2706 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2707 spin_unlock_irq(shost->host_lock); 2708 } 2709 lpfc_destroy_vport_work_array(phba, vports); 2710} 2711 2712/** 2713 * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated. 2714 * @phba: pointer to lpfc hba data structure. 2715 * 2716 * This routine goes through all the scsi buffers in the system and updates the 2717 * Physical XRIs assigned to the SCSI buffer because these may change after any 2718 * firmware reset 2719 * 2720 * Return codes 2721 * 0 - successful (for now, it always returns 0) 2722 **/ 2723int 2724lpfc_scsi_buf_update(struct lpfc_hba *phba) 2725{ 2726 struct lpfc_scsi_buf *sb, *sb_next; 2727 2728 spin_lock_irq(&phba->hbalock); 2729 spin_lock(&phba->scsi_buf_list_lock); 2730 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) 2731 sb->cur_iocbq.sli4_xritag = 2732 phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag]; 2733 spin_unlock(&phba->scsi_buf_list_lock); 2734 spin_unlock_irq(&phba->hbalock); 2735 return 0; 2736} 2737 2738/** 2739 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2740 * @phba: pointer to lpfc hba data structure. 2741 * 2742 * This routine is to free all the SCSI buffers and IOCBs from the driver 2743 * list back to kernel. It is called from lpfc_pci_remove_one to free 2744 * the internal resources before the device is removed from the system. 2745 * 2746 * Return codes 2747 * 0 - successful (for now, it always returns 0) 2748 **/ 2749static int 2750lpfc_scsi_free(struct lpfc_hba *phba) 2751{ 2752 struct lpfc_scsi_buf *sb, *sb_next; 2753 struct lpfc_iocbq *io, *io_next; 2754 2755 spin_lock_irq(&phba->hbalock); 2756 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2757 spin_lock(&phba->scsi_buf_list_lock); 2758 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2759 list_del(&sb->list); 2760 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2761 sb->dma_handle); 2762 kfree(sb); 2763 phba->total_scsi_bufs--; 2764 } 2765 spin_unlock(&phba->scsi_buf_list_lock); 2766 2767 /* Release all the lpfc_iocbq entries maintained by this host. 
*/ 2768 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2769 list_del(&io->list); 2770 kfree(io); 2771 phba->total_iocbq_bufs--; 2772 } 2773 2774 spin_unlock_irq(&phba->hbalock); 2775 return 0; 2776} 2777 2778/** 2779 * lpfc_create_port - Create an FC port 2780 * @phba: pointer to lpfc hba data structure. 2781 * @instance: a unique integer ID to this FC port. 2782 * @dev: pointer to the device data structure. 2783 * 2784 * This routine creates a FC port for the upper layer protocol. The FC port 2785 * can be created on top of either a physical port or a virtual port provided 2786 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2787 * and associates the FC port created before adding the shost into the SCSI 2788 * layer. 2789 * 2790 * Return codes 2791 * @vport - pointer to the virtual N_Port data structure. 2792 * NULL - port create failed. 2793 **/ 2794struct lpfc_vport * 2795lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2796{ 2797 struct lpfc_vport *vport; 2798 struct Scsi_Host *shost; 2799 int error = 0; 2800 2801 if (dev != &phba->pcidev->dev) 2802 shost = scsi_host_alloc(&lpfc_vport_template, 2803 sizeof(struct lpfc_vport)); 2804 else 2805 shost = scsi_host_alloc(&lpfc_template, 2806 sizeof(struct lpfc_vport)); 2807 if (!shost) 2808 goto out; 2809 2810 vport = (struct lpfc_vport *) shost->hostdata; 2811 vport->phba = phba; 2812 vport->load_flag |= FC_LOADING; 2813 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2814 vport->fc_rscn_flush = 0; 2815 2816 lpfc_get_vport_cfgparam(vport); 2817 shost->unique_id = instance; 2818 shost->max_id = LPFC_MAX_TARGET; 2819 shost->max_lun = vport->cfg_max_luns; 2820 shost->this_id = -1; 2821 shost->max_cmd_len = 16; 2822 if (phba->sli_rev == LPFC_SLI_REV4) { 2823 shost->dma_boundary = 2824 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2825 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2826 } 2827 2828 /* 2829 * Set initial can_queue value since 0 is no longer supported and 2830 * scsi_add_host will fail. This will be adjusted later based on the 2831 * max xri value determined in hba setup. 2832 */ 2833 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2834 if (dev != &phba->pcidev->dev) { 2835 shost->transportt = lpfc_vport_transport_template; 2836 vport->port_type = LPFC_NPIV_PORT; 2837 } else { 2838 shost->transportt = lpfc_transport_template; 2839 vport->port_type = LPFC_PHYSICAL_PORT; 2840 } 2841 2842 /* Initialize all internally managed lists. 
*/ 2843 INIT_LIST_HEAD(&vport->fc_nodes); 2844 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2845 spin_lock_init(&vport->work_port_lock); 2846 2847 init_timer(&vport->fc_disctmo); 2848 vport->fc_disctmo.function = lpfc_disc_timeout; 2849 vport->fc_disctmo.data = (unsigned long)vport; 2850 2851 init_timer(&vport->fc_fdmitmo); 2852 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2853 vport->fc_fdmitmo.data = (unsigned long)vport; 2854 2855 init_timer(&vport->els_tmofunc); 2856 vport->els_tmofunc.function = lpfc_els_timeout; 2857 vport->els_tmofunc.data = (unsigned long)vport; 2858 2859 init_timer(&vport->delayed_disc_tmo); 2860 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 2861 vport->delayed_disc_tmo.data = (unsigned long)vport; 2862 2863 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2864 if (error) 2865 goto out_put_shost; 2866 2867 spin_lock_irq(&phba->hbalock); 2868 list_add_tail(&vport->listentry, &phba->port_list); 2869 spin_unlock_irq(&phba->hbalock); 2870 return vport; 2871 2872out_put_shost: 2873 scsi_host_put(shost); 2874out: 2875 return NULL; 2876} 2877 2878/** 2879 * destroy_port - destroy an FC port 2880 * @vport: pointer to an lpfc virtual N_Port data structure. 2881 * 2882 * This routine destroys a FC port from the upper layer protocol. All the 2883 * resources associated with the port are released. 2884 **/ 2885void 2886destroy_port(struct lpfc_vport *vport) 2887{ 2888 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2889 struct lpfc_hba *phba = vport->phba; 2890 2891 lpfc_debugfs_terminate(vport); 2892 fc_remove_host(shost); 2893 scsi_remove_host(shost); 2894 2895 spin_lock_irq(&phba->hbalock); 2896 list_del_init(&vport->listentry); 2897 spin_unlock_irq(&phba->hbalock); 2898 2899 lpfc_cleanup(vport); 2900 return; 2901} 2902 2903/** 2904 * lpfc_get_instance - Get a unique integer ID 2905 * 2906 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2907 * uses the kernel idr facility to perform the task. 2908 * 2909 * Return codes: 2910 * instance - a unique integer ID allocated as the new instance. 2911 * -1 - lpfc get instance failed. 2912 **/ 2913int 2914lpfc_get_instance(void) 2915{ 2916 int instance = 0; 2917 2918 /* Assign an unused number */ 2919 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2920 return -1; 2921 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2922 return -1; 2923 return instance; 2924} 2925 2926/** 2927 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2928 * @shost: pointer to SCSI host data structure. 2929 * @time: elapsed time of the scan in jiffies. 2930 * 2931 * This routine is called by the SCSI layer with a SCSI host to determine 2932 * whether the scan host is finished. 2933 * 2934 * Note: there is no scan_start function as adapter initialization will have 2935 * asynchronously kicked off the link initialization. 2936 * 2937 * Return codes 2938 * 0 - SCSI host scan is not over yet. 2939 * 1 - SCSI host scan is over. 2940 **/ 2941int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2942{ 2943 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2944 struct lpfc_hba *phba = vport->phba; 2945 int stat = 0; 2946 2947 spin_lock_irq(shost->host_lock); 2948 2949 if (vport->load_flag & FC_UNLOADING) { 2950 stat = 1; 2951 goto finished; 2952 } 2953 if (time >= 30 * HZ) { 2954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2955 "0461 Scanning longer than 30 " 2956 "seconds. 
Continuing initialization\n"); 2957 stat = 1; 2958 goto finished; 2959 } 2960 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2961 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2962 "0465 Link down longer than 15 " 2963 "seconds. Continuing initialization\n"); 2964 stat = 1; 2965 goto finished; 2966 } 2967 2968 if (vport->port_state != LPFC_VPORT_READY) 2969 goto finished; 2970 if (vport->num_disc_nodes || vport->fc_prli_sent) 2971 goto finished; 2972 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2973 goto finished; 2974 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2975 goto finished; 2976 2977 stat = 1; 2978 2979finished: 2980 spin_unlock_irq(shost->host_lock); 2981 return stat; 2982} 2983 2984/** 2985 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2986 * @shost: pointer to SCSI host data structure. 2987 * 2988 * This routine initializes a given SCSI host attributes on a FC port. The 2989 * SCSI host can be either on top of a physical port or a virtual port. 2990 **/ 2991void lpfc_host_attrib_init(struct Scsi_Host *shost) 2992{ 2993 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2994 struct lpfc_hba *phba = vport->phba; 2995 /* 2996 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup(). 2997 */ 2998 2999 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 3000 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3001 fc_host_supported_classes(shost) = FC_COS_CLASS3; 3002 3003 memset(fc_host_supported_fc4s(shost), 0, 3004 sizeof(fc_host_supported_fc4s(shost))); 3005 fc_host_supported_fc4s(shost)[2] = 1; 3006 fc_host_supported_fc4s(shost)[7] = 1; 3007 3008 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 3009 sizeof fc_host_symbolic_name(shost)); 3010 3011 fc_host_supported_speeds(shost) = 0; 3012 if (phba->lmt & LMT_16Gb) 3013 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 3014 if (phba->lmt & LMT_10Gb) 3015 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 3016 if (phba->lmt & LMT_8Gb) 3017 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 3018 if (phba->lmt & LMT_4Gb) 3019 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 3020 if (phba->lmt & LMT_2Gb) 3021 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 3022 if (phba->lmt & LMT_1Gb) 3023 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 3024 3025 fc_host_maxframe_size(shost) = 3026 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 3027 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 3028 3029 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 3030 3031 /* This value is also unchanging */ 3032 memset(fc_host_active_fc4s(shost), 0, 3033 sizeof(fc_host_active_fc4s(shost))); 3034 fc_host_active_fc4s(shost)[2] = 1; 3035 fc_host_active_fc4s(shost)[7] = 1; 3036 3037 fc_host_max_npiv_vports(shost) = phba->max_vpi; 3038 spin_lock_irq(shost->host_lock); 3039 vport->load_flag &= ~FC_LOADING; 3040 spin_unlock_irq(shost->host_lock); 3041} 3042 3043/** 3044 * lpfc_stop_port_s3 - Stop SLI3 device port 3045 * @phba: pointer to lpfc hba data structure. 3046 * 3047 * This routine is invoked to stop an SLI3 device port; it stops the device 3048 * from generating interrupts and stops the device driver's timers for the 3049 * device.
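 *
 * This routine is not called directly; it is installed in the per-HBA API
 * jump table and reached through the lpfc_stop_port() wrapper below.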
3050 **/ 3051static void 3052lpfc_stop_port_s3(struct lpfc_hba *phba) 3053{ 3054 /* Clear all interrupt enable conditions */ 3055 writel(0, phba->HCregaddr); 3056 readl(phba->HCregaddr); /* flush */ 3057 /* Clear all pending interrupts */ 3058 writel(0xffffffff, phba->HAregaddr); 3059 readl(phba->HAregaddr); /* flush */ 3060 3061 /* Reset some HBA SLI setup states */ 3062 lpfc_stop_hba_timers(phba); 3063 phba->pport->work_port_events = 0; 3064} 3065 3066/** 3067 * lpfc_stop_port_s4 - Stop SLI4 device port 3068 * @phba: pointer to lpfc hba data structure. 3069 * 3070 * This routine is invoked to stop an SLI4 device port; it stops the device 3071 * from generating interrupts and stops the device driver's timers for the 3072 * device. 3073 **/ 3074static void 3075lpfc_stop_port_s4(struct lpfc_hba *phba) 3076{ 3077 /* Reset some HBA SLI4 setup states */ 3078 lpfc_stop_hba_timers(phba); 3079 phba->pport->work_port_events = 0; 3080 phba->sli4_hba.intr_enable = 0; 3081} 3082 3083/** 3084 * lpfc_stop_port - Wrapper function for stopping hba port 3085 * @phba: Pointer to HBA context object. 3086 * 3087 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3088 * the API jump table function pointer from the lpfc_hba struct. 3089 **/ 3090void 3091lpfc_stop_port(struct lpfc_hba *phba) 3092{ 3093 phba->lpfc_stop_port(phba); 3094} 3095 3096/** 3097 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3098 * @phba: Pointer to hba for which this call is being executed. 3099 * 3100 * This routine starts the timer waiting for the FCF rediscovery to complete. 3101 **/ 3102void 3103lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 3104{ 3105 unsigned long fcf_redisc_wait_tmo = 3106 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 3107 /* Start fcf rediscovery wait period timer */ 3108 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 3109 spin_lock_irq(&phba->hbalock); 3110 /* Allow action to new fcf asynchronous event */ 3111 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 3112 /* Mark the FCF rediscovery pending state */ 3113 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 3114 spin_unlock_irq(&phba->hbalock); 3115} 3116 3117/** 3118 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 3119 * @ptr: Map to lpfc_hba data structure pointer. 3120 * 3121 * This routine is invoked when the wait for FCF table rediscovery has 3122 * timed out. If new FCF record(s) has (have) been discovered during the 3123 * wait period, a new FCF event shall be added to the FCOE async event 3124 * list, and then the worker thread shall be woken up for processing from the 3125 * worker thread context.
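 *
 * This is the callback for the phba->fcf.redisc_wait timer armed by
 * lpfc_fcf_redisc_wait_start_timer() above, i.e. (sketch):
 *
 *   mod_timer(&phba->fcf.redisc_wait,
 *             jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));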
3126 **/ 3127void 3128lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3129{ 3130 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3131 3132 /* Don't send FCF rediscovery event if timer cancelled */ 3133 spin_lock_irq(&phba->hbalock); 3134 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3135 spin_unlock_irq(&phba->hbalock); 3136 return; 3137 } 3138 /* Clear FCF rediscovery timer pending flag */ 3139 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3140 /* FCF rediscovery event to worker thread */ 3141 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 3142 spin_unlock_irq(&phba->hbalock); 3143 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3144 "2776 FCF rediscover quiescent timer expired\n"); 3145 /* wake up worker thread */ 3146 lpfc_worker_wake_up(phba); 3147} 3148 3149/** 3150 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3151 * @phba: pointer to lpfc hba data structure. 3152 * @acqe_link: pointer to the async link completion queue entry. 3153 * 3154 * This routine is to parse the SLI4 link-attention link fault code and 3155 * translate it into the base driver's read link attention mailbox command 3156 * status. 3157 * 3158 * Return: Link-attention status in terms of base driver's coding. 3159 **/ 3160static uint16_t 3161lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3162 struct lpfc_acqe_link *acqe_link) 3163{ 3164 uint16_t latt_fault; 3165 3166 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3167 case LPFC_ASYNC_LINK_FAULT_NONE: 3168 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3169 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3170 latt_fault = 0; 3171 break; 3172 default: 3173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3174 "0398 Invalid link fault code: x%x\n", 3175 bf_get(lpfc_acqe_link_fault, acqe_link)); 3176 latt_fault = MBXERR_ERROR; 3177 break; 3178 } 3179 return latt_fault; 3180} 3181 3182/** 3183 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3184 * @phba: pointer to lpfc hba data structure. 3185 * @acqe_link: pointer to the async link completion queue entry. 3186 * 3187 * This routine is to parse the SLI4 link attention type and translate it 3188 * into the base driver's link attention type coding. 3189 * 3190 * Return: Link attention type in terms of base driver's coding. 3191 **/ 3192static uint8_t 3193lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3194 struct lpfc_acqe_link *acqe_link) 3195{ 3196 uint8_t att_type; 3197 3198 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3199 case LPFC_ASYNC_LINK_STATUS_DOWN: 3200 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3201 att_type = LPFC_ATT_LINK_DOWN; 3202 break; 3203 case LPFC_ASYNC_LINK_STATUS_UP: 3204 /* Ignore physical link up events - wait for logical link up */ 3205 att_type = LPFC_ATT_RESERVED; 3206 break; 3207 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3208 att_type = LPFC_ATT_LINK_UP; 3209 break; 3210 default: 3211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3212 "0399 Invalid link attention type: x%x\n", 3213 bf_get(lpfc_acqe_link_status, acqe_link)); 3214 att_type = LPFC_ATT_RESERVED; 3215 break; 3216 } 3217 return att_type; 3218} 3219 3220/** 3221 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3222 * @phba: pointer to lpfc hba data structure. 3223 * @acqe_link: pointer to the async link completion queue entry. 3224 * 3225 * This routine is to parse the SLI4 link-attention link speed and translate 3226 * it into the base driver's link-attention link speed coding. 3227 * 3228 * Return: Link-attention link speed in terms of base driver's coding. 
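 *
 * The translation performed below, in table form:
 *
 *   ACQE link speed                 base driver coding
 *   ZERO / 10MBPS / 100MBPS    ->   LPFC_LINK_SPEED_UNKNOWN
 *   1GBPS                      ->   LPFC_LINK_SPEED_1GHZ
 *   10GBPS                     ->   LPFC_LINK_SPEED_10GHZ
 *   anything else              ->   LPFC_LINK_SPEED_UNKNOWN (logged)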
3229 **/ 3230static uint8_t 3231lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3232 struct lpfc_acqe_link *acqe_link) 3233{ 3234 uint8_t link_speed; 3235 3236 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3237 case LPFC_ASYNC_LINK_SPEED_ZERO: 3238 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3239 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3240 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3241 break; 3242 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3243 link_speed = LPFC_LINK_SPEED_1GHZ; 3244 break; 3245 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3246 link_speed = LPFC_LINK_SPEED_10GHZ; 3247 break; 3248 default: 3249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3250 "0483 Invalid link-attention link speed: x%x\n", 3251 bf_get(lpfc_acqe_link_speed, acqe_link)); 3252 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3253 break; 3254 } 3255 return link_speed; 3256} 3257 3258/** 3259 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3260 * @phba: pointer to lpfc hba data structure. 3261 * @acqe_link: pointer to the async link completion queue entry. 3262 * 3263 * This routine is to handle the SLI4 asynchronous FCoE link event. 3264 **/ 3265static void 3266lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3267 struct lpfc_acqe_link *acqe_link) 3268{ 3269 struct lpfc_dmabuf *mp; 3270 LPFC_MBOXQ_t *pmb; 3271 MAILBOX_t *mb; 3272 struct lpfc_mbx_read_top *la; 3273 uint8_t att_type; 3274 int rc; 3275 3276 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3277 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3278 return; 3279 phba->fcoe_eventtag = acqe_link->event_tag; 3280 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3281 if (!pmb) { 3282 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3283 "0395 The mboxq allocation failed\n"); 3284 return; 3285 } 3286 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3287 if (!mp) { 3288 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3289 "0396 The lpfc_dmabuf allocation failed\n"); 3290 goto out_free_pmb; 3291 } 3292 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3293 if (!mp->virt) { 3294 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3295 "0397 The mbuf allocation failed\n"); 3296 goto out_free_dmabuf; 3297 } 3298 3299 /* Cleanup any outstanding ELS commands */ 3300 lpfc_els_flush_all_cmd(phba); 3301 3302 /* Block ELS IOCBs until we have done process link event */ 3303 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3304 3305 /* Update link event statistics */ 3306 phba->sli.slistat.link_event++; 3307 3308 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3309 lpfc_read_topology(phba, pmb, mp); 3310 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3311 pmb->vport = phba->pport; 3312 3313 /* Keep the link status for extra SLI4 state machine reference */ 3314 phba->sli4_hba.link_state.speed = 3315 bf_get(lpfc_acqe_link_speed, acqe_link); 3316 phba->sli4_hba.link_state.duplex = 3317 bf_get(lpfc_acqe_link_duplex, acqe_link); 3318 phba->sli4_hba.link_state.status = 3319 bf_get(lpfc_acqe_link_status, acqe_link); 3320 phba->sli4_hba.link_state.type = 3321 bf_get(lpfc_acqe_link_type, acqe_link); 3322 phba->sli4_hba.link_state.number = 3323 bf_get(lpfc_acqe_link_number, acqe_link); 3324 phba->sli4_hba.link_state.fault = 3325 bf_get(lpfc_acqe_link_fault, acqe_link); 3326 phba->sli4_hba.link_state.logical_speed = 3327 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3328 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3329 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3330 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3331 "Logical 
speed:%dMbps Fault:%d\n", 3332 phba->sli4_hba.link_state.speed, 3333 phba->sli4_hba.link_state.topology, 3334 phba->sli4_hba.link_state.status, 3335 phba->sli4_hba.link_state.type, 3336 phba->sli4_hba.link_state.number, 3337 phba->sli4_hba.link_state.logical_speed * 10, 3338 phba->sli4_hba.link_state.fault); 3339 /* 3340 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3341 * topology info. Note: Optional for non FC-AL ports. 3342 */ 3343 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3344 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3345 if (rc == MBX_NOT_FINISHED) 3346 goto out_free_dmabuf; 3347 return; 3348 } 3349 /* 3350 * For FCoE Mode: fill in all the topology information we need and call 3351 * the READ_TOPOLOGY completion routine to continue without actually 3352 * sending the READ_TOPOLOGY mailbox command to the port. 3353 */ 3354 /* Parse and translate status field */ 3355 mb = &pmb->u.mb; 3356 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3357 3358 /* Parse and translate link attention fields */ 3359 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3360 la->eventTag = acqe_link->event_tag; 3361 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 3362 bf_set(lpfc_mbx_read_top_link_spd, la, 3363 lpfc_sli4_parse_latt_link_speed(phba, acqe_link)); 3364 3365 /* Fake the following irrelevant fields */ 3366 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 3367 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 3368 bf_set(lpfc_mbx_read_top_il, la, 0); 3369 bf_set(lpfc_mbx_read_top_pb, la, 0); 3370 bf_set(lpfc_mbx_read_top_fa, la, 0); 3371 bf_set(lpfc_mbx_read_top_mm, la, 0); 3372 3373 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3374 lpfc_mbx_cmpl_read_topology(phba, pmb); 3375 3376 return; 3377 3378out_free_dmabuf: 3379 kfree(mp); 3380out_free_pmb: 3381 mempool_free(pmb, phba->mbox_mem_pool); 3382} 3383 3384/** 3385 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 3386 * @phba: pointer to lpfc hba data structure. 3387 * @acqe_fc: pointer to the async fc completion queue entry. 3388 * 3389 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 3390 * that the event was received and then issue a read_topology mailbox command so 3391 * that the rest of the driver will treat it the same as SLI3.
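 *
 * Sketch of the flow implemented below:
 *   1. reject ACQEs whose trailer type is not LPFC_FC_LA_EVENT_TYPE_FC_LINK
 *   2. record the link state fields from the ACQE for the SLI4 state machine
 *   3. allocate a mailbox and an mbuf, flush outstanding ELS commands and
 *      block the ELS ring
 *   4. issue READ_TOPOLOGY with lpfc_mbx_cmpl_read_topology as completion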
3392 **/ 3393static void 3394lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 3395{ 3396 struct lpfc_dmabuf *mp; 3397 LPFC_MBOXQ_t *pmb; 3398 int rc; 3399 3400 if (bf_get(lpfc_trailer_type, acqe_fc) != 3401 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 3402 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3403 "2895 Non FC link Event detected.(%d)\n", 3404 bf_get(lpfc_trailer_type, acqe_fc)); 3405 return; 3406 } 3407 /* Keep the link status for extra SLI4 state machine reference */ 3408 phba->sli4_hba.link_state.speed = 3409 bf_get(lpfc_acqe_fc_la_speed, acqe_fc); 3410 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 3411 phba->sli4_hba.link_state.topology = 3412 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 3413 phba->sli4_hba.link_state.status = 3414 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 3415 phba->sli4_hba.link_state.type = 3416 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 3417 phba->sli4_hba.link_state.number = 3418 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 3419 phba->sli4_hba.link_state.fault = 3420 bf_get(lpfc_acqe_link_fault, acqe_fc); 3421 phba->sli4_hba.link_state.logical_speed = 3422 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 3423 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3424 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 3425 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 3426 "%dMbps Fault:%d\n", 3427 phba->sli4_hba.link_state.speed, 3428 phba->sli4_hba.link_state.topology, 3429 phba->sli4_hba.link_state.status, 3430 phba->sli4_hba.link_state.type, 3431 phba->sli4_hba.link_state.number, 3432 phba->sli4_hba.link_state.logical_speed * 10, 3433 phba->sli4_hba.link_state.fault); 3434 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3435 if (!pmb) { 3436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3437 "2897 The mboxq allocation failed\n"); 3438 return; 3439 } 3440 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3441 if (!mp) { 3442 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3443 "2898 The lpfc_dmabuf allocation failed\n"); 3444 goto out_free_pmb; 3445 } 3446 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3447 if (!mp->virt) { 3448 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3449 "2899 The mbuf allocation failed\n"); 3450 goto out_free_dmabuf; 3451 } 3452 3453 /* Cleanup any outstanding ELS commands */ 3454 lpfc_els_flush_all_cmd(phba); 3455 3456 /* Block ELS IOCBs until we have done process link event */ 3457 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3458 3459 /* Update link event statistics */ 3460 phba->sli.slistat.link_event++; 3461 3462 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3463 lpfc_read_topology(phba, pmb, mp); 3464 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3465 pmb->vport = phba->pport; 3466 3467 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3468 if (rc == MBX_NOT_FINISHED) 3469 goto out_free_dmabuf; 3470 return; 3471 3472out_free_dmabuf: 3473 kfree(mp); 3474out_free_pmb: 3475 mempool_free(pmb, phba->mbox_mem_pool); 3476} 3477 3478/** 3479 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 3480 * @phba: pointer to lpfc hba data structure. 3481 * @acqe_fc: pointer to the async SLI completion queue entry. 3482 * 3483 * This routine is to handle the SLI4 asynchronous SLI events. 
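 *
 * At present the event is only logged; no driver state is changed on the
 * basis of the event data.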
3484 **/ 3485static void 3486lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 3487{ 3488 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3489 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 3490 "x%08x SLI Event Type:%d", 3491 acqe_sli->event_data1, acqe_sli->event_data2, 3492 bf_get(lpfc_trailer_type, acqe_sli)); 3493 return; 3494} 3495 3496/** 3497 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3498 * @vport: pointer to vport data structure. 3499 * 3500 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3501 * response to a CVL event. 3502 * 3503 * Return the pointer to the ndlp with the vport if successful, otherwise 3504 * return NULL. 3505 **/ 3506static struct lpfc_nodelist * 3507lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3508{ 3509 struct lpfc_nodelist *ndlp; 3510 struct Scsi_Host *shost; 3511 struct lpfc_hba *phba; 3512 3513 if (!vport) 3514 return NULL; 3515 phba = vport->phba; 3516 if (!phba) 3517 return NULL; 3518 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3519 if (!ndlp) { 3520 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3521 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3522 if (!ndlp) 3523 return 0; 3524 lpfc_nlp_init(vport, ndlp, Fabric_DID); 3525 /* Set the node type */ 3526 ndlp->nlp_type |= NLP_FABRIC; 3527 /* Put ndlp onto node list */ 3528 lpfc_enqueue_node(vport, ndlp); 3529 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3530 /* re-setup ndlp without removing from node list */ 3531 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3532 if (!ndlp) 3533 return 0; 3534 } 3535 if ((phba->pport->port_state < LPFC_FLOGI) && 3536 (phba->pport->port_state != LPFC_VPORT_FAILED)) 3537 return NULL; 3538 /* If virtual link is not yet instantiated ignore CVL */ 3539 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 3540 && (vport->port_state != LPFC_VPORT_FAILED)) 3541 return NULL; 3542 shost = lpfc_shost_from_vport(vport); 3543 if (!shost) 3544 return NULL; 3545 lpfc_linkdown_port(vport); 3546 lpfc_cleanup_pending_mbox(vport); 3547 spin_lock_irq(shost->host_lock); 3548 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3549 spin_unlock_irq(shost->host_lock); 3550 3551 return ndlp; 3552} 3553 3554/** 3555 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3556 * @vport: pointer to lpfc hba data structure. 3557 * 3558 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3559 * response to a FCF dead event. 3560 **/ 3561static void 3562lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3563{ 3564 struct lpfc_vport **vports; 3565 int i; 3566 3567 vports = lpfc_create_vport_work_array(phba); 3568 if (vports) 3569 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3570 lpfc_sli4_perform_vport_cvl(vports[i]); 3571 lpfc_destroy_vport_work_array(phba, vports); 3572} 3573 3574/** 3575 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 3576 * @phba: pointer to lpfc hba data structure. 3577 * @acqe_link: pointer to the async fcoe completion queue entry. 3578 * 3579 * This routine is to handle the SLI4 asynchronous fcoe event. 
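 *
 * Event types handled here (taken from the ACQE trailer type):
 *   LPFC_FIP_EVENT_TYPE_NEW_FCF / _FCF_PARAM_MOD - kick off an FCF table scan
 *                                                  unless discovery is busy
 *   LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL           - logged only
 *   LPFC_FIP_EVENT_TYPE_FCF_DEAD                 - start fast FCF failover
 *   LPFC_FIP_EVENT_TYPE_CVL                      - clear virtual link recovery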
3580 **/
3581static void
3582lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3583			struct lpfc_acqe_fip *acqe_fip)
3584{
3585	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3586	int rc;
3587	struct lpfc_vport *vport;
3588	struct lpfc_nodelist *ndlp;
3589	struct Scsi_Host *shost;
3590	int active_vlink_present;
3591	struct lpfc_vport **vports;
3592	int i;
3593
3594	phba->fc_eventTag = acqe_fip->event_tag;
3595	phba->fcoe_eventtag = acqe_fip->event_tag;
3596	switch (event_type) {
3597	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3598	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3599		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3600			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3601					LOG_DISCOVERY,
3602					"2546 New FCF event, evt_tag:x%x, "
3603					"index:x%x\n",
3604					acqe_fip->event_tag,
3605					acqe_fip->index);
3606		else
3607			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3608					LOG_DISCOVERY,
3609					"2788 FCF param modified event, "
3610					"evt_tag:x%x, index:x%x\n",
3611					acqe_fip->event_tag,
3612					acqe_fip->index);
3613		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3614			/*
3615			 * During the period of FCF discovery, read the FCF
3616			 * table record indexed by the event to update the
3617			 * FCF roundrobin failover eligible FCF bmask.
3618			 */
3619			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3620					LOG_DISCOVERY,
3621					"2779 Read FCF (x%x) for updating "
3622					"roundrobin FCF failover bmask\n",
3623					acqe_fip->index);
3624			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3625		}
3626
3627		/* If the FCF discovery is in progress, do nothing. */
3628		spin_lock_irq(&phba->hbalock);
3629		if (phba->hba_flag & FCF_TS_INPROG) {
3630			spin_unlock_irq(&phba->hbalock);
3631			break;
3632		}
3633		/* If fast FCF failover rescan event is pending, do nothing */
3634		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3635			spin_unlock_irq(&phba->hbalock);
3636			break;
3637		}
3638
3639		/* If the FCF has been in discovered state, do nothing. */
3640		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3641			spin_unlock_irq(&phba->hbalock);
3642			break;
3643		}
3644		spin_unlock_irq(&phba->hbalock);
3645
3646		/* Otherwise, scan the entire FCF table and re-discover SAN */
3647		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3648				"2770 Start FCF table scan per async FCF "
3649				"event, evt_tag:x%x, index:x%x\n",
3650				acqe_fip->event_tag, acqe_fip->index);
3651		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3652						     LPFC_FCOE_FCF_GET_FIRST);
3653		if (rc)
3654			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3655					"2547 Issue FCF scan read FCF mailbox "
3656					"command failed (x%x)\n", rc);
3657		break;
3658
3659	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3660		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3661			"2548 FCF Table full count 0x%x tag 0x%x\n",
3662			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3663			acqe_fip->event_tag);
3664		break;
3665
3666	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3667		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3668			"2549 FCF (x%x) disconnected from network, "
3669			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3670		/*
3671		 * If we are in the middle of FCF failover process, clear
3672		 * the corresponding FCF bit in the roundrobin bitmap.
3673		 */
3674		spin_lock_irq(&phba->hbalock);
3675		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3676			spin_unlock_irq(&phba->hbalock);
3677			/* Update FLOGI FCF failover eligible FCF bmask */
3678			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3679			break;
3680		}
3681		spin_unlock_irq(&phba->hbalock);
3682
3683		/* If the event is not for currently used fcf do nothing */
3684		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3685			break;
3686
3687		/*
3688		 * Otherwise, request the port to rediscover the entire FCF
3689		 * table for fast recovery, in case the current FCF is no
3690		 * longer valid, as we are not already in the middle of the
3691		 * FCF failover process.
3692		 */
3693		spin_lock_irq(&phba->hbalock);
3694		/* Mark the fast failover process in progress */
3695		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3696		spin_unlock_irq(&phba->hbalock);
3697
3698		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3699				"2771 Start FCF fast failover process due to "
3700				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3701				"\n", acqe_fip->event_tag, acqe_fip->index);
3702		rc = lpfc_sli4_redisc_fcf_table(phba);
3703		if (rc) {
3704			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3705					LOG_DISCOVERY,
3706					"2772 Issue FCF rediscover mailbox "
3707					"command failed, fail through to FCF "
3708					"dead event\n");
3709			spin_lock_irq(&phba->hbalock);
3710			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3711			spin_unlock_irq(&phba->hbalock);
3712			/*
3713			 * Last resort will fail over by treating this
3714			 * as a link down to FCF registration.
3715			 */
3716			lpfc_sli4_fcf_dead_failthrough(phba);
3717		} else {
3718			/* Reset FCF roundrobin bmask for new discovery */
3719			lpfc_sli4_clear_fcf_rr_bmask(phba);
3720			/*
3721			 * Handling fast FCF failover to a DEAD FCF event is
3722			 * considered equivalent to receiving CVL on all vports.
3723			 */
3724			lpfc_sli4_perform_all_vport_cvl(phba);
3725		}
3726		break;
3727	case LPFC_FIP_EVENT_TYPE_CVL:
3728		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3729			"2718 Clear Virtual Link Received for VPI 0x%x"
3730			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3731
3732		vport = lpfc_find_vport_by_vpid(phba,
3733						acqe_fip->index);
3734		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3735		if (!ndlp)
3736			break;
3737		active_vlink_present = 0;
3738
3739		vports = lpfc_create_vport_work_array(phba);
3740		if (vports) {
3741			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3742					i++) {
3743				if ((!(vports[i]->fc_flag &
3744					FC_VPORT_CVL_RCVD)) &&
3745					(vports[i]->port_state > LPFC_FDISC)) {
3746					active_vlink_present = 1;
3747					break;
3748				}
3749			}
3750			lpfc_destroy_vport_work_array(phba, vports);
3751		}
3752
3753		if (active_vlink_present) {
3754			/*
3755			 * If there are other active VLinks present,
3756			 * re-instantiate the Vlink using FDISC.
3757			 */
3758			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3759			shost = lpfc_shost_from_vport(vport);
3760			spin_lock_irq(shost->host_lock);
3761			ndlp->nlp_flag |= NLP_DELAY_TMO;
3762			spin_unlock_irq(shost->host_lock);
3763			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3764			vport->port_state = LPFC_FDISC;
3765		} else {
3766			/*
3767			 * Otherwise, request the port to rediscover
3768			 * the entire FCF table for fast recovery from
3769			 * the possible case that the current FCF
3770			 * is no longer valid, if we are not already
3771			 * in the FCF failover process.
3772			 */
3773			spin_lock_irq(&phba->hbalock);
3774			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3775				spin_unlock_irq(&phba->hbalock);
3776				break;
3777			}
3778			/* Mark the fast failover process in progress */
3779			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3780			spin_unlock_irq(&phba->hbalock);
3781			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3782					LOG_DISCOVERY,
3783					"2773 Start FCF failover per CVL, "
3784					"evt_tag:x%x\n", acqe_fip->event_tag);
3785			rc = lpfc_sli4_redisc_fcf_table(phba);
3786			if (rc) {
3787				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3788						LOG_DISCOVERY,
3789						"2774 Issue FCF rediscover "
3790						"mailbox command failed, fail "
3791						"through to CVL event\n");
3792				spin_lock_irq(&phba->hbalock);
3793				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3794				spin_unlock_irq(&phba->hbalock);
3795				/*
3796				 * Last resort will be re-try on the
3797				 * currently registered FCF entry.
3798				 */
3799				lpfc_retry_pport_discovery(phba);
3800			} else
3801				/*
3802				 * Reset FCF roundrobin bmask for new
3803				 * discovery.
3804				 */
3805				lpfc_sli4_clear_fcf_rr_bmask(phba);
3806		}
3807		break;
3808	default:
3809		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3810			"0288 Unknown FCoE event type 0x%x event tag "
3811			"0x%x\n", event_type, acqe_fip->event_tag);
3812		break;
3813	}
3814}
3815
3816/**
3817 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3818 * @phba: pointer to lpfc hba data structure.
3819 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3820 *
3821 * This routine is to handle the SLI4 asynchronous dcbx event.
3822 **/
3823static void
3824lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3825			 struct lpfc_acqe_dcbx *acqe_dcbx)
3826{
3827	phba->fc_eventTag = acqe_dcbx->event_tag;
3828	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3829			"0290 The SLI4 DCBX asynchronous event is not "
3830			"handled yet\n");
3831}
3832
3833/**
3834 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3835 * @phba: pointer to lpfc hba data structure.
3836 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3837 *
3838 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3839 * is an asynchronous notification of a logical link speed change. The Port
3840 * reports the logical link speed in units of 10Mbps.
3841 **/
3842static void
3843lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3844			 struct lpfc_acqe_grp5 *acqe_grp5)
3845{
3846	uint16_t prev_ll_spd;
3847
3848	phba->fc_eventTag = acqe_grp5->event_tag;
3849	phba->fcoe_eventtag = acqe_grp5->event_tag;
3850	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3851	phba->sli4_hba.link_state.logical_speed =
3852		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3853	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3854			"2789 GRP5 Async Event: Updating logical link speed "
3855			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3856			(phba->sli4_hba.link_state.logical_speed * 10));
3857}
3858
3859/**
3860 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3861 * @phba: pointer to lpfc hba data structure.
3862 *
3863 * This routine is invoked by the worker thread to process all the pending
3864 * SLI4 asynchronous events.
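 * The interrupt path is the producer side of sp_asynce_work_queue: it queues
 * each event and sets ASYNC_EVENT in hba_flag before waking this worker,
 * roughly (a sketch of the producer side, not the literal ISR code):
 *
 *	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
 *	phba->hba_flag |= ASYNC_EVENT;
 *	lpfc_worker_wake_up(phba);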
3865 **/
3866void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3867{
3868	struct lpfc_cq_event *cq_event;
3869
3870	/* First, declare the async event has been handled */
3871	spin_lock_irq(&phba->hbalock);
3872	phba->hba_flag &= ~ASYNC_EVENT;
3873	spin_unlock_irq(&phba->hbalock);
3874	/* Now, handle all the async events */
3875	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3876		/* Get the first event from the head of the event queue */
3877		spin_lock_irq(&phba->hbalock);
3878		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3879				 cq_event, struct lpfc_cq_event, list);
3880		spin_unlock_irq(&phba->hbalock);
3881		/* Process the asynchronous event */
3882		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3883		case LPFC_TRAILER_CODE_LINK:
3884			lpfc_sli4_async_link_evt(phba,
3885						 &cq_event->cqe.acqe_link);
3886			break;
3887		case LPFC_TRAILER_CODE_FCOE:
3888			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3889			break;
3890		case LPFC_TRAILER_CODE_DCBX:
3891			lpfc_sli4_async_dcbx_evt(phba,
3892						 &cq_event->cqe.acqe_dcbx);
3893			break;
3894		case LPFC_TRAILER_CODE_GRP5:
3895			lpfc_sli4_async_grp5_evt(phba,
3896						 &cq_event->cqe.acqe_grp5);
3897			break;
3898		case LPFC_TRAILER_CODE_FC:
3899			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3900			break;
3901		case LPFC_TRAILER_CODE_SLI:
3902			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3903			break;
3904		default:
3905			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3906					"1804 Invalid asynchronous event code: "
3907					"x%x\n", bf_get(lpfc_trailer_code,
3908					&cq_event->cqe.mcqe_cmpl));
3909			break;
3910		}
3911		/* Free the processed completion event back to the free pool */
3912		lpfc_sli4_cq_event_release(phba, cq_event);
3913	}
3914}
3915
3916/**
3917 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3918 * @phba: pointer to lpfc hba data structure.
3919 *
3920 * This routine is invoked by the worker thread to process the FCF table
3921 * rediscovery pending completion event.
3922 **/
3923void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3924{
3925	int rc;
3926
3927	spin_lock_irq(&phba->hbalock);
3928	/* Clear FCF rediscovery timeout event */
3929	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3930	/* Clear driver fast failover FCF record flag */
3931	phba->fcf.failover_rec.flag = 0;
3932	/* Set state for FCF fast failover */
3933	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3934	spin_unlock_irq(&phba->hbalock);
3935
3936	/* Scan FCF table from the first entry to re-discover SAN */
3937	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3938			"2777 Start post-quiescent FCF table scan\n");
3939	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3940	if (rc)
3941		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3942				"2747 Issue FCF scan read FCF mailbox "
3943				"command failed 0x%x\n", rc);
3944}
3945
3946/**
3947 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3948 * @phba: pointer to lpfc hba data structure.
3949 * @dev_grp: The HBA PCI-Device group number.
3950 *
3951 * This routine is invoked to set up the per HBA PCI-Device group function
3952 * API jump table entries.
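 * Four jump tables are populated in order: INIT, SCSI, SLI and MBOX; a
 * failure in any of the four is reported uniformly as -ENODEV.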
3953 *
3954 * Return: 0 if success, otherwise -ENODEV
3955 **/
3956int
3957lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3958{
3959	int rc;
3960
3961	/* Set up lpfc PCI-device group */
3962	phba->pci_dev_grp = dev_grp;
3963
3964	/* The LPFC_PCI_DEV_OC uses SLI4 */
3965	if (dev_grp == LPFC_PCI_DEV_OC)
3966		phba->sli_rev = LPFC_SLI_REV4;
3967
3968	/* Set up device INIT API function jump table */
3969	rc = lpfc_init_api_table_setup(phba, dev_grp);
3970	if (rc)
3971		return -ENODEV;
3972	/* Set up SCSI API function jump table */
3973	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3974	if (rc)
3975		return -ENODEV;
3976	/* Set up SLI API function jump table */
3977	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3978	if (rc)
3979		return -ENODEV;
3980	/* Set up MBOX API function jump table */
3981	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3982	if (rc)
3983		return -ENODEV;
3984
3985	return 0;
3986}
3987
3988/**
3989 * lpfc_log_intr_mode - Log the active interrupt mode
3990 * @phba: pointer to lpfc hba data structure.
3991 * @intr_mode: active interrupt mode adopted.
3992 *
3993 * This routine is invoked to log the currently used active interrupt mode
3994 * for the device.
3995 **/
3996static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3997{
3998	switch (intr_mode) {
3999	case 0:
4000		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4001				"0470 Enabled INTx interrupt mode.\n");
4002		break;
4003	case 1:
4004		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4005				"0481 Enabled MSI interrupt mode.\n");
4006		break;
4007	case 2:
4008		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4009				"0480 Enabled MSI-X interrupt mode.\n");
4010		break;
4011	default:
4012		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4013				"0482 Illegal interrupt mode.\n");
4014		break;
4015	}
4016	return;
4017}
4018
4019/**
4020 * lpfc_enable_pci_dev - Enable a generic PCI device.
4021 * @phba: pointer to lpfc hba data structure.
4022 *
4023 * This routine is invoked to perform the PCI device enabling that is common
4024 * to all PCI devices.
4025 *
4026 * Return codes
4027 * 	0 - successful
4028 * 	other values - error
4029 **/
4030static int
4031lpfc_enable_pci_dev(struct lpfc_hba *phba)
4032{
4033	struct pci_dev *pdev;
4034	int bars = 0;
4035
4036	/* Obtain PCI device reference */
4037	if (!phba->pcidev)
4038		goto out_error;
4039	else
4040		pdev = phba->pcidev;
4041	/* Select PCI BARs */
4042	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4043	/* Enable PCI device */
4044	if (pci_enable_device_mem(pdev))
4045		goto out_error;
4046	/* Request PCI resource for the device */
4047	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4048		goto out_disable_device;
4049	/* Set up device as PCI master and save state for EEH */
4050	pci_set_master(pdev);
4051	pci_try_set_mwi(pdev);
4052	pci_save_state(pdev);
4053
4054	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4055	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
4056		pdev->needs_freset = 1;
4057
4058	return 0;
4059
4060out_disable_device:
4061	pci_disable_device(pdev);
4062out_error:
4063	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4064			"1401 Failed to enable pci device, bars:x%x\n", bars);
4065	return -ENODEV;
4066}
4067
4068/**
4069 * lpfc_disable_pci_dev - Disable a generic PCI device.
4070 * @phba: pointer to lpfc hba data structure.
4071 *
4072 * This routine is invoked to perform the PCI device disabling that is common
4073 * to all PCI devices.
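 * It is the inverse of lpfc_enable_pci_dev(): the selected BAR regions are
 * released and the device is disabled.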
4074 **/
4075static void
4076lpfc_disable_pci_dev(struct lpfc_hba *phba)
4077{
4078	struct pci_dev *pdev;
4079	int bars;
4080
4081	/* Obtain PCI device reference */
4082	if (!phba->pcidev)
4083		return;
4084	else
4085		pdev = phba->pcidev;
4086	/* Select PCI BARs */
4087	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4088	/* Release PCI resource and disable PCI device */
4089	pci_release_selected_regions(pdev, bars);
4090	pci_disable_device(pdev);
4091	/* Null out PCI private reference to driver */
4092	pci_set_drvdata(pdev, NULL);
4093
4094	return;
4095}
4096
4097/**
4098 * lpfc_reset_hba - Reset a hba
4099 * @phba: pointer to lpfc hba data structure.
4100 *
4101 * This routine is invoked to reset a hba device. It brings the HBA
4102 * offline, performs a board restart, and then brings the board back
4103 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4104 * any outstanding mailbox commands.
4105 **/
4106void
4107lpfc_reset_hba(struct lpfc_hba *phba)
4108{
4109	/* If resets are disabled then set error state and return. */
4110	if (!phba->cfg_enable_hba_reset) {
4111		phba->link_state = LPFC_HBA_ERROR;
4112		return;
4113	}
4114	lpfc_offline_prep(phba);
4115	lpfc_offline(phba);
4116	lpfc_sli_brdrestart(phba);
4117	lpfc_online(phba);
4118	lpfc_unblock_mgmt_io(phba);
4119}
4120
4121/**
4122 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4123 * @phba: pointer to lpfc hba data structure.
4124 *
4125 * This function reads the PCI SR-IOV extended capability of the physical
4126 * function to determine the total number of virtual functions (TotalVFs)
4127 * that the device supports.
4128 *
4129 * Returns the TotalVFs value, or 0 if the device has no SR-IOV capability.
4130 **/
4131uint16_t
4132lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4133{
4134	struct pci_dev *pdev = phba->pcidev;
4135	uint16_t nr_virtfn;
4136	int pos;
4137
4138	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4139	if (pos == 0)
4140		return 0;
4141
4142	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4143	return nr_virtfn;
4144}
4145
4146/**
4147 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4148 * @phba: pointer to lpfc hba data structure.
4149 * @nr_vfn: number of virtual functions to be enabled.
4150 *
4151 * This function enables the PCI SR-IOV virtual functions to a physical
4152 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4153 * enable the number of virtual functions to the physical function. As not
4154 * all devices support SR-IOV, the return code from the pci_enable_sriov()
4155 * API call is not considered an error condition for most devices.
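 *
 * For example (hypothetical values), loading the driver with the
 * lpfc_sriov_nr_virtfn module parameter set to 4 on a function whose
 * TotalVFs capability reads 8 ends up invoking:
 *
 *	pci_enable_sriov(pdev, 4);
 *
 * while a request larger than TotalVFs is rejected here with -EINVAL.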
4156 **/
4157int
4158lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4159{
4160	struct pci_dev *pdev = phba->pcidev;
4161	uint16_t max_nr_vfn;
4162	int rc;
4163
4164	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4165	if (nr_vfn > max_nr_vfn) {
4166		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4167				"3057 Requested vfs (%d) greater than "
4168				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
4169		return -EINVAL;
4170	}
4171
4172	rc = pci_enable_sriov(pdev, nr_vfn);
4173	if (rc) {
4174		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4175				"2806 Failed to enable sriov on this device "
4176				"with vfn number nr_vf:%d, rc:%d\n",
4177				nr_vfn, rc);
4178	} else
4179		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4180				"2807 Successfully enabled sriov on this "
4181				"device with vfn number nr_vf:%d\n", nr_vfn);
4182	return rc;
4183}
4184
4185/**
4186 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4187 * @phba: pointer to lpfc hba data structure.
4188 *
4189 * This routine is invoked to set up the driver internal resources specific to
4190 * support the SLI-3 HBA device it is attached to.
4191 *
4192 * Return codes
4193 *	0 - successful
4194 *	other values - error
4195 **/
4196static int
4197lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4198{
4199	struct lpfc_sli *psli;
4200	int rc;
4201
4202	/*
4203	 * Initialize timers used by driver
4204	 */
4205
4206	/* Heartbeat timer */
4207	init_timer(&phba->hb_tmofunc);
4208	phba->hb_tmofunc.function = lpfc_hb_timeout;
4209	phba->hb_tmofunc.data = (unsigned long)phba;
4210
4211	psli = &phba->sli;
4212	/* MBOX heartbeat timer */
4213	init_timer(&psli->mbox_tmo);
4214	psli->mbox_tmo.function = lpfc_mbox_timeout;
4215	psli->mbox_tmo.data = (unsigned long) phba;
4216	/* FCP polling mode timer */
4217	init_timer(&phba->fcp_poll_timer);
4218	phba->fcp_poll_timer.function = lpfc_poll_timeout;
4219	phba->fcp_poll_timer.data = (unsigned long) phba;
4220	/* Fabric block timer */
4221	init_timer(&phba->fabric_block_timer);
4222	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4223	phba->fabric_block_timer.data = (unsigned long) phba;
4224	/* EA polling mode timer */
4225	init_timer(&phba->eratt_poll);
4226	phba->eratt_poll.function = lpfc_poll_eratt;
4227	phba->eratt_poll.data = (unsigned long) phba;
4228
4229	/* Host attention work mask setup */
4230	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4231	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4232
4233	/* Get all the module params for configuring this host */
4234	lpfc_get_cfgparam(phba);
4235	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4236		phba->menlo_flag |= HBA_MENLO_SUPPORT;
4237		/* check for menlo minimum sg count */
4238		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4239			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4240	}
4241
4242	/*
4243	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4244	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4245	 * 2 segments are added since the IOCB needs a command and response bde.
4246	 */
4247	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4248		sizeof(struct fcp_rsp) +
4249		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4250
4251	if (phba->cfg_enable_bg) {
4252		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4253		phba->cfg_sg_dma_buf_size +=
4254			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4255	}
4256
4257	/* Also reinitialize the host templates with new values.
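	 * The templates must carry the final cfg_sg_seg_cnt, since the SCSI
	 * midlayer copies sg_tablesize from the template when the host is
	 * allocated. For example (illustrative sizes only), cfg_sg_seg_cnt =
	 * 64 yields a pool buffer of sizeof(struct fcp_cmnd) +
	 * sizeof(struct fcp_rsp) + 66 * sizeof(struct ulp_bde64): one BDE
	 * per data segment plus the command and response BDEs.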
*/
4258	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4259	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4260
4261	phba->max_vpi = LPFC_MAX_VPI;
4262	/* This will be set to correct value after config_port mbox */
4263	phba->max_vports = 0;
4264
4265	/*
4266	 * Initialize the SLI Layer to run with lpfc HBAs.
4267	 */
4268	lpfc_sli_setup(phba);
4269	lpfc_sli_queue_setup(phba);
4270
4271	/* Allocate device driver memory */
4272	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4273		return -ENOMEM;
4274
4275	/*
4276	 * Enable sr-iov virtual functions if supported and configured
4277	 * through the module parameter.
4278	 */
4279	if (phba->cfg_sriov_nr_virtfn > 0) {
4280		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4281						    phba->cfg_sriov_nr_virtfn);
4282		if (rc) {
4283			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4284					"2808 Requested number of SR-IOV "
4285					"virtual functions (%d) is not "
4286					"supported\n",
4287					phba->cfg_sriov_nr_virtfn);
4288			phba->cfg_sriov_nr_virtfn = 0;
4289		}
4290	}
4291
4292	return 0;
4293}
4294
4295/**
4296 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4297 * @phba: pointer to lpfc hba data structure.
4298 *
4299 * This routine is invoked to unset the driver internal resources set up
4300 * specific for supporting the SLI-3 HBA device it is attached to.
4301 **/
4302static void
4303lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4304{
4305	/* Free device driver memory allocated */
4306	lpfc_mem_free_all(phba);
4307
4308	return;
4309}
4310
4311/**
4312 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4313 * @phba: pointer to lpfc hba data structure.
4314 *
4315 * This routine is invoked to set up the driver internal resources specific to
4316 * support the SLI-4 HBA device it is attached to.
4317 *
4318 * Return codes
4319 *	0 - successful
4320 *	other values - error
4321 **/
4322static int
4323lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4324{
4325	struct lpfc_sli *psli;
4326	LPFC_MBOXQ_t *mboxq;
4327	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4328	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4329	struct lpfc_mqe *mqe;
4330	int longs, sli_family;
4331
4332	/* Before proceeding, wait for POST done and device ready */
4333	rc = lpfc_sli4_post_status_check(phba);
4334	if (rc)
4335		return -ENODEV;
4336
4337	/*
4338	 * Initialize timers used by driver
4339	 */
4340
4341	/* Heartbeat timer */
4342	init_timer(&phba->hb_tmofunc);
4343	phba->hb_tmofunc.function = lpfc_hb_timeout;
4344	phba->hb_tmofunc.data = (unsigned long)phba;
4345	init_timer(&phba->rrq_tmr);
4346	phba->rrq_tmr.function = lpfc_rrq_timeout;
4347	phba->rrq_tmr.data = (unsigned long)phba;
4348
4349	psli = &phba->sli;
4350	/* MBOX heartbeat timer */
4351	init_timer(&psli->mbox_tmo);
4352	psli->mbox_tmo.function = lpfc_mbox_timeout;
4353	psli->mbox_tmo.data = (unsigned long) phba;
4354	/* Fabric block timer */
4355	init_timer(&phba->fabric_block_timer);
4356	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4357	phba->fabric_block_timer.data = (unsigned long) phba;
4358	/* EA polling mode timer */
4359	init_timer(&phba->eratt_poll);
4360	phba->eratt_poll.function = lpfc_poll_eratt;
4361	phba->eratt_poll.data = (unsigned long) phba;
4362	/* FCF rediscover timer */
4363	init_timer(&phba->fcf.redisc_wait);
4364	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4365	phba->fcf.redisc_wait.data = (unsigned long)phba;
4366
4367	/*
4368	 * Control structure for handling external multi-buffer mailbox
4369	 * command pass-through.
4370	 */
4371	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4372		sizeof(struct lpfc_mbox_ext_buf_ctx));
4373	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4374
4375	/*
4376	 * We need to do a READ_CONFIG mailbox command here before
4377	 * calling lpfc_get_cfgparam. For VFs this will report the
4378	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4379	 * All of the resources allocated
4380	 * for this Port are tied to these values.
4381	 */
4382	/* Get all the module params for configuring this host */
4383	lpfc_get_cfgparam(phba);
4384	phba->max_vpi = LPFC_MAX_VPI;
4385	/* This will be set to correct value after the read_config mbox */
4386	phba->max_vports = 0;
4387
4388	/* Program the default value of vlan_id and fc_map */
4389	phba->valid_vlan = 0;
4390	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4391	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4392	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4393
4394	/*
4395	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4396	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4397	 * 2 segments are added since the IOCB needs a command and response bde.
4398	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4399	 * sgl sizes that are a power of 2 are used.
4400	 */
4401	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4402		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4403
4404	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4405	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4406	switch (sli_family) {
4407	case LPFC_SLI_INTF_FAMILY_BE2:
4408	case LPFC_SLI_INTF_FAMILY_BE3:
4409		/* There is a single hint for BE - 2 pages per BPL.
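		 * When lpfc_sli_intf_sli_hint1 reads LPFC_SLI_INTF_SLI_HINT1_1
		 * the part supports the larger two-page buffer, so max_buf_size
		 * is raised to LPFC_SLI4_FL1_MAX_BUF_SIZE below.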
*/
4410		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4411		    LPFC_SLI_INTF_SLI_HINT1_1)
4412			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4413		break;
4414	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4415	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4416	default:
4417		break;
4418	}
4419	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4420	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4421	     dma_buf_size = dma_buf_size << 1)
4422		;
4423	if (dma_buf_size == max_buf_size)
4424		phba->cfg_sg_seg_cnt = (dma_buf_size -
4425			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4426			(2 * sizeof(struct sli4_sge))) /
4427				sizeof(struct sli4_sge);
4428	phba->cfg_sg_dma_buf_size = dma_buf_size;
4429
4430	/* Initialize buffer queue management fields */
4431	hbq_count = lpfc_sli_hbq_count();
4432	for (i = 0; i < hbq_count; ++i)
4433		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4434	INIT_LIST_HEAD(&phba->rb_pend_list);
4435	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4436	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4437
4438	/*
4439	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4440	 */
4441	/* Initialize the Abort scsi buffer list used by driver */
4442	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4443	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4444	/* This abort list is used by the worker thread */
4445	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4446
4447	/*
4448	 * Initialize driver internal slow-path work queues
4449	 */
4450
4451	/* Driver internal slow-path CQ Event pool */
4452	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4453	/* Response IOCB work queue list */
4454	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4455	/* Asynchronous event CQ Event work queue list */
4456	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4457	/* Fast-path XRI aborted CQ Event work queue list */
4458	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4459	/* Slow-path XRI aborted CQ Event work queue list */
4460	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4461	/* Receive queue CQ Event work queue list */
4462	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4463
4464	/* Initialize extent block lists. */
4465	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4466	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4467	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4468	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4469
4470	/* Initialize the driver internal SLI layer lists. */
4471	lpfc_sli_setup(phba);
4472	lpfc_sli_queue_setup(phba);
4473
4474	/* Allocate device driver memory */
4475	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4476	if (rc)
4477		return -ENOMEM;
4478
4479	/* IF Type 2 ports get initialized now. */
4480	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4481	    LPFC_SLI_INTF_IF_TYPE_2) {
4482		rc = lpfc_pci_function_reset(phba);
4483		if (unlikely(rc))
4484			return -ENODEV;
4485	}
4486
4487	/* Create the bootstrap mailbox command */
4488	rc = lpfc_create_bootstrap_mbox(phba);
4489	if (unlikely(rc))
4490		goto out_free_mem;
4491
4492	/* Set up the host's endian order with the device. */
4493	rc = lpfc_setup_endian_order(phba);
4494	if (unlikely(rc))
4495		goto out_free_bsmbx;
4496
4497	/* Set up the hba's configuration parameters. */
4498	rc = lpfc_sli4_read_config(phba);
4499	if (unlikely(rc))
4500		goto out_free_bsmbx;
4501
4502	/* IF Type 0 ports get initialized now.
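	 * IF type 2 ports were already reset above, before the bootstrap
	 * mailbox was created.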
*/
4503	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4504	    LPFC_SLI_INTF_IF_TYPE_0) {
4505		rc = lpfc_pci_function_reset(phba);
4506		if (unlikely(rc))
4507			goto out_free_bsmbx;
4508	}
4509
4510	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4511						       GFP_KERNEL);
4512	if (!mboxq) {
4513		rc = -ENOMEM;
4514		goto out_free_bsmbx;
4515	}
4516
4517	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4518	lpfc_supported_pages(mboxq);
4519	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4520	if (!rc) {
4521		mqe = &mboxq->u.mqe;
4522		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4523		       LPFC_MAX_SUPPORTED_PAGES);
4524		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4525			switch (pn_page[i]) {
4526			case LPFC_SLI4_PARAMETERS:
4527				phba->sli4_hba.pc_sli4_params.supported = 1;
4528				break;
4529			default:
4530				break;
4531			}
4532		}
4533		/* Read the port's SLI4 Parameters capabilities if supported. */
4534		if (phba->sli4_hba.pc_sli4_params.supported)
4535			rc = lpfc_pc_sli4_params_get(phba, mboxq);
4536		if (rc) {
4537			mempool_free(mboxq, phba->mbox_mem_pool);
4538			rc = -EIO;
4539			goto out_free_bsmbx;
4540		}
4541	}
4542	/*
4543	 * Get sli4 parameters that override parameters from Port capabilities.
4544	 * If this call fails, it isn't critical unless the SLI4 parameters come
4545	 * back in conflict.
4546	 */
4547	rc = lpfc_get_sli4_parameters(phba, mboxq);
4548	if (rc) {
4549		if (phba->sli4_hba.extents_in_use &&
4550		    phba->sli4_hba.rpi_hdrs_in_use) {
4551			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4552				"2999 Unsupported SLI4 Parameters "
4553				"Extents and RPI headers enabled.\n");
			/* Free the mailbox memory before bailing out */
			mempool_free(mboxq, phba->mbox_mem_pool);
4554			goto out_free_bsmbx;
4555		}
4556	}
4557	mempool_free(mboxq, phba->mbox_mem_pool);
4558	/* Verify all the SLI4 queues */
4559	rc = lpfc_sli4_queue_verify(phba);
4560	if (rc)
4561		goto out_free_bsmbx;
4562
4563	/* Create driver internal CQE event pool */
4564	rc = lpfc_sli4_cq_event_pool_create(phba);
4565	if (rc)
4566		goto out_free_bsmbx;
4567
4568	/* Initialize and populate the sgl list per host */
4569	rc = lpfc_init_sgl_list(phba);
4570	if (rc) {
4571		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4572				"1400 Failed to initialize sgl list.\n");
4573		goto out_destroy_cq_event_pool;
4574	}
4575	rc = lpfc_init_active_sgl_array(phba);
4576	if (rc) {
4577		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4578				"1430 Failed to initialize active sgl array.\n");
4579		goto out_free_sgl_list;
4580	}
4581	rc = lpfc_sli4_init_rpi_hdrs(phba);
4582	if (rc) {
4583		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4584				"1432 Failed to initialize rpi headers.\n");
4585		goto out_free_active_sgl;
4586	}
4587
4588	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4589	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4590	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4591					 GFP_KERNEL);
4592	if (!phba->fcf.fcf_rr_bmask) {
4593		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4594				"2759 Failed to allocate memory for FCF round "
4595				"robin failover bmask\n");
4596		rc = -ENOMEM;
4597		goto out_remove_rpi_hdrs;
4598	}
4599
4600	/*
4601	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
4602	 * interrupt vector.
This is not an error.
4603	 */
4604	if (phba->cfg_fcp_eq_count) {
4605		phba->sli4_hba.fcp_eq_hdl =
4606			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4607				 phba->cfg_fcp_eq_count), GFP_KERNEL);
4608		if (!phba->sli4_hba.fcp_eq_hdl) {
4609			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4610					"2572 Failed to allocate memory for "
4611					"fast-path per-EQ handle array\n");
4612			rc = -ENOMEM;
4613			goto out_free_fcf_rr_bmask;
4614		}
4615	}
4616
4617	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4618				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4619	if (!phba->sli4_hba.msix_entries) {
4620		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4621				"2573 Failed to allocate memory for msi-x "
4622				"interrupt vector entries\n");
4623		rc = -ENOMEM;
4624		goto out_free_fcp_eq_hdl;
4625	}
4626
4627	/*
4628	 * Enable sr-iov virtual functions if supported and configured
4629	 * through the module parameter.
4630	 */
4631	if (phba->cfg_sriov_nr_virtfn > 0) {
4632		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4633						    phba->cfg_sriov_nr_virtfn);
4634		if (rc) {
4635			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4636					"3020 Requested number of SR-IOV "
4637					"virtual functions (%d) is not "
4638					"supported\n",
4639					phba->cfg_sriov_nr_virtfn);
4640			phba->cfg_sriov_nr_virtfn = 0;
4641		}
4642	}
4643
4644	return 0;
4645
4646out_free_fcp_eq_hdl:
4647	kfree(phba->sli4_hba.fcp_eq_hdl);
4648out_free_fcf_rr_bmask:
4649	kfree(phba->fcf.fcf_rr_bmask);
4650out_remove_rpi_hdrs:
4651	lpfc_sli4_remove_rpi_hdrs(phba);
4652out_free_active_sgl:
4653	lpfc_free_active_sgl(phba);
4654out_free_sgl_list:
4655	lpfc_free_sgl_list(phba);
4656out_destroy_cq_event_pool:
4657	lpfc_sli4_cq_event_pool_destroy(phba);
4658out_free_bsmbx:
4659	lpfc_destroy_bootstrap_mbox(phba);
4660out_free_mem:
4661	lpfc_mem_free(phba);
4662	return rc;
4663}
4664
4665/**
4666 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4667 * @phba: pointer to lpfc hba data structure.
4668 *
4669 * This routine is invoked to unset the driver internal resources set up
4670 * specific for supporting the SLI-4 HBA device it is attached to.
4671 **/
4672static void
4673lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4674{
4675	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4676
4677	/* Free memory allocated for msi-x interrupt vector entries */
4678	kfree(phba->sli4_hba.msix_entries);
4679
4680	/* Free memory allocated for fast-path work queue handles */
4681	kfree(phba->sli4_hba.fcp_eq_hdl);
4682
4683	/* Free the allocated rpi headers. */
4684	lpfc_sli4_remove_rpi_hdrs(phba);
4685	lpfc_sli4_remove_rpis(phba);
4686
4687	/* Free eligible FCF index bmask */
4688	kfree(phba->fcf.fcf_rr_bmask);
4689
4690	/* Free the ELS sgl list */
4691	lpfc_free_active_sgl(phba);
4692	lpfc_free_sgl_list(phba);
4693
4694	/* Free the SCSI sgl management array */
4695	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4696
4697	/* Free the completion queue EQ event pool */
4698	lpfc_sli4_cq_event_release_all(phba);
4699	lpfc_sli4_cq_event_pool_destroy(phba);
4700
4701	/* Release resource identifiers. */
4702	lpfc_sli4_dealloc_resource_identifiers(phba);
4703
4704	/* Free the bsmbx region.
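	 * set up by lpfc_create_bootstrap_mbox() during resource setup.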
*/
4705	lpfc_destroy_bootstrap_mbox(phba);
4706
4707	/* Free the SLI Layer memory with SLI4 HBAs */
4708	lpfc_mem_free_all(phba);
4709
4710	/* Free the current connect table */
4711	list_for_each_entry_safe(conn_entry, next_conn_entry,
4712		&phba->fcf_conn_rec_list, list) {
4713		list_del_init(&conn_entry->list);
4714		kfree(conn_entry);
4715	}
4716
4717	return;
4718}
4719
4720/**
4721 * lpfc_init_api_table_setup - Set up init api function jump table
4722 * @phba: The hba struct for which this call is being executed.
4723 * @dev_grp: The HBA PCI-Device group number.
4724 *
4725 * This routine sets up the device INIT interface API function jump table
4726 * in @phba struct.
4727 *
4728 * Returns: 0 - success, -ENODEV - failure.
4729 **/
4730int
4731lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4732{
4733	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4734	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4735	phba->lpfc_selective_reset = lpfc_selective_reset;
4736	switch (dev_grp) {
4737	case LPFC_PCI_DEV_LP:
4738		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4739		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4740		phba->lpfc_stop_port = lpfc_stop_port_s3;
4741		break;
4742	case LPFC_PCI_DEV_OC:
4743		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4744		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4745		phba->lpfc_stop_port = lpfc_stop_port_s4;
4746		break;
4747	default:
4748		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4749				"1431 Invalid HBA PCI-device group: 0x%x\n",
4750				dev_grp);
4751		return -ENODEV;
4752		break;
4753	}
4754	return 0;
4755}
4756
4757/**
4758 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4759 * @phba: pointer to lpfc hba data structure.
4760 *
4761 * This routine is invoked to set up the driver internal resources before the
4762 * device specific resource setup to support the HBA device it is attached to.
4763 *
4764 * Return codes
4765 *	0 - successful
4766 *	other values - error
4767 **/
4768static int
4769lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4770{
4771	/*
4772	 * Driver resources common to all SLI revisions
4773	 */
4774	atomic_set(&phba->fast_event_count, 0);
4775	spin_lock_init(&phba->hbalock);
4776
4777	/* Initialize ndlp management spinlock */
4778	spin_lock_init(&phba->ndlp_lock);
4779
4780	INIT_LIST_HEAD(&phba->port_list);
4781	INIT_LIST_HEAD(&phba->work_list);
4782	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4783
4784	/* Initialize the wait queue head for the kernel thread */
4785	init_waitqueue_head(&phba->work_waitq);
4786
4787	/* Initialize the scsi buffer list used by driver for scsi IO */
4788	spin_lock_init(&phba->scsi_buf_list_lock);
4789	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4790
4791	/* Initialize the fabric iocb list */
4792	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4793
4794	/* Initialize list to save ELS buffers */
4795	INIT_LIST_HEAD(&phba->elsbuf);
4796
4797	/* Initialize FCF connection rec list */
4798	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4799
4800	return 0;
4801}
4802
4803/**
4804 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4805 * @phba: pointer to lpfc hba data structure.
4806 *
4807 * This routine is invoked to set up the driver internal resources after the
4808 * device specific resource setup to support the HBA device it is attached to.
4809 *
4810 * Return codes
4811 *	0 - successful
4812 *	other values - error
4813 **/
4814static int
4815lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4816{
4817	int error;
4818
4819	/* Startup the kernel thread for this host adapter. */
4820	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4821				       "lpfc_worker_%d", phba->brd_no);
4822	if (IS_ERR(phba->worker_thread)) {
4823		error = PTR_ERR(phba->worker_thread);
4824		return error;
4825	}
4826
4827	return 0;
4828}
4829
4830/**
4831 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4832 * @phba: pointer to lpfc hba data structure.
4833 *
4834 * This routine is invoked to unset the driver internal resources set up after
4835 * the device specific resource setup for supporting the HBA device it is
4836 * attached to.
4837 **/
4838static void
4839lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4840{
4841	/* Stop kernel worker thread */
4842	kthread_stop(phba->worker_thread);
4843}
4844
4845/**
4846 * lpfc_free_iocb_list - Free iocb list.
4847 * @phba: pointer to lpfc hba data structure.
4848 *
4849 * This routine is invoked to free the driver's IOCB list and memory.
4850 **/
4851static void
4852lpfc_free_iocb_list(struct lpfc_hba *phba)
4853{
4854	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4855
4856	spin_lock_irq(&phba->hbalock);
4857	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4858				 &phba->lpfc_iocb_list, list) {
4859		list_del(&iocbq_entry->list);
4860		kfree(iocbq_entry);
4861		phba->total_iocbq_bufs--;
4862	}
4863	spin_unlock_irq(&phba->hbalock);
4864
4865	return;
4866}
4867
4868/**
4869 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4870 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate for the list.
4871 *
4872 * This routine is invoked to allocate and initialize the driver's IOCB
4873 * list and set up the IOCB tag array accordingly.
4874 *
4875 * Return codes
4876 *	0 - successful
4877 *	other values - error
4878 **/
4879static int
4880lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4881{
4882	struct lpfc_iocbq *iocbq_entry = NULL;
4883	uint16_t iotag;
4884	int i;
4885
4886	/* Initialize and populate the iocb list per host.  */
4887	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4888	for (i = 0; i < iocb_count; i++) {
4889		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4890		if (iocbq_entry == NULL) {
4891			printk(KERN_ERR "%s: only allocated %d iocbs of "
4892				"expected %d count. Unloading driver.\n",
4893				__func__, i, iocb_count);
4894			goto out_free_iocbq;
4895		}
4896
4897		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4898		if (iotag == 0) {
4899			kfree(iocbq_entry);
4900			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4901				"Unloading driver.\n", __func__);
4902			goto out_free_iocbq;
4903		}
4904		iocbq_entry->sli4_lxritag = NO_XRI;
4905		iocbq_entry->sli4_xritag = NO_XRI;
4906
4907		spin_lock_irq(&phba->hbalock);
4908		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4909		phba->total_iocbq_bufs++;
4910		spin_unlock_irq(&phba->hbalock);
4911	}
4912
4913	return 0;
4914
4915out_free_iocbq:
4916	lpfc_free_iocb_list(phba);
4917
4918	return -ENOMEM;
4919}
4920
4921/**
4922 * lpfc_free_sgl_list - Free sgl list.
4923 * @phba: pointer to lpfc hba data structure.
4924 *
4925 * This routine is invoked to free the driver's sgl list and memory.
4926 **/
4927static void
4928lpfc_free_sgl_list(struct lpfc_hba *phba)
4929{
4930	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4931	LIST_HEAD(sglq_list);
4932
4933	spin_lock_irq(&phba->hbalock);
4934	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4935	spin_unlock_irq(&phba->hbalock);
4936
4937	list_for_each_entry_safe(sglq_entry, sglq_next,
4938				 &sglq_list, list) {
4939		list_del(&sglq_entry->list);
4940		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4941		kfree(sglq_entry);
4942		phba->sli4_hba.total_sglq_bufs--;
4943	}
4944	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4945}
4946
4947/**
4948 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4949 * @phba: pointer to lpfc hba data structure.
4950 *
4951 * This routine is invoked to allocate the driver's active sgl memory.
4952 * This array will hold the sglq_entry's for active IOs.
4953 **/
4954static int
4955lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4956{
4957	int size;
4958	size = sizeof(struct lpfc_sglq *);
4959	size *= phba->sli4_hba.max_cfg_param.max_xri;
4960
4961	phba->sli4_hba.lpfc_sglq_active_list =
4962		kzalloc(size, GFP_KERNEL);
4963	if (!phba->sli4_hba.lpfc_sglq_active_list)
4964		return -ENOMEM;
4965	return 0;
4966}
4967
4968/**
4969 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4970 * @phba: pointer to lpfc hba data structure.
4971 *
4972 * This routine is invoked to walk through the array of active sglq entries
4973 * and free all of the resources.
4974 * This is just a place holder for now.
4975 **/
4976static void
4977lpfc_free_active_sgl(struct lpfc_hba *phba)
4978{
4979	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4980}
4981
4982/**
4983 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4984 * @phba: pointer to lpfc hba data structure.
4985 *
4986 * This routine is invoked to allocate and initialize the driver's sgl
4987 * list and set up the sgl xritag tag array accordingly.
4988 *
4989 * Return codes
4990 *	0 - successful
4991 *	other values - error
4992 **/
4993static int
4994lpfc_init_sgl_list(struct lpfc_hba *phba)
4995{
4996	struct lpfc_sglq *sglq_entry = NULL;
4997	int i;
4998	int els_xri_cnt;
4999
5000	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5001	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5002			"2400 ELS XRI count %d.\n",
5003			els_xri_cnt);
5004	/* Initialize and populate the sglq list per host/VF.
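	 * Of max_cfg_param.max_xri, the first els_xri_cnt XRIs back the ELS
	 * sglqs allocated here; the remainder become scsi_xri_max and are
	 * tracked through lpfc_scsi_psb_array below.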
*/
5005	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5006	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5007
5008	/* Sanity check on XRI management */
5009	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
5010		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5011				"2562 No room left for SCSI XRI allocation: "
5012				"max_xri=%d, els_xri=%d\n",
5013				phba->sli4_hba.max_cfg_param.max_xri,
5014				els_xri_cnt);
5015		return -ENOMEM;
5016	}
5017
5018	/* Allocate memory for the ELS XRI management array */
5019	phba->sli4_hba.lpfc_els_sgl_array =
5020			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
5021			GFP_KERNEL);
5022
5023	if (!phba->sli4_hba.lpfc_els_sgl_array) {
5024		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5025				"2401 Failed to allocate memory for ELS "
5026				"XRI management array of size %d.\n",
5027				els_xri_cnt);
5028		return -ENOMEM;
5029	}
5030
5031	/* Keep the SCSI XRI into the XRI management array */
5032	phba->sli4_hba.scsi_xri_max =
5033			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
5034	phba->sli4_hba.scsi_xri_cnt = 0;
5035	phba->sli4_hba.lpfc_scsi_psb_array =
5036			kzalloc((sizeof(struct lpfc_scsi_buf *) *
5037			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
5038
5039	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
5040		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5041				"2563 Failed to allocate memory for SCSI "
5042				"XRI management array of size %d.\n",
5043				phba->sli4_hba.scsi_xri_max);
5044		kfree(phba->sli4_hba.lpfc_els_sgl_array);
5045		return -ENOMEM;
5046	}
5047
5048	for (i = 0; i < els_xri_cnt; i++) {
5049		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
5050		if (sglq_entry == NULL) {
5051			printk(KERN_ERR "%s: only allocated %d sgls of "
5052				"expected %d count. Unloading driver.\n",
5053				__func__, i, els_xri_cnt);
5054			goto out_free_mem;
5055		}
5056
5057		sglq_entry->buff_type = GEN_BUFF_TYPE;
5058		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
5059		if (sglq_entry->virt == NULL) {
5060			kfree(sglq_entry);
5061			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
5062				"Unloading driver.\n", __func__);
5063			goto out_free_mem;
5064		}
5065		sglq_entry->sgl = sglq_entry->virt;
5066		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5067
5068		/* The list order is used by later block SGL registration */
5069		spin_lock_irq(&phba->hbalock);
5070		sglq_entry->state = SGL_FREED;
5071		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5072		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5073		phba->sli4_hba.total_sglq_bufs++;
5074		spin_unlock_irq(&phba->hbalock);
5075	}
5076	return 0;
5077
5078out_free_mem:
5079	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5080	lpfc_free_sgl_list(phba);
5081	return -ENOMEM;
5082}
5083
5084/**
5085 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5086 * @phba: pointer to lpfc hba data structure.
5087 *
5088 * This routine is invoked to post rpi header templates to the
5089 * port for those SLI4 ports that do not support extents. This routine
5090 * posts a PAGE_SIZE memory region to the port to hold up to
5091 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5092 * and should be called only when interrupts are disabled.
5093 *
5094 * Return codes
5095 * 	0 - successful
5096 *	-ERROR - otherwise.
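 *
 * Ports that use resource extents never post headers this way: the routine
 * returns -EIO when extents are in use and simply returns 0 when
 * rpi_hdrs_in_use is not set.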
5097 **/ 5098int 5099lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5100{ 5101 int rc = 0; 5102 struct lpfc_rpi_hdr *rpi_hdr; 5103 5104 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 5105 if (!phba->sli4_hba.rpi_hdrs_in_use) 5106 return rc; 5107 if (phba->sli4_hba.extents_in_use) 5108 return -EIO; 5109 5110 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5111 if (!rpi_hdr) { 5112 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5113 "0391 Error during rpi post operation\n"); 5114 lpfc_sli4_remove_rpis(phba); 5115 rc = -ENODEV; 5116 } 5117 5118 return rc; 5119} 5120 5121/** 5122 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 5123 * @phba: pointer to lpfc hba data structure. 5124 * 5125 * This routine is invoked to allocate a single 4KB memory region to 5126 * support rpis and stores them in the phba. This single region 5127 * provides support for up to 64 rpis. The region is used globally 5128 * by the device. 5129 * 5130 * Returns: 5131 * A valid rpi hdr on success. 5132 * A NULL pointer on any failure. 5133 **/ 5134struct lpfc_rpi_hdr * 5135lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 5136{ 5137 uint16_t rpi_limit, curr_rpi_range; 5138 struct lpfc_dmabuf *dmabuf; 5139 struct lpfc_rpi_hdr *rpi_hdr; 5140 uint32_t rpi_count; 5141 5142 /* 5143 * If the SLI4 port supports extents, posting the rpi header isn't 5144 * required. Set the expected maximum count and let the actual value 5145 * get set when extents are fully allocated. 5146 */ 5147 if (!phba->sli4_hba.rpi_hdrs_in_use) 5148 return NULL; 5149 if (phba->sli4_hba.extents_in_use) 5150 return NULL; 5151 5152 /* The limit on the logical index is just the max_rpi count. */ 5153 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5154 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5155 5156 spin_lock_irq(&phba->hbalock); 5157 /* 5158 * Establish the starting RPI in this header block. The starting 5159 * rpi is normalized to a zero base because the physical rpi is 5160 * port based. 5161 */ 5162 curr_rpi_range = phba->sli4_hba.next_rpi - 5163 phba->sli4_hba.max_cfg_param.rpi_base; 5164 spin_unlock_irq(&phba->hbalock); 5165 5166 /* 5167 * The port has a limited number of rpis. The increment here 5168 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 5169 * and to allow the full max_rpi range per port. 5170 */ 5171 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 5172 rpi_count = rpi_limit - curr_rpi_range; 5173 else 5174 rpi_count = LPFC_RPI_HDR_COUNT; 5175 5176 if (!rpi_count) 5177 return NULL; 5178 /* 5179 * First allocate the protocol header region for the port. The 5180 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5181 */ 5182 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5183 if (!dmabuf) 5184 return NULL; 5185 5186 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5187 LPFC_HDR_TEMPLATE_SIZE, 5188 &dmabuf->phys, 5189 GFP_KERNEL); 5190 if (!dmabuf->virt) { 5191 rpi_hdr = NULL; 5192 goto err_free_dmabuf; 5193 } 5194 5195 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 5196 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 5197 rpi_hdr = NULL; 5198 goto err_free_coherent; 5199 } 5200 5201 /* Save the rpi header data for cleanup later. */ 5202 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 5203 if (!rpi_hdr) 5204 goto err_free_coherent; 5205 5206 rpi_hdr->dmabuf = dmabuf; 5207 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5208 rpi_hdr->page_count = 1; 5209 spin_lock_irq(&phba->hbalock); 5210 5211 /* The rpi_hdr stores the logical index only. 
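	 * lpfc_sli4_remove_rpi_hdrs() below walks lpfc_rpi_hdr_list to undo
	 * these allocations at teardown.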
*/ 5212 rpi_hdr->start_rpi = curr_rpi_range; 5213 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5214 5215 /* 5216 * The next_rpi stores the next logical module-64 rpi value used 5217 * to post physical rpis in subsequent rpi postings. 5218 */ 5219 phba->sli4_hba.next_rpi += rpi_count; 5220 spin_unlock_irq(&phba->hbalock); 5221 return rpi_hdr; 5222 5223 err_free_coherent: 5224 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 5225 dmabuf->virt, dmabuf->phys); 5226 err_free_dmabuf: 5227 kfree(dmabuf); 5228 return NULL; 5229} 5230 5231/** 5232 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 5233 * @phba: pointer to lpfc hba data structure. 5234 * 5235 * This routine is invoked to remove all memory resources allocated 5236 * to support rpis for SLI4 ports not supporting extents. This routine 5237 * presumes the caller has released all rpis consumed by fabric or port 5238 * logins and is prepared to have the header pages removed. 5239 **/ 5240void 5241lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5242{ 5243 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5244 5245 if (!phba->sli4_hba.rpi_hdrs_in_use) 5246 goto exit; 5247 5248 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5249 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5250 list_del(&rpi_hdr->list); 5251 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 5252 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 5253 kfree(rpi_hdr->dmabuf); 5254 kfree(rpi_hdr); 5255 } 5256 exit: 5257 /* There are no rpis available to the port now. */ 5258 phba->sli4_hba.next_rpi = 0; 5259} 5260 5261/** 5262 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 5263 * @pdev: pointer to pci device data structure. 5264 * 5265 * This routine is invoked to allocate the driver hba data structure for an 5266 * HBA device. If the allocation is successful, the phba reference to the 5267 * PCI device data structure is set. 5268 * 5269 * Return codes 5270 * pointer to @phba - successful 5271 * NULL - error 5272 **/ 5273static struct lpfc_hba * 5274lpfc_hba_alloc(struct pci_dev *pdev) 5275{ 5276 struct lpfc_hba *phba; 5277 5278 /* Allocate memory for HBA structure */ 5279 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5280 if (!phba) { 5281 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5282 return NULL; 5283 } 5284 5285 /* Set reference to PCI device in HBA structure */ 5286 phba->pcidev = pdev; 5287 5288 /* Assign an unused board number */ 5289 phba->brd_no = lpfc_get_instance(); 5290 if (phba->brd_no < 0) { 5291 kfree(phba); 5292 return NULL; 5293 } 5294 5295 spin_lock_init(&phba->ct_ev_lock); 5296 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5297 5298 return phba; 5299} 5300 5301/** 5302 * lpfc_hba_free - Free driver hba data structure with a device. 5303 * @phba: pointer to lpfc hba data structure. 5304 * 5305 * This routine is invoked to free the driver hba data structure with an 5306 * HBA device. 5307 **/ 5308static void 5309lpfc_hba_free(struct lpfc_hba *phba) 5310{ 5311 /* Release the driver assigned board number */ 5312 idr_remove(&lpfc_hba_index, phba->brd_no); 5313 5314 kfree(phba); 5315 return; 5316} 5317 5318/** 5319 * lpfc_create_shost - Create hba physical port with associated scsi host. 5320 * @phba: pointer to lpfc hba data structure. 5321 * 5322 * This routine is invoked to create HBA physical port and associate a SCSI 5323 * host with it. 
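 * The shost is stored as PCI drvdata so that later stages can recover it;
 * a sketch of the lookup pattern used elsewhere in this file (assuming the
 * probe path has stored it, as done below):
 *
 *	struct Scsi_Host *shost = pci_get_drvdata(phba->pcidev);
 *	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;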
5324 *
5325 * Return codes
5326 *	0 - successful
5327 *	other values - error
5328 **/
5329static int
5330lpfc_create_shost(struct lpfc_hba *phba)
5331{
5332	struct lpfc_vport *vport;
5333	struct Scsi_Host *shost;
5334
5335	/* Initialize HBA FC structure */
5336	phba->fc_edtov = FF_DEF_EDTOV;
5337	phba->fc_ratov = FF_DEF_RATOV;
5338	phba->fc_altov = FF_DEF_ALTOV;
5339	phba->fc_arbtov = FF_DEF_ARBTOV;
5340
5341	atomic_set(&phba->sdev_cnt, 0);
5342	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5343	if (!vport)
5344		return -ENODEV;
5345
5346	shost = lpfc_shost_from_vport(vport);
5347	phba->pport = vport;
5348	lpfc_debugfs_initialize(vport);
5349	/* Put reference to SCSI host to driver's device private data */
5350	pci_set_drvdata(phba->pcidev, shost);
5351
5352	return 0;
5353}
5354
5355/**
5356 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5357 * @phba: pointer to lpfc hba data structure.
5358 *
5359 * This routine is invoked to destroy HBA physical port and the associated
5360 * SCSI host.
5361 **/
5362static void
5363lpfc_destroy_shost(struct lpfc_hba *phba)
5364{
5365	struct lpfc_vport *vport = phba->pport;
5366
5367	/* Destroy physical port that is associated with the SCSI host */
5368	destroy_port(vport);
5369
5370	return;
5371}
5372
5373/**
5374 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5375 * @phba: pointer to lpfc hba data structure.
5376 * @shost: the shost to be used to detect Block guard settings.
5377 *
5378 * This routine sets up the local Block guard protocol settings for @shost.
5379 * This routine also allocates memory for debugging bg buffers.
5380 **/
5381static void
5382lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5383{
5384	int pagecnt = 10;
5385	if (lpfc_prot_mask && lpfc_prot_guard) {
5386		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5387				"1478 Registering BlockGuard with the "
5388				"SCSI layer\n");
5389		scsi_host_set_prot(shost, lpfc_prot_mask);
5390		scsi_host_set_guard(shost, lpfc_prot_guard);
5391	}
5392	if (!_dump_buf_data) {
		/* Initialize the dump buffer lock once, not per retry */
		spin_lock_init(&_dump_buf_lock);
5393		while (pagecnt) {
5395			_dump_buf_data =
5396				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5397			if (_dump_buf_data) {
5398				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5399					"9043 BLKGRD: allocated %d pages for "
5400				       "_dump_buf_data at 0x%p\n",
5401				       (1 << pagecnt), _dump_buf_data);
5402				_dump_buf_data_order = pagecnt;
5403				memset(_dump_buf_data, 0,
5404				       ((1 << PAGE_SHIFT) << pagecnt));
5405				break;
5406			} else
5407				--pagecnt;
5408		}
5409		if (!_dump_buf_data_order)
5410			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5411				"9044 BLKGRD: ERROR unable to allocate "
5412			       "memory for hexdump\n");
5413	} else
5414		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5415			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5416		       "\n", _dump_buf_data);
5417	if (!_dump_buf_dif) {
5418		while (pagecnt) {
5419			_dump_buf_dif =
5420				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5421			if (_dump_buf_dif) {
5422				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5423					"9046 BLKGRD: allocated %d pages for "
5424				       "_dump_buf_dif at 0x%p\n",
5425				       (1 << pagecnt), _dump_buf_dif);
5426				_dump_buf_dif_order = pagecnt;
5427				memset(_dump_buf_dif, 0,
5428				       ((1 << PAGE_SHIFT) << pagecnt));
5429				break;
5430			} else
5431				--pagecnt;
5432		}
5433		if (!_dump_buf_dif_order)
5434			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5435				"9047 BLKGRD: ERROR unable to allocate "
5436			       "memory for hexdump\n");
5437	} else
5438		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5439			"9048 BLKGRD: already allocated 
_dump_buf_dif=0x%p\n", 5440 _dump_buf_dif); 5441} 5442 5443/** 5444 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5445 * @phba: pointer to lpfc hba data structure. 5446 * 5447 * This routine is invoked to perform all the necessary post initialization 5448 * setup for the device. 5449 **/ 5450static void 5451lpfc_post_init_setup(struct lpfc_hba *phba) 5452{ 5453 struct Scsi_Host *shost; 5454 struct lpfc_adapter_event_header adapter_event; 5455 5456 /* Get the default values for Model Name and Description */ 5457 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5458 5459 /* 5460 * hba setup may have changed the hba_queue_depth so we need to 5461 * adjust the value of can_queue. 5462 */ 5463 shost = pci_get_drvdata(phba->pcidev); 5464 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5465 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5466 lpfc_setup_bg(phba, shost); 5467 5468 lpfc_host_attrib_init(shost); 5469 5470 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5471 spin_lock_irq(shost->host_lock); 5472 lpfc_poll_start_timer(phba); 5473 spin_unlock_irq(shost->host_lock); 5474 } 5475 5476 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5477 "0428 Perform SCSI scan\n"); 5478 /* Send board arrival event to upper layer */ 5479 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5480 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5481 fc_host_post_vendor_event(shost, fc_get_event_number(), 5482 sizeof(adapter_event), 5483 (char *) &adapter_event, 5484 LPFC_NL_VENDOR_ID); 5485 return; 5486} 5487 5488/** 5489 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5490 * @phba: pointer to lpfc hba data structure. 5491 * 5492 * This routine is invoked to set up the PCI device memory space for device 5493 * with SLI-3 interface spec. 5494 * 5495 * Return codes 5496 * 0 - successful 5497 * other values - error 5498 **/ 5499static int 5500lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5501{ 5502 struct pci_dev *pdev; 5503 unsigned long bar0map_len, bar2map_len; 5504 int i, hbq_count; 5505 void *ptr; 5506 int error = -ENODEV; 5507 5508 /* Obtain PCI device reference */ 5509 if (!phba->pcidev) 5510 return error; 5511 else 5512 pdev = phba->pcidev; 5513 5514 /* Set the device DMA mask size */ 5515 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5516 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5517 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5518 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5519 return error; 5520 } 5521 } 5522 5523 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5524 * required by each mapping. 5525 */ 5526 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5527 bar0map_len = pci_resource_len(pdev, 0); 5528 5529 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5530 bar2map_len = pci_resource_len(pdev, 2); 5531 5532 /* Map HBA SLIM to a kernel virtual address. */ 5533 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5534 if (!phba->slim_memmap_p) { 5535 dev_printk(KERN_ERR, &pdev->dev, 5536 "ioremap failed for SLIM memory.\n"); 5537 goto out; 5538 } 5539 5540 /* Map HBA Control Registers to a kernel virtual address. 
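(This is the BAR2 mapping; the SLIM region from BAR0 was mapped just above, and both mappings are undone in lpfc_sli_pci_mem_unset() on teardown.)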
*/ 5541 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5542 if (!phba->ctrl_regs_memmap_p) { 5543 dev_printk(KERN_ERR, &pdev->dev, 5544 "ioremap failed for HBA control registers.\n"); 5545 goto out_iounmap_slim; 5546 } 5547 5548 /* Allocate memory for SLI-2 structures */ 5549 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5550 SLI2_SLIM_SIZE, 5551 &phba->slim2p.phys, 5552 GFP_KERNEL); 5553 if (!phba->slim2p.virt) 5554 goto out_iounmap; 5555 5556 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5557 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5558 phba->mbox_ext = (phba->slim2p.virt + 5559 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5560 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5561 phba->IOCBs = (phba->slim2p.virt + 5562 offsetof(struct lpfc_sli2_slim, IOCBs)); 5563 5564 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5565 lpfc_sli_hbq_size(), 5566 &phba->hbqslimp.phys, 5567 GFP_KERNEL); 5568 if (!phba->hbqslimp.virt) 5569 goto out_free_slim; 5570 5571 hbq_count = lpfc_sli_hbq_count(); 5572 ptr = phba->hbqslimp.virt; 5573 for (i = 0; i < hbq_count; ++i) { 5574 phba->hbqs[i].hbq_virt = ptr; 5575 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5576 ptr += (lpfc_hbq_defs[i]->entry_count * 5577 sizeof(struct lpfc_hbq_entry)); 5578 } 5579 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5580 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5581 5582 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5583 5584 INIT_LIST_HEAD(&phba->rb_pend_list); 5585 5586 phba->MBslimaddr = phba->slim_memmap_p; 5587 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5588 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5589 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5590 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5591 5592 return 0; 5593 5594out_free_slim: 5595 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5596 phba->slim2p.virt, phba->slim2p.phys); 5597out_iounmap: 5598 iounmap(phba->ctrl_regs_memmap_p); 5599out_iounmap_slim: 5600 iounmap(phba->slim_memmap_p); 5601out: 5602 return error; 5603} 5604 5605/** 5606 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5607 * @phba: pointer to lpfc hba data structure. 5608 * 5609 * This routine is invoked to unset the PCI device memory space for device 5610 * with SLI-3 interface spec. 5611 **/ 5612static void 5613lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5614{ 5615 struct pci_dev *pdev; 5616 5617 /* Obtain PCI device reference */ 5618 if (!phba->pcidev) 5619 return; 5620 else 5621 pdev = phba->pcidev; 5622 5623 /* Free coherent DMA memory allocated */ 5624 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5625 phba->hbqslimp.virt, phba->hbqslimp.phys); 5626 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5627 phba->slim2p.virt, phba->slim2p.phys); 5628 5629 /* I/O memory unmap */ 5630 iounmap(phba->ctrl_regs_memmap_p); 5631 iounmap(phba->slim_memmap_p); 5632 5633 return; 5634} 5635 5636/** 5637 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5638 * @phba: pointer to lpfc hba data structure. 5639 * 5640 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5641 * done and check status. 5642 * 5643 * Return 0 if successful, otherwise -ENODEV. 
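 * (The poll loop below reads the port semaphore register every 10 ms for up to 3000 iterations, which yields the ~30 second POST window noted in the code.)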
5644 **/ 5645int 5646lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5647{ 5648 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 5649 struct lpfc_register reg_data; 5650 int i, port_error = 0; 5651 uint32_t if_type; 5652 5653 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 5654 memset(&reg_data, 0, sizeof(reg_data)); 5655 if (!phba->sli4_hba.PSMPHRregaddr) 5656 return -ENODEV; 5657 5658 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5659 for (i = 0; i < 3000; i++) { 5660 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 5661 &portsmphr_reg.word0) || 5662 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 5663 /* Port has a fatal POST error, break out */ 5664 port_error = -ENODEV; 5665 break; 5666 } 5667 if (LPFC_POST_STAGE_PORT_READY == 5668 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 5669 break; 5670 msleep(10); 5671 } 5672 5673 /* 5674 * If there was a port error during POST, then don't proceed with 5675 * other register reads as the data may not be valid. Just exit. 5676 */ 5677 if (port_error) { 5678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5679 "1408 Port Failed POST - portsmphr=0x%x, " 5680 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 5681 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 5682 portsmphr_reg.word0, 5683 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 5684 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 5685 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 5686 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 5687 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 5688 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 5689 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 5690 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 5691 } else { 5692 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5693 "2534 Device Info: SLIFamily=0x%x, " 5694 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 5695 "SLIHint_2=0x%x, FT=0x%x\n", 5696 bf_get(lpfc_sli_intf_sli_family, 5697 &phba->sli4_hba.sli_intf), 5698 bf_get(lpfc_sli_intf_slirev, 5699 &phba->sli4_hba.sli_intf), 5700 bf_get(lpfc_sli_intf_if_type, 5701 &phba->sli4_hba.sli_intf), 5702 bf_get(lpfc_sli_intf_sli_hint1, 5703 &phba->sli4_hba.sli_intf), 5704 bf_get(lpfc_sli_intf_sli_hint2, 5705 &phba->sli4_hba.sli_intf), 5706 bf_get(lpfc_sli_intf_func_type, 5707 &phba->sli4_hba.sli_intf)); 5708 /* 5709 * Check for other Port errors during the initialization 5710 * process. Fail the load if the port did not come up 5711 * correctly. 5712 */ 5713 if_type = bf_get(lpfc_sli_intf_if_type, 5714 &phba->sli4_hba.sli_intf); 5715 switch (if_type) { 5716 case LPFC_SLI_INTF_IF_TYPE_0: 5717 phba->sli4_hba.ue_mask_lo = 5718 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 5719 phba->sli4_hba.ue_mask_hi = 5720 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 5721 uerrlo_reg.word0 = 5722 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 5723 uerrhi_reg.word0 = 5724 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 5725 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5726 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5728 "1422 Unrecoverable Error " 5729 "Detected during POST " 5730 "uerr_lo_reg=0x%x, " 5731 "uerr_hi_reg=0x%x, " 5732 "ue_mask_lo_reg=0x%x, " 5733 "ue_mask_hi_reg=0x%x\n", 5734 uerrlo_reg.word0, 5735 uerrhi_reg.word0, 5736 phba->sli4_hba.ue_mask_lo, 5737 phba->sli4_hba.ue_mask_hi); 5738 port_error = -ENODEV; 5739 } 5740 break; 5741 case LPFC_SLI_INTF_IF_TYPE_2: 5742 /* Final checks. The port status should be clean.
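An error bit is tolerated here only while the restart-needed (RN) bit is also set; an error without RN fails the POST check.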
*/ 5743 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 5744 &reg_data.word0) || 5745 (bf_get(lpfc_sliport_status_err, &reg_data) && 5746 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 5747 phba->work_status[0] = 5748 readl(phba->sli4_hba.u.if_type2. 5749 ERR1regaddr); 5750 phba->work_status[1] = 5751 readl(phba->sli4_hba.u.if_type2. 5752 ERR2regaddr); 5753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5754 "2888 Port Error Detected " 5755 "during POST: " 5756 "port status reg 0x%x, " 5757 "port_smphr reg 0x%x, " 5758 "error 1=0x%x, error 2=0x%x\n", 5759 reg_data.word0, 5760 portsmphr_reg.word0, 5761 phba->work_status[0], 5762 phba->work_status[1]); 5763 port_error = -ENODEV; 5764 } 5765 break; 5766 case LPFC_SLI_INTF_IF_TYPE_1: 5767 default: 5768 break; 5769 } 5770 } 5771 return port_error; 5772} 5773 5774/** 5775 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5776 * @phba: pointer to lpfc hba data structure. 5777 * @if_type: The SLI4 interface type getting configured. 5778 * 5779 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5780 * memory map. 5781 **/ 5782static void 5783lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 5784{ 5785 switch (if_type) { 5786 case LPFC_SLI_INTF_IF_TYPE_0: 5787 phba->sli4_hba.u.if_type0.UERRLOregaddr = 5788 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 5789 phba->sli4_hba.u.if_type0.UERRHIregaddr = 5790 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 5791 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 5792 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 5793 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 5794 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 5795 phba->sli4_hba.SLIINTFregaddr = 5796 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5797 break; 5798 case LPFC_SLI_INTF_IF_TYPE_2: 5799 phba->sli4_hba.u.if_type2.ERR1regaddr = 5800 phba->sli4_hba.conf_regs_memmap_p + 5801 LPFC_CTL_PORT_ER1_OFFSET; 5802 phba->sli4_hba.u.if_type2.ERR2regaddr = 5803 phba->sli4_hba.conf_regs_memmap_p + 5804 LPFC_CTL_PORT_ER2_OFFSET; 5805 phba->sli4_hba.u.if_type2.CTRLregaddr = 5806 phba->sli4_hba.conf_regs_memmap_p + 5807 LPFC_CTL_PORT_CTL_OFFSET; 5808 phba->sli4_hba.u.if_type2.STATUSregaddr = 5809 phba->sli4_hba.conf_regs_memmap_p + 5810 LPFC_CTL_PORT_STA_OFFSET; 5811 phba->sli4_hba.SLIINTFregaddr = 5812 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5813 phba->sli4_hba.PSMPHRregaddr = 5814 phba->sli4_hba.conf_regs_memmap_p + 5815 LPFC_CTL_PORT_SEM_OFFSET; 5816 phba->sli4_hba.RQDBregaddr = 5817 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5818 phba->sli4_hba.WQDBregaddr = 5819 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 5820 phba->sli4_hba.EQCQDBregaddr = 5821 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 5822 phba->sli4_hba.MQDBregaddr = 5823 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 5824 phba->sli4_hba.BMBXregaddr = 5825 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 5826 break; 5827 case LPFC_SLI_INTF_IF_TYPE_1: 5828 default: 5829 dev_printk(KERN_ERR, &phba->pcidev->dev, 5830 "FATAL - unsupported SLI4 interface type - %d\n", 5831 if_type); 5832 break; 5833 } 5834} 5835 5836/** 5837 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5838 * @phba: pointer to lpfc hba data structure. 5839 * 5840 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5841 * memory map.
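 * (Judging by the LPFC_SLIPORT_IF0_* and LPFC_HST_* names, these offsets are the if_type 0 layout; if_type 2 ports instead pick up their semaphore register from BAR0 in lpfc_sli4_bar0_register_memmap() above.)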
5842 **/ 5843static void 5844lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5845{ 5846 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5847 LPFC_SLIPORT_IF0_SMPHR; 5848 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5849 LPFC_HST_ISR0; 5850 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5851 LPFC_HST_IMR0; 5852 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5853 LPFC_HST_ISCR0; 5854} 5855 5856/** 5857 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5858 * @phba: pointer to lpfc hba data structure. 5859 * @vf: virtual function number 5860 * 5861 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5862 * based on the given virtual function number, @vf. 5863 * 5864 * Return 0 if successful, otherwise -ENODEV. 5865 **/ 5866static int 5867lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5868{ 5869 if (vf > LPFC_VIR_FUNC_MAX) 5870 return -ENODEV; 5871 5872 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5873 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5874 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5875 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5876 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5877 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5878 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5879 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5880 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5881 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5882 return 0; 5883} 5884 5885/** 5886 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5887 * @phba: pointer to lpfc hba data structure. 5888 * 5889 * This routine is invoked to create the bootstrap mailbox 5890 * region consistent with the SLI-4 interface spec. This 5891 * routine allocates all memory necessary to communicate 5892 * mailbox commands to the port and sets up all alignment 5893 * needs. No locks are expected to be held when calling 5894 * this routine. 5895 * 5896 * Return codes 5897 * 0 - successful 5898 * -ENOMEM - could not allocate memory. 5899 **/ 5900static int 5901lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5902{ 5903 uint32_t bmbx_size; 5904 struct lpfc_dmabuf *dmabuf; 5905 struct dma_address *dma_address; 5906 uint32_t pa_addr; 5907 uint64_t phys_addr; 5908 5909 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5910 if (!dmabuf) 5911 return -ENOMEM; 5912 5913 /* 5914 * The bootstrap mailbox region consists of 2 parts 5915 * plus an alignment restriction of 16 bytes. 5916 */ 5917 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5918 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5919 bmbx_size, 5920 &dmabuf->phys, 5921 GFP_KERNEL); 5922 if (!dmabuf->virt) { 5923 kfree(dmabuf); 5924 return -ENOMEM; 5925 } 5926 memset(dmabuf->virt, 0, bmbx_size); 5927 5928 /* 5929 * Initialize the bootstrap mailbox pointers now so that the register 5930 * operations are simple later. The mailbox dma address is required 5931 * to be 16-byte aligned. Also align the virtual memory as each 5932 * mailbox is copied into the bmbx mailbox region before issuing the 5933 * command to the port.
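 * For example (illustrative numbers only): a dmabuf->phys ending in 0x...8 is rounded up to the next 16-byte boundary by ALIGN(, LPFC_ALIGN_16_BYTE), and PTR_ALIGN() applies the same rounding to dmabuf->virt below.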
5934 */ 5935 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5936 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5937 5938 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5939 LPFC_ALIGN_16_BYTE); 5940 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5941 LPFC_ALIGN_16_BYTE); 5942 5943 /* 5944 * Set the high and low physical addresses now. The SLI4 alignment 5945 * requirement is 16 bytes and the mailbox is posted to the port 5946 * as two 30-bit addresses. The other data is a bit marking whether 5947 * the 30-bit address is the high or low address. 5948 * Upcast bmbx aphys to 64 bits so the shift instruction compiles 5949 * clean on 32-bit machines. 5950 */ 5951 dma_address = &phba->sli4_hba.bmbx.dma_address; 5952 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5953 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5954 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5955 LPFC_BMBX_BIT1_ADDR_HI); 5956 5957 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5958 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5959 LPFC_BMBX_BIT1_ADDR_LO); 5960 return 0; 5961} 5962 5963/** 5964 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5965 * @phba: pointer to lpfc hba data structure. 5966 * 5967 * This routine is invoked to tear down the bootstrap mailbox 5968 * region and release all host resources. This routine requires 5969 * the caller to ensure all mailbox commands have been recovered, no 5970 * additional mailbox commands are sent, and interrupts are disabled 5971 * before calling this routine. 5972 * 5973 **/ 5974static void 5975lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5976{ 5977 dma_free_coherent(&phba->pcidev->dev, 5978 phba->sli4_hba.bmbx.bmbx_size, 5979 phba->sli4_hba.bmbx.dmabuf->virt, 5980 phba->sli4_hba.bmbx.dmabuf->phys); 5981 5982 kfree(phba->sli4_hba.bmbx.dmabuf); 5983 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5984} 5985 5986/** 5987 * lpfc_sli4_read_config - Get the config parameters. 5988 * @phba: pointer to lpfc hba data structure. 5989 * 5990 * This routine is invoked to read the configuration parameters from the HBA. 5991 * The configuration parameters are used to set the base and maximum values 5992 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource 5993 * allocation for the port. 5994 * 5995 * Return codes 5996 * 0 - successful 5997 * -ENOMEM - No available memory 5998 * -EIO - The mailbox failed to complete successfully.
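 * (The base values read here seed phba->sli4_hba.next_rpi and next_xri as well as the vpi/vfi bases below, and max_xri caps cfg_hba_queue_depth.)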
5999 **/ 6000int 6001lpfc_sli4_read_config(struct lpfc_hba *phba) 6002{ 6003 LPFC_MBOXQ_t *pmb; 6004 struct lpfc_mbx_read_config *rd_config; 6005 union lpfc_sli4_cfg_shdr *shdr; 6006 uint32_t shdr_status, shdr_add_status; 6007 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6008 struct lpfc_rsrc_desc_fcfcoe *desc; 6009 uint32_t desc_count; 6010 int length, i, rc = 0; 6011 6012 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6013 if (!pmb) { 6014 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6015 "2011 Unable to allocate memory for issuing " 6016 "SLI_CONFIG_SPECIAL mailbox command\n"); 6017 return -ENOMEM; 6018 } 6019 6020 lpfc_read_config(phba, pmb); 6021 6022 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6023 if (rc != MBX_SUCCESS) { 6024 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6025 "2012 Mailbox failed , mbxCmd x%x " 6026 "READ_CONFIG, mbxStatus x%x\n", 6027 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6028 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6029 rc = -EIO; 6030 } else { 6031 rd_config = &pmb->u.mqe.un.rd_config; 6032 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 6033 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 6034 phba->sli4_hba.lnk_info.lnk_tp = 6035 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 6036 phba->sli4_hba.lnk_info.lnk_no = 6037 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 6038 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6039 "3081 lnk_type:%d, lnk_numb:%d\n", 6040 phba->sli4_hba.lnk_info.lnk_tp, 6041 phba->sli4_hba.lnk_info.lnk_no); 6042 } else 6043 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6044 "3082 Mailbox (x%x) returned ldv:x0\n", 6045 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 6046 phba->sli4_hba.extents_in_use = 6047 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 6048 phba->sli4_hba.max_cfg_param.max_xri = 6049 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 6050 phba->sli4_hba.max_cfg_param.xri_base = 6051 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 6052 phba->sli4_hba.max_cfg_param.max_vpi = 6053 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 6054 phba->sli4_hba.max_cfg_param.vpi_base = 6055 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 6056 phba->sli4_hba.max_cfg_param.max_rpi = 6057 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 6058 phba->sli4_hba.max_cfg_param.rpi_base = 6059 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 6060 phba->sli4_hba.max_cfg_param.max_vfi = 6061 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 6062 phba->sli4_hba.max_cfg_param.vfi_base = 6063 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 6064 phba->sli4_hba.max_cfg_param.max_fcfi = 6065 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 6066 phba->sli4_hba.max_cfg_param.max_eq = 6067 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 6068 phba->sli4_hba.max_cfg_param.max_rq = 6069 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 6070 phba->sli4_hba.max_cfg_param.max_wq = 6071 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 6072 phba->sli4_hba.max_cfg_param.max_cq = 6073 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 6074 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 6075 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 6076 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 6077 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 6078 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 6079 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 6080 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 6081 phba->max_vports = phba->max_vpi; 6082 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6083 "2003 cfg params Extents? 
%d " 6084 "XRI(B:%d M:%d), " 6085 "VPI(B:%d M:%d) " 6086 "VFI(B:%d M:%d) " 6087 "RPI(B:%d M:%d) " 6088 "FCFI(Count:%d)\n", 6089 phba->sli4_hba.extents_in_use, 6090 phba->sli4_hba.max_cfg_param.xri_base, 6091 phba->sli4_hba.max_cfg_param.max_xri, 6092 phba->sli4_hba.max_cfg_param.vpi_base, 6093 phba->sli4_hba.max_cfg_param.max_vpi, 6094 phba->sli4_hba.max_cfg_param.vfi_base, 6095 phba->sli4_hba.max_cfg_param.max_vfi, 6096 phba->sli4_hba.max_cfg_param.rpi_base, 6097 phba->sli4_hba.max_cfg_param.max_rpi, 6098 phba->sli4_hba.max_cfg_param.max_fcfi); 6099 } 6100 6101 if (rc) 6102 goto read_cfg_out; 6103 6104 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 6105 if (phba->cfg_hba_queue_depth > 6106 (phba->sli4_hba.max_cfg_param.max_xri - 6107 lpfc_sli4_get_els_iocb_cnt(phba))) 6108 phba->cfg_hba_queue_depth = 6109 phba->sli4_hba.max_cfg_param.max_xri - 6110 lpfc_sli4_get_els_iocb_cnt(phba); 6111 6112 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 6113 LPFC_SLI_INTF_IF_TYPE_2) 6114 goto read_cfg_out; 6115 6116 /* get the pf# and vf# for SLI4 if_type 2 port */ 6117 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 6118 sizeof(struct lpfc_sli4_cfg_mhdr)); 6119 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 6120 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 6121 length, LPFC_SLI4_MBX_EMBED); 6122 6123 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6124 shdr = (union lpfc_sli4_cfg_shdr *) 6125 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 6126 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6127 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6128 if (rc || shdr_status || shdr_add_status) { 6129 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6130 "3026 Mailbox failed , mbxCmd x%x " 6131 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 6132 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6133 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6134 rc = -EIO; 6135 goto read_cfg_out; 6136 } 6137 6138 /* search for fc_fcoe resrouce descriptor */ 6139 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6140 desc_count = get_func_cfg->func_cfg.rsrc_desc_count; 6141 6142 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 6143 desc = (struct lpfc_rsrc_desc_fcfcoe *) 6144 &get_func_cfg->func_cfg.desc[i]; 6145 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 6146 bf_get(lpfc_rsrc_desc_pcie_type, desc)) { 6147 phba->sli4_hba.iov.pf_number = 6148 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 6149 phba->sli4_hba.iov.vf_number = 6150 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 6151 break; 6152 } 6153 } 6154 6155 if (i < LPFC_RSRC_DESC_MAX_NUM) 6156 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6157 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6158 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6159 phba->sli4_hba.iov.vf_number); 6160 else { 6161 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6162 "3028 GET_FUNCTION_CONFIG: failed to find " 6163 "Resrouce Descriptor:x%x\n", 6164 LPFC_RSRC_DESC_TYPE_FCFCOE); 6165 rc = -EIO; 6166 } 6167 6168read_cfg_out: 6169 mempool_free(pmb, phba->mbox_mem_pool); 6170 return rc; 6171} 6172 6173/** 6174 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 6175 * @phba: pointer to lpfc hba data structure. 6176 * 6177 * This routine is invoked to setup the port-side endian order when 6178 * the port if_type is 0. This routine has no function for other 6179 * if_types. 6180 * 6181 * Return codes 6182 * 0 - successful 6183 * -ENOMEM - No available memory 6184 * -EIO - The mailbox failed to complete successfully. 
6185 **/ 6186static int 6187lpfc_setup_endian_order(struct lpfc_hba *phba) 6188{ 6189 LPFC_MBOXQ_t *mboxq; 6190 uint32_t if_type, rc = 0; 6191 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 6192 HOST_ENDIAN_HIGH_WORD1}; 6193 6194 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6195 switch (if_type) { 6196 case LPFC_SLI_INTF_IF_TYPE_0: 6197 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6198 GFP_KERNEL); 6199 if (!mboxq) { 6200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6201 "0492 Unable to allocate memory for " 6202 "issuing SLI_CONFIG_SPECIAL mailbox " 6203 "command\n"); 6204 return -ENOMEM; 6205 } 6206 6207 /* 6208 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 6209 * two words to contain special data values and no other data. 6210 */ 6211 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 6212 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 6213 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6214 if (rc != MBX_SUCCESS) { 6215 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6216 "0493 SLI_CONFIG_SPECIAL mailbox " 6217 "failed with status x%x\n", 6218 rc); 6219 rc = -EIO; 6220 } 6221 mempool_free(mboxq, phba->mbox_mem_pool); 6222 break; 6223 case LPFC_SLI_INTF_IF_TYPE_2: 6224 case LPFC_SLI_INTF_IF_TYPE_1: 6225 default: 6226 break; 6227 } 6228 return rc; 6229} 6230 6231/** 6232 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts 6233 * @phba: pointer to lpfc hba data structure. 6234 * 6235 * This routine is invoked to check the user-settable queue counts for EQs and 6236 * CQs. After this routine is called, the counts will be set to valid values that 6237 * adhere to the constraints of the system's interrupt vectors and the port's 6238 * queue resources. 6239 * 6240 * Return codes 6241 * 0 - successful 6242 * -ENOMEM - No available memory 6243 **/ 6244static int 6245lpfc_sli4_queue_verify(struct lpfc_hba *phba) 6246{ 6247 int cfg_fcp_wq_count; 6248 int cfg_fcp_eq_count; 6249 6250 /* 6251 * Sanity check for configured queue parameters against the run-time 6252 * device parameters 6253 */ 6254 6255 /* Sanity check on FCP fast-path WQ parameters */ 6256 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 6257 if (cfg_fcp_wq_count > 6258 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 6259 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 6260 LPFC_SP_WQN_DEF; 6261 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 6262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6263 "2581 Not enough WQs (%d) from " 6264 "the pci function for supporting " 6265 "FCP WQs (%d)\n", 6266 phba->sli4_hba.max_cfg_param.max_wq, 6267 phba->cfg_fcp_wq_count); 6268 goto out_error; 6269 } 6270 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6271 "2582 Not enough WQs (%d) from the pci " 6272 "function for supporting the requested " 6273 "FCP WQs (%d), the actual FCP WQs can " 6274 "be supported: %d\n", 6275 phba->sli4_hba.max_cfg_param.max_wq, 6276 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 6277 } 6278 /* The actual number of FCP work queues adopted */ 6279 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 6280 6281 /* Sanity check on FCP fast-path EQ parameters */ 6282 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 6283 if (cfg_fcp_eq_count > 6284 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 6285 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 6286 LPFC_SP_EQN_DEF; 6287 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 6288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6289 "2574 Not enough EQs (%d) from the " 6290 "pci function for supporting FCP " 6291 "EQs
(%d)\n", 6292 phba->sli4_hba.max_cfg_param.max_eq, 6293 phba->cfg_fcp_eq_count); 6294 goto out_error; 6295 } 6296 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6297 "2575 Not enough EQs (%d) from the pci " 6298 "function for supporting the requested " 6299 "FCP EQs (%d), the actual FCP EQs can " 6300 "be supported: %d\n", 6301 phba->sli4_hba.max_cfg_param.max_eq, 6302 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6303 } 6304 /* It does not make sense to have more EQs than WQs */ 6305 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 6306 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6307 "2593 The FCP EQ count(%d) cannot be greater " 6308 "than the FCP WQ count(%d), limiting the " 6309 "FCP EQ count to %d\n", cfg_fcp_eq_count, 6310 phba->cfg_fcp_wq_count, 6311 phba->cfg_fcp_wq_count); 6312 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 6313 } 6314 /* The actual number of FCP event queues adopted */ 6315 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6316 /* The overall number of event queues used */ 6317 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6318 6319 /* Get EQ depth from module parameter, fake the default for now */ 6320 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6321 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 6322 6323 /* Get CQ depth from module parameter, fake the default for now */ 6324 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 6325 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 6326 6327 return 0; 6328out_error: 6329 return -ENOMEM; 6330} 6331 6332/** 6333 * lpfc_sli4_queue_create - Create all the SLI4 queues 6334 * @phba: pointer to lpfc hba data structure. 6335 * 6336 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 6337 * operation. For each SLI4 queue type, the parameters such as queue entry 6338 * count (queue depth) shall be taken from the module parameter. For now, 6339 * we just use some constant number as place holder. 6340 * 6341 * Return codes 6342 * 0 - sucessful 6343 * -ENOMEM - No availble memory 6344 * -EIO - The mailbox failed to complete successfully. 6345 **/ 6346int 6347lpfc_sli4_queue_create(struct lpfc_hba *phba) 6348{ 6349 struct lpfc_queue *qdesc; 6350 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6351 6352 /* 6353 * Create Event Queues (EQs) 6354 */ 6355 6356 /* Create slow path event queue */ 6357 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6358 phba->sli4_hba.eq_ecount); 6359 if (!qdesc) { 6360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6361 "0496 Failed allocate slow-path EQ\n"); 6362 goto out_error; 6363 } 6364 phba->sli4_hba.sp_eq = qdesc; 6365 6366 /* 6367 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be 6368 * zero whenever there is exactly one interrupt vector. This is not 6369 * an error. 
6370 */ 6371 if (phba->cfg_fcp_eq_count) { 6372 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 6373 phba->cfg_fcp_eq_count), GFP_KERNEL); 6374 if (!phba->sli4_hba.fp_eq) { 6375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6376 "2576 Failed allocate memory for " 6377 "fast-path EQ record array\n"); 6378 goto out_free_sp_eq; 6379 } 6380 } 6381 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6382 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6383 phba->sli4_hba.eq_ecount); 6384 if (!qdesc) { 6385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6386 "0497 Failed allocate fast-path EQ\n"); 6387 goto out_free_fp_eq; 6388 } 6389 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 6390 } 6391 6392 /* 6393 * Create Complete Queues (CQs) 6394 */ 6395 6396 /* Create slow-path Mailbox Command Complete Queue */ 6397 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6398 phba->sli4_hba.cq_ecount); 6399 if (!qdesc) { 6400 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6401 "0500 Failed allocate slow-path mailbox CQ\n"); 6402 goto out_free_fp_eq; 6403 } 6404 phba->sli4_hba.mbx_cq = qdesc; 6405 6406 /* Create slow-path ELS Complete Queue */ 6407 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6408 phba->sli4_hba.cq_ecount); 6409 if (!qdesc) { 6410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6411 "0501 Failed allocate slow-path ELS CQ\n"); 6412 goto out_free_mbx_cq; 6413 } 6414 phba->sli4_hba.els_cq = qdesc; 6415 6416 6417 /* 6418 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. 6419 * If there are no FCP EQs then create exactly one FCP CQ. 6420 */ 6421 if (phba->cfg_fcp_eq_count) 6422 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6423 phba->cfg_fcp_eq_count), 6424 GFP_KERNEL); 6425 else 6426 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *), 6427 GFP_KERNEL); 6428 if (!phba->sli4_hba.fcp_cq) { 6429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6430 "2577 Failed allocate memory for fast-path " 6431 "CQ record array\n"); 6432 goto out_free_els_cq; 6433 } 6434 fcp_cqidx = 0; 6435 do { 6436 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6437 phba->sli4_hba.cq_ecount); 6438 if (!qdesc) { 6439 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6440 "0499 Failed allocate fast-path FCP " 6441 "CQ (%d)\n", fcp_cqidx); 6442 goto out_free_fcp_cq; 6443 } 6444 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6445 } while (++fcp_cqidx < phba->cfg_fcp_eq_count); 6446 6447 /* Create Mailbox Command Queue */ 6448 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6449 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6450 6451 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6452 phba->sli4_hba.mq_ecount); 6453 if (!qdesc) { 6454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6455 "0505 Failed allocate slow-path MQ\n"); 6456 goto out_free_fcp_cq; 6457 } 6458 phba->sli4_hba.mbx_wq = qdesc; 6459 6460 /* 6461 * Create all the Work Queues (WQs) 6462 */ 6463 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6464 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6465 6466 /* Create slow-path ELS Work Queue */ 6467 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6468 phba->sli4_hba.wq_ecount); 6469 if (!qdesc) { 6470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6471 "0504 Failed allocate slow-path ELS WQ\n"); 6472 goto out_free_mbx_wq; 6473 } 6474 phba->sli4_hba.els_wq = qdesc; 6475 6476 /* Create fast-path FCP Work Queue(s) */ 6477 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6478 phba->cfg_fcp_wq_count), GFP_KERNEL); 6479 if 
(!phba->sli4_hba.fcp_wq) { 6480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6481 "2578 Failed allocate memory for fast-path " 6482 "WQ record array\n"); 6483 goto out_free_els_wq; 6484 } 6485 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6486 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6487 phba->sli4_hba.wq_ecount); 6488 if (!qdesc) { 6489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6490 "0503 Failed allocate fast-path FCP " 6491 "WQ (%d)\n", fcp_wqidx); 6492 goto out_free_fcp_wq; 6493 } 6494 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 6495 } 6496 6497 /* 6498 * Create Receive Queue (RQ) 6499 */ 6500 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6501 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6502 6503 /* Create Receive Queue for header */ 6504 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6505 phba->sli4_hba.rq_ecount); 6506 if (!qdesc) { 6507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6508 "0506 Failed allocate receive HRQ\n"); 6509 goto out_free_fcp_wq; 6510 } 6511 phba->sli4_hba.hdr_rq = qdesc; 6512 6513 /* Create Receive Queue for data */ 6514 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6515 phba->sli4_hba.rq_ecount); 6516 if (!qdesc) { 6517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6518 "0507 Failed allocate receive DRQ\n"); 6519 goto out_free_hdr_rq; 6520 } 6521 phba->sli4_hba.dat_rq = qdesc; 6522 6523 return 0; 6524 6525out_free_hdr_rq: 6526 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6527 phba->sli4_hba.hdr_rq = NULL; 6528out_free_fcp_wq: 6529 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 6530 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 6531 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6532 } 6533 kfree(phba->sli4_hba.fcp_wq); 6534 phba->sli4_hba.fcp_wq = NULL; 6535out_free_els_wq: 6536 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6537 phba->sli4_hba.els_wq = NULL; 6538out_free_mbx_wq: 6539 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6540 phba->sli4_hba.mbx_wq = NULL; 6541out_free_fcp_cq: 6542 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 6543 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 6544 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6545 } 6546 kfree(phba->sli4_hba.fcp_cq); 6547 phba->sli4_hba.fcp_cq = NULL; 6548out_free_els_cq: 6549 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6550 phba->sli4_hba.els_cq = NULL; 6551out_free_mbx_cq: 6552 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6553 phba->sli4_hba.mbx_cq = NULL; 6554out_free_fp_eq: 6555 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 6556 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 6557 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6558 } 6559 kfree(phba->sli4_hba.fp_eq); 6560 phba->sli4_hba.fp_eq = NULL; 6561out_free_sp_eq: 6562 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6563 phba->sli4_hba.sp_eq = NULL; 6564out_error: 6565 return -ENOMEM; 6566} 6567 6568/** 6569 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 6570 * @phba: pointer to lpfc hba data structure. 6571 * 6572 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 6573 * operation. 6574 * 6575 * Return codes 6576 * 0 - successful 6577 * -ENOMEM - No available memory 6578 * -EIO - The mailbox failed to complete successfully. 
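 * (This only frees the queue memory allocated by lpfc_sli4_queue_create(); tearing the queues down on the port itself is lpfc_sli4_queue_unset()'s job.)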
6579 **/ 6580void 6581lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6582{ 6583 int fcp_qidx; 6584 6585 /* Release mailbox command work queue */ 6586 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6587 phba->sli4_hba.mbx_wq = NULL; 6588 6589 /* Release ELS work queue */ 6590 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6591 phba->sli4_hba.els_wq = NULL; 6592 6593 /* Release FCP work queue */ 6594 if (phba->sli4_hba.fcp_wq != NULL) 6595 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; 6596 fcp_qidx++) 6597 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6598 kfree(phba->sli4_hba.fcp_wq); 6599 phba->sli4_hba.fcp_wq = NULL; 6600 6601 /* Release unsolicited receive queue */ 6602 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6603 phba->sli4_hba.hdr_rq = NULL; 6604 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6605 phba->sli4_hba.dat_rq = NULL; 6606 6607 /* Release ELS complete queue */ 6608 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6609 phba->sli4_hba.els_cq = NULL; 6610 6611 /* Release mailbox command complete queue */ 6612 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6613 phba->sli4_hba.mbx_cq = NULL; 6614 6615 /* Release FCP response complete queue */ 6616 fcp_qidx = 0; 6617 if (phba->sli4_hba.fcp_cq != NULL) 6618 do 6619 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6620 while (++fcp_qidx < phba->cfg_fcp_eq_count); 6621 kfree(phba->sli4_hba.fcp_cq); 6622 phba->sli4_hba.fcp_cq = NULL; 6623 6624 /* Release fast-path event queue */ 6625 if (phba->sli4_hba.fp_eq != NULL) 6626 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 6627 fcp_qidx++) 6628 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6629 kfree(phba->sli4_hba.fp_eq); 6630 phba->sli4_hba.fp_eq = NULL; 6631 6632 /* Release slow-path event queue */ 6633 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6634 phba->sli4_hba.sp_eq = NULL; 6635 6636 return; 6637} 6638 6639/** 6640 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 6641 * @phba: pointer to lpfc hba data structure. 6642 * 6643 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 6644 * operation. 6645 * 6646 * Return codes 6647 * 0 - successful 6648 * -ENOMEM - No available memory 6649 * -EIO - The mailbox failed to complete successfully. 
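 * (Queues are brought up parent-first: EQs, then CQs, then the MQ/WQs/RQ that complete to them; the error path below unwinds in the reverse order.)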
6650 **/ 6651int 6652lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6653{ 6654 int rc = -ENOMEM; 6655 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6656 int fcp_cq_index = 0; 6657 6658 /* 6659 * Set up Event Queues (EQs) 6660 */ 6661 6662 /* Set up slow-path event queue */ 6663 if (!phba->sli4_hba.sp_eq) { 6664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6665 "0520 Slow-path EQ not allocated\n"); 6666 goto out_error; 6667 } 6668 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6669 LPFC_SP_DEF_IMAX); 6670 if (rc) { 6671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6672 "0521 Failed setup of slow-path EQ: " 6673 "rc = 0x%x\n", rc); 6674 goto out_error; 6675 } 6676 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6677 "2583 Slow-path EQ setup: queue-id=%d\n", 6678 phba->sli4_hba.sp_eq->queue_id); 6679 6680 /* Set up fast-path event queue */ 6681 if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) { 6682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6683 "3147 Fast-path EQs not allocated\n"); 6684 rc = -ENOMEM; 6685 goto out_destroy_sp_eq; 6686 } 6687 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6688 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6690 "0522 Fast-path EQ (%d) not " 6691 "allocated\n", fcp_eqidx); 6692 rc = -ENOMEM; 6693 goto out_destroy_fp_eq; 6694 } 6695 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6696 phba->cfg_fcp_imax); 6697 if (rc) { 6698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6699 "0523 Failed setup of fast-path EQ " 6700 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6701 goto out_destroy_fp_eq; 6702 } 6703 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6704 "2584 Fast-path EQ setup: " 6705 "queue[%d]-id=%d\n", fcp_eqidx, 6706 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6707 } 6708 6709 /* 6710 * Set up Complete Queues (CQs) 6711 */ 6712 6713 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6714 if (!phba->sli4_hba.mbx_cq) { 6715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6716 "0528 Mailbox CQ not allocated\n"); 6717 rc = -ENOMEM; 6718 goto out_destroy_fp_eq; 6719 } 6720 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6721 LPFC_MCQ, LPFC_MBOX); 6722 if (rc) { 6723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6724 "0529 Failed setup of slow-path mailbox CQ: " 6725 "rc = 0x%x\n", rc); 6726 goto out_destroy_fp_eq; 6727 } 6728 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6729 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6730 phba->sli4_hba.mbx_cq->queue_id, 6731 phba->sli4_hba.sp_eq->queue_id); 6732 6733 /* Set up slow-path ELS Complete Queue */ 6734 if (!phba->sli4_hba.els_cq) { 6735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6736 "0530 ELS CQ not allocated\n"); 6737 rc = -ENOMEM; 6738 goto out_destroy_mbx_cq; 6739 } 6740 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6741 LPFC_WCQ, LPFC_ELS); 6742 if (rc) { 6743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6744 "0531 Failed setup of slow-path ELS CQ: " 6745 "rc = 0x%x\n", rc); 6746 goto out_destroy_mbx_cq; 6747 } 6748 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6749 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6750 phba->sli4_hba.els_cq->queue_id, 6751 phba->sli4_hba.sp_eq->queue_id); 6752 6753 /* Set up fast-path FCP Response Complete Queue */ 6754 if (!phba->sli4_hba.fcp_cq) { 6755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6756 "3148 Fast-path FCP CQ array not " 6757 "allocated\n"); 6758 rc = -ENOMEM; 6759 goto out_destroy_els_cq; 6760 } 6761 fcp_cqidx = 0; 6762 do { 6763 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6764 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6765 "0526 Fast-path FCP CQ (%d) not " 6766 "allocated\n", fcp_cqidx); 6767 rc = -ENOMEM; 6768 goto out_destroy_fcp_cq; 6769 } 6770 if (phba->cfg_fcp_eq_count) 6771 rc = lpfc_cq_create(phba, 6772 phba->sli4_hba.fcp_cq[fcp_cqidx], 6773 phba->sli4_hba.fp_eq[fcp_cqidx], 6774 LPFC_WCQ, LPFC_FCP); 6775 else 6776 rc = lpfc_cq_create(phba, 6777 phba->sli4_hba.fcp_cq[fcp_cqidx], 6778 phba->sli4_hba.sp_eq, 6779 LPFC_WCQ, LPFC_FCP); 6780 if (rc) { 6781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6782 "0527 Failed setup of fast-path FCP " 6783 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 6784 goto out_destroy_fcp_cq; 6785 } 6786 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6787 "2588 FCP CQ setup: cq[%d]-id=%d, " 6788 "parent %seq[%d]-id=%d\n", 6789 fcp_cqidx, 6790 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6791 (phba->cfg_fcp_eq_count) ? "" : "sp_", 6792 fcp_cqidx, 6793 (phba->cfg_fcp_eq_count) ? 6794 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id : 6795 phba->sli4_hba.sp_eq->queue_id); 6796 } while (++fcp_cqidx < phba->cfg_fcp_eq_count); 6797 6798 /* 6799 * Set up all the Work Queues (WQs) 6800 */ 6801 6802 /* Set up Mailbox Command Queue */ 6803 if (!phba->sli4_hba.mbx_wq) { 6804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6805 "0538 Slow-path MQ not allocated\n"); 6806 rc = -ENOMEM; 6807 goto out_destroy_fcp_cq; 6808 } 6809 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6810 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6811 if (rc) { 6812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6813 "0539 Failed setup of slow-path MQ: " 6814 "rc = 0x%x\n", rc); 6815 goto out_destroy_fcp_cq; 6816 } 6817 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6818 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6819 phba->sli4_hba.mbx_wq->queue_id, 6820 phba->sli4_hba.mbx_cq->queue_id); 6821 6822 /* Set up slow-path ELS Work Queue */ 6823 if (!phba->sli4_hba.els_wq) { 6824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6825 "0536 Slow-path ELS WQ not allocated\n"); 6826 rc = -ENOMEM; 6827 goto out_destroy_mbx_wq; 6828 } 6829 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6830 phba->sli4_hba.els_cq, LPFC_ELS); 6831 if (rc) { 6832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6833 "0537 Failed setup of slow-path ELS WQ: " 6834 "rc = 0x%x\n", rc); 6835 goto out_destroy_mbx_wq; 6836 } 6837 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6838 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6839 phba->sli4_hba.els_wq->queue_id, 6840 phba->sli4_hba.els_cq->queue_id); 6841 6842 /* Set up fast-path FCP Work Queue */ 6843 if (!phba->sli4_hba.fcp_wq) { 6844 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6845 "3149 Fast-path FCP WQ array not " 6846 "allocated\n"); 6847 rc = -ENOMEM; 6848 goto out_destroy_els_wq; 6849 } 6850 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6851 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6853 "0534 Fast-path FCP WQ (%d) not " 6854 "allocated\n", fcp_wqidx); 6855 rc = -ENOMEM; 6856 goto out_destroy_fcp_wq; 6857 } 6858 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6859 phba->sli4_hba.fcp_cq[fcp_cq_index], 6860 LPFC_FCP); 6861 if (rc) { 6862 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6863 "0535 Failed setup of fast-path FCP " 6864 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6865 goto out_destroy_fcp_wq; 6866 } 6867 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6868 "2591 FCP WQ setup: wq[%d]-id=%d, " 6869 "parent cq[%d]-id=%d\n", 6870 fcp_wqidx, 6871 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6872 fcp_cq_index, 6873 
phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6874 /* Round robin FCP Work Queue's Completion Queue assignment */ 6875 if (phba->cfg_fcp_eq_count) 6876 fcp_cq_index = ((fcp_cq_index + 1) % 6877 phba->cfg_fcp_eq_count); 6878 } 6879 6880 /* 6881 * Create Receive Queue (RQ) 6882 */ 6883 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6885 "0540 Receive Queue not allocated\n"); 6886 rc = -ENOMEM; 6887 goto out_destroy_fcp_wq; 6888 } 6889 6890 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 6891 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 6892 6893 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6894 phba->sli4_hba.els_cq, LPFC_USOL); 6895 if (rc) { 6896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6897 "0541 Failed setup of Receive Queue: " 6898 "rc = 0x%x\n", rc); 6899 goto out_destroy_fcp_wq; 6900 } 6901 6902 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6903 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6904 "parent cq-id=%d\n", 6905 phba->sli4_hba.hdr_rq->queue_id, 6906 phba->sli4_hba.dat_rq->queue_id, 6907 phba->sli4_hba.els_cq->queue_id); 6908 return 0; 6909 6910out_destroy_fcp_wq: 6911 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 6912 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6913out_destroy_els_wq: 6914 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6915out_destroy_mbx_wq: 6916 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6917out_destroy_fcp_cq: 6918 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6919 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6920out_destroy_els_cq: 6921 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6922out_destroy_mbx_cq: 6923 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6924out_destroy_fp_eq: 6925 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6926 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6927out_destroy_sp_eq: 6928 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6929out_error: 6930 return rc; 6931} 6932 6933/** 6934 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6935 * @phba: pointer to lpfc hba data structure. 6936 * 6937 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 6938 * operation. 6939 * 6940 * Return codes 6941 * 0 - successful 6942 * -ENOMEM - No available memory 6943 * -EIO - The mailbox failed to complete successfully. 
6944 **/ 6945void 6946lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6947{ 6948 int fcp_qidx; 6949 6950 /* Unset mailbox command work queue */ 6951 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6952 /* Unset ELS work queue */ 6953 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6954 /* Unset unsolicited receive queue */ 6955 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6956 /* Unset FCP work queue */ 6957 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6958 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6959 /* Unset mailbox command complete queue */ 6960 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6961 /* Unset ELS complete queue */ 6962 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6963 /* Unset FCP response complete queue */ 6964 if (phba->sli4_hba.fcp_cq) { 6965 fcp_qidx = 0; 6966 do { 6967 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6968 } while (++fcp_qidx < phba->cfg_fcp_eq_count); 6969 } 6970 /* Unset fast-path event queue */ 6971 if (phba->sli4_hba.fp_eq) { 6972 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 6973 fcp_qidx++) 6974 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6975 } 6976 /* Unset slow-path event queue */ 6977 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6978} 6979 6980/** 6981 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6982 * @phba: pointer to lpfc hba data structure. 6983 * 6984 * This routine is invoked to allocate and set up a pool of completion queue 6985 * events. The body of the completion queue event is a completion queue entry 6986 * CQE. For now, this pool is used for the interrupt service routine to queue 6987 * the following HBA completion queue events for the worker thread to process: 6988 * - Mailbox asynchronous events 6989 * - Receive queue completion unsolicited events 6990 * Later, this can be used for all the slow-path events. 6991 * 6992 * Return codes 6993 * 0 - successful 6994 * -ENOMEM - No available memory 6995 **/ 6996static int 6997lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6998{ 6999 struct lpfc_cq_event *cq_event; 7000 int i; 7001 7002 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 7003 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 7004 if (!cq_event) 7005 goto out_pool_create_fail; 7006 list_add_tail(&cq_event->list, 7007 &phba->sli4_hba.sp_cqe_event_pool); 7008 } 7009 return 0; 7010 7011out_pool_create_fail: 7012 lpfc_sli4_cq_event_pool_destroy(phba); 7013 return -ENOMEM; 7014} 7015 7016/** 7017 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 7018 * @phba: pointer to lpfc hba data structure. 7019 * 7020 * This routine is invoked to free the pool of completion queue events at 7021 * driver unload time. Note that, it is the responsibility of the driver 7022 * cleanup routine to free all the outstanding completion-queue events 7023 * allocated from this pool back into the pool before invoking this routine 7024 * to destroy the pool. 7025 **/ 7026static void 7027lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 7028{ 7029 struct lpfc_cq_event *cq_event, *next_cq_event; 7030 7031 list_for_each_entry_safe(cq_event, next_cq_event, 7032 &phba->sli4_hba.sp_cqe_event_pool, list) { 7033 list_del(&cq_event->list); 7034 kfree(cq_event); 7035 } 7036} 7037 7038/** 7039 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 7040 * @phba: pointer to lpfc hba data structure. 
7041 * 7042 * This routine is the lock-free version of the API invoked to allocate a 7043 * completion-queue event from the free pool. 7044 * 7045 * Return: Pointer to the newly allocated completion-queue event if successful 7046 * NULL otherwise. 7047 **/ 7048struct lpfc_cq_event * 7049__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 7050{ 7051 struct lpfc_cq_event *cq_event = NULL; 7052 7053 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 7054 struct lpfc_cq_event, list); 7055 return cq_event; 7056} 7057 7058/** 7059 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 7060 * @phba: pointer to lpfc hba data structure. 7061 * 7062 * This routine is the locked version of the API invoked to allocate a 7063 * completion-queue event from the free pool. 7064 * 7065 * Return: Pointer to the newly allocated completion-queue event if successful 7066 * NULL otherwise. 7067 **/ 7068struct lpfc_cq_event * 7069lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 7070{ 7071 struct lpfc_cq_event *cq_event; 7072 unsigned long iflags; 7073 7074 spin_lock_irqsave(&phba->hbalock, iflags); 7075 cq_event = __lpfc_sli4_cq_event_alloc(phba); 7076 spin_unlock_irqrestore(&phba->hbalock, iflags); 7077 return cq_event; 7078} 7079 7080/** 7081 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 7082 * @phba: pointer to lpfc hba data structure. 7083 * @cq_event: pointer to the completion queue event to be freed. 7084 * 7085 * This routine is the lock-free version of the API invoked to release a 7086 * completion-queue event back into the free pool. 7087 **/ 7088void 7089__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 7090 struct lpfc_cq_event *cq_event) 7091{ 7092 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 7093} 7094 7095/** 7096 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 7097 * @phba: pointer to lpfc hba data structure. 7098 * @cq_event: pointer to the completion queue event to be freed. 7099 * 7100 * This routine is the locked version of the API invoked to release a 7101 * completion-queue event back into the free pool. 7102 **/ 7103void 7104lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 7105 struct lpfc_cq_event *cq_event) 7106{ 7107 unsigned long iflags; 7108 spin_lock_irqsave(&phba->hbalock, iflags); 7109 __lpfc_sli4_cq_event_release(phba, cq_event); 7110 spin_unlock_irqrestore(&phba->hbalock, iflags); 7111} 7112 7113/** 7114 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 7115 * @phba: pointer to lpfc hba data structure. 7116 * 7117 * This routine frees all the pending completion-queue events back 7118 * into the free pool for device reset.
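 * (The pending events are spliced off their work queues under hbalock and then returned one at a time via lpfc_sli4_cq_event_release().)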
7119 **/
7120static void
7121lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7122{
7123	LIST_HEAD(cqelist);
7124	struct lpfc_cq_event *cqe;
7125	unsigned long iflags;
7126
7127	/* Retrieve all the pending WCQEs from pending WCQE lists */
7128	spin_lock_irqsave(&phba->hbalock, iflags);
7129	/* Pending FCP XRI abort events */
7130	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7131			 &cqelist);
7132	/* Pending ELS XRI abort events */
7133	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7134			 &cqelist);
7135	/* Pending async events */
7136	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7137			 &cqelist);
7138	spin_unlock_irqrestore(&phba->hbalock, iflags);
7139
7140	while (!list_empty(&cqelist)) {
7141		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7142		lpfc_sli4_cq_event_release(phba, cqe);
7143	}
7144}
7145
7146/**
7147 * lpfc_pci_function_reset - Reset PCI function.
7148 * @phba: pointer to lpfc hba data structure.
7149 *
7150 * This routine is invoked to request a PCI function reset. It destroys
7151 * all resources assigned to the PCI function that originates this request.
7152 *
7153 * Return codes
7154 *      0 - successful
7155 *      -ENOMEM - No available memory
7156 *      -EIO - The mailbox failed to complete successfully.
7157 **/
7158int
7159lpfc_pci_function_reset(struct lpfc_hba *phba)
7160{
7161	LPFC_MBOXQ_t *mboxq;
7162	uint32_t rc = 0, if_type;
7163	uint32_t shdr_status, shdr_add_status;
7164	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7165	union lpfc_sli4_cfg_shdr *shdr;
7166	struct lpfc_register reg_data;
7167
7168	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7169	switch (if_type) {
7170	case LPFC_SLI_INTF_IF_TYPE_0:
7171		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7172						       GFP_KERNEL);
7173		if (!mboxq) {
7174			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7175					"0494 Unable to allocate memory for "
7176					"issuing SLI_FUNCTION_RESET mailbox "
7177					"command\n");
7178			return -ENOMEM;
7179		}
7180
7181		/* Setup PCI function reset mailbox-ioctl command */
7182		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7183				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7184				 LPFC_SLI4_MBX_EMBED);
7185		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7186		shdr = (union lpfc_sli4_cfg_shdr *)
7187			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7188		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7189		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7190					 &shdr->response);
7191		if (rc != MBX_TIMEOUT)
7192			mempool_free(mboxq, phba->mbox_mem_pool);
7193		if (shdr_status || shdr_add_status || rc) {
7194			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7195					"0495 SLI_FUNCTION_RESET mailbox "
7196					"failed with status x%x add_status x%x,"
7197					" mbx status x%x\n",
7198					shdr_status, shdr_add_status, rc);
7199			rc = -ENXIO;
7200		}
7201		break;
7202	case LPFC_SLI_INTF_IF_TYPE_2:
7203		for (num_resets = 0;
7204		     num_resets < MAX_IF_TYPE_2_RESETS;
7205		     num_resets++) {
7206			reg_data.word0 = 0;
7207			bf_set(lpfc_sliport_ctrl_end, &reg_data,
7208			       LPFC_SLIPORT_LITTLE_ENDIAN);
7209			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7210			       LPFC_SLIPORT_INIT_PORT);
7211			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7212			       CTRLregaddr);
7213
7214			/*
7215			 * Poll the Port Status Register and wait for RDY for
7216			 * up to 10 seconds. If the port doesn't respond, treat
7217			 * it as an error. If the port responds with RN, start
7218			 * the loop again.
7219			 */
7220			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7221				msleep(10);
7222				if (lpfc_readl(phba->sli4_hba.u.if_type2.
7223					       STATUSregaddr, &reg_data.word0)) {
7224					rc = -ENODEV;
7225					goto out;
7226				}
7227				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7228					break;
7229				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
7230					reset_again++;
7231					break;
7232				}
7233			}
7234
7235			/*
7236			 * If the port responds to the init request with
7237			 * reset needed, delay for a bit and restart the loop.
7238			 */
7239			if (reset_again) {
7240				msleep(10);
7241				reset_again = 0;
7242				continue;
7243			}
7244
7245			/* Detect any port errors. */
7246			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7247			    (rdy_chk >= 1000)) {
7248				phba->work_status[0] = readl(
7249					phba->sli4_hba.u.if_type2.ERR1regaddr);
7250				phba->work_status[1] = readl(
7251					phba->sli4_hba.u.if_type2.ERR2regaddr);
7252				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7253					"2890 Port Error Detected "
7254					"during Port Reset: "
7255					"port status reg 0x%x, "
7256					"error 1=0x%x, error 2=0x%x\n",
7257					reg_data.word0,
7258					phba->work_status[0],
7259					phba->work_status[1]);
7260				rc = -ENODEV;
7261			}
7262
7263			/*
7264			 * Terminate the outer loop provided the Port indicated
7265			 * ready within 10 seconds.
7266			 */
7267			if (rdy_chk < 1000)
7268				break;
7269		}
7270		/* delay driver action following IF_TYPE_2 function reset */
7271		msleep(100);
7272		break;
7273	case LPFC_SLI_INTF_IF_TYPE_1:
7274	default:
7275		break;
7276	}
7277
7278out:
7279	/* Catch the not-ready port failure after a port reset. */
7280	if (num_resets >= MAX_IF_TYPE_2_RESETS)
7281		rc = -ENODEV;
7282
7283	return rc;
7284}
7285
7286/**
7287 * lpfc_sli4_send_nop_mbox_cmds - Send SLI4 NOP mailbox commands
7288 * @phba: pointer to lpfc hba data structure.
7289 * @cnt: number of nop mailbox commands to send.
7290 *
7291 * This routine is invoked to send a number @cnt of NOP mailbox commands and
7292 * wait for each command to complete.
7293 *
7294 * Return: the number of NOP mailbox commands completed.
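 *
 * Note for callers (summary of the code below, not new behavior): the
 * return value is the count of commands that completed rather than an
 * errno, so a caller that needs all @cnt commands to succeed should
 * compare the result against @cnt, e.g.:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, cnt) != cnt)
 *		... treat as a setup failure ...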
7295 **/ 7296static int 7297lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 7298{ 7299 LPFC_MBOXQ_t *mboxq; 7300 int length, cmdsent; 7301 uint32_t mbox_tmo; 7302 uint32_t rc = 0; 7303 uint32_t shdr_status, shdr_add_status; 7304 union lpfc_sli4_cfg_shdr *shdr; 7305 7306 if (cnt == 0) { 7307 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7308 "2518 Requested to send 0 NOP mailbox cmd\n"); 7309 return cnt; 7310 } 7311 7312 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7313 if (!mboxq) { 7314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7315 "2519 Unable to allocate memory for issuing " 7316 "NOP mailbox command\n"); 7317 return 0; 7318 } 7319 7320 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 7321 length = (sizeof(struct lpfc_mbx_nop) - 7322 sizeof(struct lpfc_sli4_cfg_mhdr)); 7323 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7324 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 7325 7326 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 7327 if (!phba->sli4_hba.intr_enable) 7328 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7329 else { 7330 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 7331 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7332 } 7333 if (rc == MBX_TIMEOUT) 7334 break; 7335 /* Check return status */ 7336 shdr = (union lpfc_sli4_cfg_shdr *) 7337 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7338 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7339 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 7340 &shdr->response); 7341 if (shdr_status || shdr_add_status || rc) { 7342 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7343 "2520 NOP mailbox command failed " 7344 "status x%x add_status x%x mbx " 7345 "status x%x\n", shdr_status, 7346 shdr_add_status, rc); 7347 break; 7348 } 7349 } 7350 7351 if (rc != MBX_TIMEOUT) 7352 mempool_free(mboxq, phba->mbox_mem_pool); 7353 7354 return cmdsent; 7355} 7356 7357/** 7358 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 7359 * @phba: pointer to lpfc hba data structure. 7360 * 7361 * This routine is invoked to set up the PCI device memory space for device 7362 * with SLI-4 interface spec. 7363 * 7364 * Return codes 7365 * 0 - successful 7366 * other values - error 7367 **/ 7368static int 7369lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 7370{ 7371 struct pci_dev *pdev; 7372 unsigned long bar0map_len, bar1map_len, bar2map_len; 7373 int error = -ENODEV; 7374 uint32_t if_type; 7375 7376 /* Obtain PCI device reference */ 7377 if (!phba->pcidev) 7378 return error; 7379 else 7380 pdev = phba->pcidev; 7381 7382 /* Set the device DMA mask size */ 7383 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 7384 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 7385 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 7386 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 7387 return error; 7388 } 7389 } 7390 7391 /* 7392 * The BARs and register set definitions and offset locations are 7393 * dependent on the if_type. 7394 */ 7395 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 7396 &phba->sli4_hba.sli_intf.word0)) { 7397 return error; 7398 } 7399 7400 /* There is no SLI3 failback for SLI4 devices. 
*/ 7401 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 7402 LPFC_SLI_INTF_VALID) { 7403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7404 "2894 SLI_INTF reg contents invalid " 7405 "sli_intf reg 0x%x\n", 7406 phba->sli4_hba.sli_intf.word0); 7407 return error; 7408 } 7409 7410 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7411 /* 7412 * Get the bus address of SLI4 device Bar regions and the 7413 * number of bytes required by each mapping. The mapping of the 7414 * particular PCI BARs regions is dependent on the type of 7415 * SLI4 device. 7416 */ 7417 if (pci_resource_start(pdev, 0)) { 7418 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7419 bar0map_len = pci_resource_len(pdev, 0); 7420 7421 /* 7422 * Map SLI4 PCI Config Space Register base to a kernel virtual 7423 * addr 7424 */ 7425 phba->sli4_hba.conf_regs_memmap_p = 7426 ioremap(phba->pci_bar0_map, bar0map_len); 7427 if (!phba->sli4_hba.conf_regs_memmap_p) { 7428 dev_printk(KERN_ERR, &pdev->dev, 7429 "ioremap failed for SLI4 PCI config " 7430 "registers.\n"); 7431 goto out; 7432 } 7433 /* Set up BAR0 PCI config space register memory map */ 7434 lpfc_sli4_bar0_register_memmap(phba, if_type); 7435 } else { 7436 phba->pci_bar0_map = pci_resource_start(pdev, 1); 7437 bar0map_len = pci_resource_len(pdev, 1); 7438 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7439 dev_printk(KERN_ERR, &pdev->dev, 7440 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 7441 goto out; 7442 } 7443 phba->sli4_hba.conf_regs_memmap_p = 7444 ioremap(phba->pci_bar0_map, bar0map_len); 7445 if (!phba->sli4_hba.conf_regs_memmap_p) { 7446 dev_printk(KERN_ERR, &pdev->dev, 7447 "ioremap failed for SLI4 PCI config " 7448 "registers.\n"); 7449 goto out; 7450 } 7451 lpfc_sli4_bar0_register_memmap(phba, if_type); 7452 } 7453 7454 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7455 (pci_resource_start(pdev, 2))) { 7456 /* 7457 * Map SLI4 if type 0 HBA Control Register base to a kernel 7458 * virtual address and setup the registers. 7459 */ 7460 phba->pci_bar1_map = pci_resource_start(pdev, 2); 7461 bar1map_len = pci_resource_len(pdev, 2); 7462 phba->sli4_hba.ctrl_regs_memmap_p = 7463 ioremap(phba->pci_bar1_map, bar1map_len); 7464 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 7465 dev_printk(KERN_ERR, &pdev->dev, 7466 "ioremap failed for SLI4 HBA control registers.\n"); 7467 goto out_iounmap_conf; 7468 } 7469 lpfc_sli4_bar1_register_memmap(phba); 7470 } 7471 7472 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7473 (pci_resource_start(pdev, 4))) { 7474 /* 7475 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7476 * virtual address and setup the registers. 7477 */ 7478 phba->pci_bar2_map = pci_resource_start(pdev, 4); 7479 bar2map_len = pci_resource_len(pdev, 4); 7480 phba->sli4_hba.drbl_regs_memmap_p = 7481 ioremap(phba->pci_bar2_map, bar2map_len); 7482 if (!phba->sli4_hba.drbl_regs_memmap_p) { 7483 dev_printk(KERN_ERR, &pdev->dev, 7484 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7485 goto out_iounmap_ctrl; 7486 } 7487 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 7488 if (error) 7489 goto out_iounmap_all; 7490 } 7491 7492 return 0; 7493 7494out_iounmap_all: 7495 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7496out_iounmap_ctrl: 7497 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7498out_iounmap_conf: 7499 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7500out: 7501 return error; 7502} 7503 7504/** 7505 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 7506 * @phba: pointer to lpfc hba data structure. 
7507 *
7508 * This routine is invoked to unset the PCI device memory space for a device
7509 * with SLI-4 interface spec.
7510 **/
7511static void
7512lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7513{
7514	uint32_t if_type;
7515	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7516
7517	switch (if_type) {
7518	case LPFC_SLI_INTF_IF_TYPE_0:
7519		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7520		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7521		iounmap(phba->sli4_hba.conf_regs_memmap_p);
7522		break;
7523	case LPFC_SLI_INTF_IF_TYPE_2:
7524		iounmap(phba->sli4_hba.conf_regs_memmap_p);
7525		break;
7526	case LPFC_SLI_INTF_IF_TYPE_1:
7527	default:
7528		dev_printk(KERN_ERR, &phba->pcidev->dev,
7529			   "FATAL - unsupported SLI4 interface type - %d\n",
7530			   if_type);
7531		break;
7532	}
7533}
7534
7535/**
7536 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7537 * @phba: pointer to lpfc hba data structure.
7538 *
7539 * This routine is invoked to enable the MSI-X interrupt vectors to device
7540 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7541 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7542 * invoked, enables either all or nothing, depending on the current
7543 * availability of PCI vector resources. The device driver is responsible
7544 * for calling the individual request_irq() to register each MSI-X vector
7545 * with an interrupt handler, which is done in this function. Note that
7546 * later, when the device is unloading, the driver should always call free_irq()
7547 * on all MSI-X vectors it has done request_irq() on before calling
7548 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
7549 * will be left with MSI-X enabled, leaking its vectors.
7550 *
7551 * Return codes
7552 *   0 - successful
7553 *   other values - error
7554 **/
7555static int
7556lpfc_sli_enable_msix(struct lpfc_hba *phba)
7557{
7558	int rc, i;
7559	LPFC_MBOXQ_t *pmb;
7560
7561	/* Set up MSI-X multi-message vectors */
7562	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7563		phba->msix_entries[i].entry = i;
7564
7565	/* Configure MSI-X capability structure */
7566	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7567			     ARRAY_SIZE(phba->msix_entries));
7568	if (rc) {
7569		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7570				"0420 PCI enable MSI-X failed (%d)\n", rc);
7571		goto msi_fail_out;
7572	}
7573	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7574		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7575				"0477 MSI-X entry[%d]: vector=x%x "
7576				"message=%d\n", i,
7577				phba->msix_entries[i].vector,
7578				phba->msix_entries[i].entry);
7579	/*
7580	 * Assign MSI-X vectors to interrupt handlers
7581	 */
7582
7583	/* vector-0 is associated to slow-path handler */
7584	rc = request_irq(phba->msix_entries[0].vector,
7585			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7586			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7587	if (rc) {
7588		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7589				"0421 MSI-X slow-path request_irq failed "
7590				"(%d)\n", rc);
7591		goto msi_fail_out;
7592	}
7593
7594	/* vector-1 is associated to fast-path handler */
7595	rc = request_irq(phba->msix_entries[1].vector,
7596			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7597			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7598
7599	if (rc) {
7600		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7601				"0429 MSI-X fast-path request_irq failed "
7602				"(%d)\n", rc);
7603		goto irq_fail_out;
7604	}
7605
7606	/*
7607	 * Configure HBA MSI-X attention conditions to messages
7608	 */
7609	pmb = (LPFC_MBOXQ_t *)
mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7610
7611	if (!pmb) {
7612		rc = -ENOMEM;
7613		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7614				"0474 Unable to allocate memory for issuing "
7615				"MBOX_CONFIG_MSI command\n");
7616		goto mem_fail_out;
7617	}
7618	rc = lpfc_config_msi(phba, pmb);
7619	if (rc)
7620		goto mbx_fail_out;
7621	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7622	if (rc != MBX_SUCCESS) {
7623		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7624				"0351 Config MSI mailbox command failed, "
7625				"mbxCmd x%x, mbxStatus x%x\n",
7626				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7627		goto mbx_fail_out;
7628	}
7629
7630	/* Free memory allocated for mailbox command */
7631	mempool_free(pmb, phba->mbox_mem_pool);
7632	return rc;
7633
7634mbx_fail_out:
7635	/* Free memory allocated for mailbox command */
7636	mempool_free(pmb, phba->mbox_mem_pool);
7637
7638mem_fail_out:
7639	/* free the irq already requested */
7640	free_irq(phba->msix_entries[1].vector, phba);
7641
7642irq_fail_out:
7643	/* free the irq already requested */
7644	free_irq(phba->msix_entries[0].vector, phba);
7645
7646msi_fail_out:
7647	/* Unconfigure MSI-X capability structure */
7648	pci_disable_msix(phba->pcidev);
7649	return rc;
7650}
7651
7652/**
7653 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7654 * @phba: pointer to lpfc hba data structure.
7655 *
7656 * This routine is invoked to release the MSI-X vectors and then disable the
7657 * MSI-X interrupt mode to device with SLI-3 interface spec.
7658 **/
7659static void
7660lpfc_sli_disable_msix(struct lpfc_hba *phba)
7661{
7662	int i;
7663
7664	/* Free up MSI-X multi-message vectors */
7665	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7666		free_irq(phba->msix_entries[i].vector, phba);
7667	/* Disable MSI-X */
7668	pci_disable_msix(phba->pcidev);
7669
7670	return;
7671}
7672
7673/**
7674 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7675 * @phba: pointer to lpfc hba data structure.
7676 *
7677 * This routine is invoked to enable the MSI interrupt mode to device with
7678 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7679 * enable the MSI vector. The device driver is responsible for calling
7680 * request_irq() to register the MSI vector with an interrupt handler,
7681 * which is done in this function.
7682 *
7683 * Return codes
7684 * 	0 - successful
7685 * 	other values - error
7686 */
7687static int
7688lpfc_sli_enable_msi(struct lpfc_hba *phba)
7689{
7690	int rc;
7691
7692	rc = pci_enable_msi(phba->pcidev);
7693	if (!rc)
7694		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7695				"0462 PCI enable MSI mode success.\n");
7696	else {
7697		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7698				"0471 PCI enable MSI mode failed (%d)\n", rc);
7699		return rc;
7700	}
7701
7702	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7703			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7704	if (rc) {
7705		pci_disable_msi(phba->pcidev);
7706		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7707				"0478 MSI request_irq failed (%d)\n", rc);
7708	}
7709	return rc;
7710}
7711
7712/**
7713 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7714 * @phba: pointer to lpfc hba data structure.
7715 *
7716 * This routine is invoked to disable the MSI interrupt mode to device with
7717 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
7718 * done request_irq() on before calling pci_disable_msi().
Failure to do so
7719 * results in a BUG_ON() and the device will be left with MSI enabled, leaking
7720 * its vector.
7721 */
7722static void
7723lpfc_sli_disable_msi(struct lpfc_hba *phba)
7724{
7725	free_irq(phba->pcidev->irq, phba);
7726	pci_disable_msi(phba->pcidev);
7727	return;
7728}
7729
7730/**
7731 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7732 * @phba: pointer to lpfc hba data structure.
7733 *
7734 * This routine is invoked to enable device interrupt and associate driver's
7735 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
7736 * spec. Depending on the interrupt mode configured in the driver, the driver
7737 * will try to fall back from the configured interrupt mode to an interrupt
7738 * mode which is supported by the platform, kernel, and device, in the order
7739 * of:
7740 * MSI-X -> MSI -> IRQ.
7741 *
7742 * Return codes
7743 *   0 - successful
7744 *   other values - error
7745 **/
7746static uint32_t
7747lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7748{
7749	uint32_t intr_mode = LPFC_INTR_ERROR;
7750	int retval;
7751
7752	if (cfg_mode == 2) {
7753		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7754		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7755		if (!retval) {
7756			/* Now, try to enable MSI-X interrupt mode */
7757			retval = lpfc_sli_enable_msix(phba);
7758			if (!retval) {
7759				/* Indicate initialization to MSI-X mode */
7760				phba->intr_type = MSIX;
7761				intr_mode = 2;
7762			}
7763		}
7764	}
7765
7766	/* Fallback to MSI if MSI-X initialization failed */
7767	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7768		retval = lpfc_sli_enable_msi(phba);
7769		if (!retval) {
7770			/* Indicate initialization to MSI mode */
7771			phba->intr_type = MSI;
7772			intr_mode = 1;
7773		}
7774	}
7775
7776	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7777	if (phba->intr_type == NONE) {
7778		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7779				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7780		if (!retval) {
7781			/* Indicate initialization to INTx mode */
7782			phba->intr_type = INTx;
7783			intr_mode = 0;
7784		}
7785	}
7786	return intr_mode;
7787}
7788
7789/**
7790 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7791 * @phba: pointer to lpfc hba data structure.
7792 *
7793 * This routine is invoked to disable device interrupt and disassociate the
7794 * driver's interrupt handler(s) from interrupt vector(s) to device with
7795 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7796 * release the interrupt vector(s) for the message signaled interrupt.
7797 **/
7798static void
7799lpfc_sli_disable_intr(struct lpfc_hba *phba)
7800{
7801	/* Disable the currently initialized interrupt mode */
7802	if (phba->intr_type == MSIX)
7803		lpfc_sli_disable_msix(phba);
7804	else if (phba->intr_type == MSI)
7805		lpfc_sli_disable_msi(phba);
7806	else if (phba->intr_type == INTx)
7807		free_irq(phba->pcidev->irq, phba);
7808
7809	/* Reset interrupt management states */
7810	phba->intr_type = NONE;
7811	phba->sli.slistat.sli_intr = 0;
7812
7813	return;
7814}
7815
7816/**
7817 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7818 * @phba: pointer to lpfc hba data structure.
7819 *
7820 * This routine is invoked to enable the MSI-X interrupt vectors to device
7821 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7822 * to enable the MSI-X vectors.
Note that pci_enable_msix(), once invoked,
7823 * enables either all or nothing, depending on the current availability of
7824 * PCI vector resources. The device driver is responsible for calling the
7825 * individual request_irq() to register each MSI-X vector with an interrupt
7826 * handler, which is done in this function. Note that later, when the device is
7827 * unloading, the driver should always call free_irq() on all MSI-X vectors
7828 * it has done request_irq() on before calling pci_disable_msix(). Failure
7829 * to do so results in a BUG_ON() and the device will be left with MSI-X
7830 * enabled, leaking its vectors.
7831 *
7832 * Return codes
7833 * 0 - successful
7834 * other values - error
7835 **/
7836static int
7837lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7838{
7839	int vectors, rc, index;
7840
7841	/* Set up MSI-X multi-message vectors */
7842	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7843		phba->sli4_hba.msix_entries[index].entry = index;
7844
7845	/* Configure MSI-X capability structure */
7846	vectors = phba->sli4_hba.cfg_eqn;
7847enable_msix_vectors:
7848	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7849			     vectors);
7850	if (rc > 1) {
7851		vectors = rc;
7852		goto enable_msix_vectors;
7853	} else if (rc) {
7854		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7855				"0484 PCI enable MSI-X failed (%d)\n", rc);
7856		goto msi_fail_out;
7857	}
7858
7859	/* Log MSI-X vector assignment */
7860	for (index = 0; index < vectors; index++)
7861		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7862				"0489 MSI-X entry[%d]: vector=x%x "
7863				"message=%d\n", index,
7864				phba->sli4_hba.msix_entries[index].vector,
7865				phba->sli4_hba.msix_entries[index].entry);
7866	/*
7867	 * Assign MSI-X vectors to interrupt handlers
7868	 */
7869	if (vectors > 1)
7870		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7871				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7872				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7873	else
7874		/* All Interrupts need to be handled by one EQ */
7875		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7876				 &lpfc_sli4_intr_handler, IRQF_SHARED,
7877				 LPFC_DRIVER_NAME, phba);
7878	if (rc) {
7879		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7880				"0485 MSI-X slow-path request_irq failed "
7881				"(%d)\n", rc);
7882		goto msi_fail_out;
7883	}
7884
7885	/* The rest of the vector(s) are associated to fast-path handler(s) */
7886	for (index = 1; index < vectors; index++) {
7887		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7888		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7889		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7890				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7891				 LPFC_FP_DRIVER_HANDLER_NAME,
7892				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7893		if (rc) {
7894			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7895					"0486 MSI-X fast-path (%d) "
7896					"request_irq failed (%d)\n", index, rc);
7897			goto cfg_fail_out;
7898		}
7899	}
7900	phba->sli4_hba.msix_vec_nr = vectors;
7901
7902	return rc;
7903
7904cfg_fail_out:
7905	/* free the irq already requested */
7906	for (--index; index >= 1; index--)
7907		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7908			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7909
7910	/* free the irq already requested */
7911	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7912
7913msi_fail_out:
7914	/* Unconfigure MSI-X capability structure */
7915	pci_disable_msix(phba->pcidev);
7916	return rc;
7917}
7918
7919/**
7920 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7921 * @phba: pointer to
lpfc hba data structure.
7922 *
7923 * This routine is invoked to release the MSI-X vectors and then disable the
7924 * MSI-X interrupt mode to device with SLI-4 interface spec.
7925 **/
7926static void
7927lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7928{
7929	int index;
7930
7931	/* Free up MSI-X multi-message vectors */
7932	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7933
7934	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7935		free_irq(phba->sli4_hba.msix_entries[index].vector,
7936			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7937
7938	/* Disable MSI-X */
7939	pci_disable_msix(phba->pcidev);
7940
7941	return;
7942}
7943
7944/**
7945 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7946 * @phba: pointer to lpfc hba data structure.
7947 *
7948 * This routine is invoked to enable the MSI interrupt mode to device with
7949 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7950 * to enable the MSI vector. The device driver is responsible for calling
7951 * request_irq() to register the MSI vector with an interrupt handler,
7952 * which is done in this function.
7953 *
7954 * Return codes
7955 * 	0 - successful
7956 * 	other values - error
7957 **/
7958static int
7959lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7960{
7961	int rc, index;
7962
7963	rc = pci_enable_msi(phba->pcidev);
7964	if (!rc)
7965		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7966				"0487 PCI enable MSI mode success.\n");
7967	else {
7968		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7969				"0488 PCI enable MSI mode failed (%d)\n", rc);
7970		return rc;
7971	}
7972
7973	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7974			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7975	if (rc) {
7976		pci_disable_msi(phba->pcidev);
7977		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7978				"0490 MSI request_irq failed (%d)\n", rc);
7979		return rc;
7980	}
7981
7982	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7983		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7984		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7985	}
7986
7987	return 0;
7988}
7989
7990/**
7991 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7992 * @phba: pointer to lpfc hba data structure.
7993 *
7994 * This routine is invoked to disable the MSI interrupt mode to device with
7995 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7996 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7997 * results in a BUG_ON() and the device will be left with MSI enabled, leaking
7998 * its vector.
7999 **/
8000static void
8001lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8002{
8003	free_irq(phba->pcidev->irq, phba);
8004	pci_disable_msi(phba->pcidev);
8005	return;
8006}
8007
8008/**
8009 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
8010 * @phba: pointer to lpfc hba data structure.
8011 *
8012 * This routine is invoked to enable device interrupt and associate driver's
8013 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
8014 * interface spec. Depending on the interrupt mode configured in the driver,
8015 * the driver will try to fall back from the configured interrupt mode to an
8016 * interrupt mode which is supported by the platform, kernel, and device, in
8017 * the order of:
8018 * MSI-X -> MSI -> IRQ.
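 *
 * As with the SLI-3 version above, @cfg_mode follows the lpfc_use_msi
 * convention: 2 requests MSI-X, 1 requests MSI, and 0 requests INTx; each
 * mode that fails falls through to the next lower one (descriptive note
 * only, summarizing the code below).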
8019 *
8020 * Return codes
8021 * 	0 - successful
8022 * 	other values - error
8023 **/
8024static uint32_t
8025lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8026{
8027	uint32_t intr_mode = LPFC_INTR_ERROR;
8028	int retval, index;
8029
8030	if (cfg_mode == 2) {
8031		/* Preparation before conf_msi mbox cmd */
8032		retval = 0;
8033		if (!retval) {
8034			/* Now, try to enable MSI-X interrupt mode */
8035			retval = lpfc_sli4_enable_msix(phba);
8036			if (!retval) {
8037				/* Indicate initialization to MSI-X mode */
8038				phba->intr_type = MSIX;
8039				intr_mode = 2;
8040			}
8041		}
8042	}
8043
8044	/* Fallback to MSI if MSI-X initialization failed */
8045	if (cfg_mode >= 1 && phba->intr_type == NONE) {
8046		retval = lpfc_sli4_enable_msi(phba);
8047		if (!retval) {
8048			/* Indicate initialization to MSI mode */
8049			phba->intr_type = MSI;
8050			intr_mode = 1;
8051		}
8052	}
8053
8054	/* Fallback to INTx if both MSI-X/MSI initialization failed */
8055	if (phba->intr_type == NONE) {
8056		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8057				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8058		if (!retval) {
8059			/* Indicate initialization to INTx mode */
8060			phba->intr_type = INTx;
8061			intr_mode = 0;
8062			for (index = 0; index < phba->cfg_fcp_eq_count;
8063			     index++) {
8064				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8065				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8066			}
8067		}
8068	}
8069	return intr_mode;
8070}
8071
8072/**
8073 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
8074 * @phba: pointer to lpfc hba data structure.
8075 *
8076 * This routine is invoked to disable device interrupt and disassociate
8077 * the driver's interrupt handler(s) from interrupt vector(s) to device
8078 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
8079 * will release the interrupt vector(s) for the message signaled interrupt.
8080 **/
8081static void
8082lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8083{
8084	/* Disable the currently initialized interrupt mode */
8085	if (phba->intr_type == MSIX)
8086		lpfc_sli4_disable_msix(phba);
8087	else if (phba->intr_type == MSI)
8088		lpfc_sli4_disable_msi(phba);
8089	else if (phba->intr_type == INTx)
8090		free_irq(phba->pcidev->irq, phba);
8091
8092	/* Reset interrupt management states */
8093	phba->intr_type = NONE;
8094	phba->sli.slistat.sli_intr = 0;
8095
8096	return;
8097}
8098
8099/**
8100 * lpfc_unset_hba - Unset SLI3 hba device initialization
8101 * @phba: pointer to lpfc hba data structure.
8102 *
8103 * This routine is invoked to unset the HBA device initialization steps for
8104 * a device with SLI-3 interface spec.
8105 **/
8106static void
8107lpfc_unset_hba(struct lpfc_hba *phba)
8108{
8109	struct lpfc_vport *vport = phba->pport;
8110	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8111
8112	spin_lock_irq(shost->host_lock);
8113	vport->load_flag |= FC_UNLOADING;
8114	spin_unlock_irq(shost->host_lock);
8115
8116	lpfc_stop_hba_timers(phba);
8117
8118	phba->pport->work_port_events = 0;
8119
8120	lpfc_sli_hba_down(phba);
8121
8122	lpfc_sli_brdrestart(phba);
8123
8124	lpfc_sli_disable_intr(phba);
8125
8126	return;
8127}
8128
8129/**
8130 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
8131 * @phba: pointer to lpfc hba data structure.
8132 *
8133 * This routine is invoked to unset the HBA device initialization steps for
8134 * a device with SLI-4 interface spec.
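 *
 * The teardown below mirrors, in reverse, the bring-up done at probe time:
 * mark the physical port FC_UNLOADING, stop the port and its interrupts,
 * then reset the PCI function and destroy the SLI4 queues (descriptive
 * summary of the code below).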
8135 **/
8136static void
8137lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8138{
8139	struct lpfc_vport *vport = phba->pport;
8140	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8141
8142	spin_lock_irq(shost->host_lock);
8143	vport->load_flag |= FC_UNLOADING;
8144	spin_unlock_irq(shost->host_lock);
8145
8146	phba->pport->work_port_events = 0;
8147
8148	/* Stop the SLI4 device port */
8149	lpfc_stop_port(phba);
8150
8151	lpfc_sli4_disable_intr(phba);
8152
8153	/* Reset SLI4 HBA FCoE function */
8154	lpfc_pci_function_reset(phba);
8155	lpfc_sli4_queue_destroy(phba);
8156
8157	return;
8158}
8159
8160/**
8161 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8162 * @phba: Pointer to HBA context object.
8163 *
8164 * This function is called in the SLI4 code path to wait for completion
8165 * of the device's XRI exchange busy conditions. It will check the XRI
8166 * exchange busy on outstanding FCP and ELS I/Os every 10ms for up to 10
8167 * seconds; after that, it will check every 30 seconds, logging an error
8168 * message, and wait forever. Only when all XRI exchange busy conditions
8169 * are complete shall the driver unload proceed with invoking the
8170 * function reset ioctl mailbox command to the CNA and then the rest of
8171 * the driver unload resource release.
8172 **/
8173static void
8174lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8175{
8176	int wait_time = 0;
8177	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8178	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8179
8180	while (!fcp_xri_cmpl || !els_xri_cmpl) {
8181		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8182			if (!fcp_xri_cmpl)
8183				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8184						"2877 FCP XRI exchange busy "
8185						"wait time: %d seconds.\n",
8186						wait_time/1000);
8187			if (!els_xri_cmpl)
8188				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8189						"2878 ELS XRI exchange busy "
8190						"wait time: %d seconds.\n",
8191						wait_time/1000);
8192			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8193			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8194		} else {
8195			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8196			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8197		}
8198		fcp_xri_cmpl =
8199			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8200		els_xri_cmpl =
8201			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8202	}
8203}
8204
8205/**
8206 * lpfc_sli4_hba_unset - Unset the fcoe hba
8207 * @phba: Pointer to HBA context object.
8208 *
8209 * This function is called in the SLI4 code path to reset the HBA's FCoE
8210 * function. The caller is not required to hold any lock. This routine
8211 * issues PCI function reset mailbox command to reset the FCoE function.
8212 * At the end of the function, it calls lpfc_hba_down_post function to
8213 * free any pending commands.
8214 **/
8215static void
8216lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8217{
8218	int wait_cnt = 0;
8219	LPFC_MBOXQ_t *mboxq;
8220	struct pci_dev *pdev = phba->pcidev;
8221
8222	lpfc_stop_hba_timers(phba);
8223	phba->sli4_hba.intr_enable = 0;
8224
8225	/*
8226	 * Gracefully wait out any outstanding asynchronous
8227	 * mailbox command.
8228	 */
8229
8230	/* First, block any pending async mailbox command from being posted */
8231	spin_lock_irq(&phba->hbalock);
8232	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8233	spin_unlock_irq(&phba->hbalock);
8234	/* Now, try to wait it out if we can */
8235	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8236		msleep(10);
8237		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8238			break;
8239	}
8240	/* Forcefully release the outstanding mailbox command if timed out */
8241	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8242		spin_lock_irq(&phba->hbalock);
8243		mboxq = phba->sli.mbox_active;
8244		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8245		__lpfc_mbox_cmpl_put(phba, mboxq);
8246		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8247		phba->sli.mbox_active = NULL;
8248		spin_unlock_irq(&phba->hbalock);
8249	}
8250
8251	/* Abort all iocbs associated with the hba */
8252	lpfc_sli_hba_iocb_abort(phba);
8253
8254	/* Wait for completion of device XRI exchange busy */
8255	lpfc_sli4_xri_exchange_busy_wait(phba);
8256
8257	/* Disable PCI subsystem interrupt */
8258	lpfc_sli4_disable_intr(phba);
8259
8260	/* Disable SR-IOV if enabled */
8261	if (phba->cfg_sriov_nr_virtfn)
8262		pci_disable_sriov(pdev);
8263
8264	/* Stopping the kthread triggers work_done one more time */
8265	kthread_stop(phba->worker_thread);
8266
8267	/* Reset SLI4 HBA FCoE function */
8268	lpfc_pci_function_reset(phba);
8269	lpfc_sli4_queue_destroy(phba);
8270
8271	/* Stop the SLI4 device port */
8272	phba->pport->work_port_events = 0;
8273}
8274
8275 /**
8276 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8277 * @phba: Pointer to HBA context object.
8278 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8279 *
8280 * This function is called in the SLI4 code path to read the port's
8281 * sli4 capabilities.
8282 *
8283 * This function may be called from any context that can block-wait
8284 * for the completion. The expectation is that this routine is called
8285 * typically from probe_one or from the online routine.
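 *
 * A hedged calling sketch (hypothetical caller; allocation and error
 * handling abbreviated):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq || lpfc_pc_sli4_params_get(phba, mboxq))
 *		... a non-zero return means the mailbox command failed ...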
8286 **/
8287int
8288lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8289{
8290	int rc;
8291	struct lpfc_mqe *mqe;
8292	struct lpfc_pc_sli4_params *sli4_params;
8293	uint32_t mbox_tmo;
8294
8295	rc = 0;
8296	mqe = &mboxq->u.mqe;
8297
8298	/* Read the port's SLI4 Parameters port capabilities */
8299	lpfc_pc_sli4_params(mboxq);
8300	if (!phba->sli4_hba.intr_enable)
8301		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8302	else {
8303		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8304		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8305	}
8306
8307	if (unlikely(rc))
8308		return 1;
8309
8310	sli4_params = &phba->sli4_hba.pc_sli4_params;
8311	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8312	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8313	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8314	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8315					     &mqe->un.sli4_params);
8316	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8317					     &mqe->un.sli4_params);
8318	sli4_params->proto_types = mqe->un.sli4_params.word3;
8319	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8320	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8321	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8322	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8323	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8324	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8325	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8326	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8327	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8328	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8329	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8330	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8331	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8332	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8333	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8334	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8335	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8336	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8337	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8338	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8339
8340	/* Make sure that sge_supp_len can be handled by the driver */
8341	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8342		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8343
8344	return rc;
8345}
8346
8347/**
8348 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8349 * @phba: Pointer to HBA context object.
8350 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8351 *
8352 * This function is called in the SLI4 code path to read the port's
8353 * sli4 capabilities.
8354 *
8355 * This function may be called from any context that can block-wait
8356 * for the completion. The expectation is that this routine is called
8357 * typically from probe_one or from the online routine.
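 *
 * Unlike lpfc_pc_sli4_params_get() above, which returns 1 on any mailbox
 * failure, this routine propagates the mailbox return code on failure and
 * returns 0 only on success (descriptive note only).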
8358 **/
8359int
8360lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8361{
8362	int rc;
8363	struct lpfc_mqe *mqe = &mboxq->u.mqe;
8364	struct lpfc_pc_sli4_params *sli4_params;
8365	uint32_t mbox_tmo;
8366	int length;
8367	struct lpfc_sli4_parameters *mbx_sli4_parameters;
8368
8369	/*
8370	 * By default, the driver assumes the SLI4 port requires RPI
8371	 * header postings. The SLI4_PARAM response will correct this
8372	 * assumption.
8373	 */
8374	phba->sli4_hba.rpi_hdrs_in_use = 1;
8375
8376	/* Read the port's SLI4 Config Parameters */
8377	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8378		  sizeof(struct lpfc_sli4_cfg_mhdr));
8379	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8380			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8381			 length, LPFC_SLI4_MBX_EMBED);
8382	if (!phba->sli4_hba.intr_enable)
8383		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8384	else {
8385		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8386		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8387	}
8388	if (unlikely(rc))
8389		return rc;
8390	sli4_params = &phba->sli4_hba.pc_sli4_params;
8391	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8392	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8393	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8394	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8395	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8396					     mbx_sli4_parameters);
8397	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8398					     mbx_sli4_parameters);
8399	if (bf_get(cfg_phwq, mbx_sli4_parameters))
8400		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8401	else
8402		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8403	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8404	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8405	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8406	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8407	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8408	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8409	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8410					    mbx_sli4_parameters);
8411	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8412					   mbx_sli4_parameters);
8413	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8414	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8415
8416	/* Make sure that sge_supp_len can be handled by the driver */
8417	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8418		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8419
8420	return 0;
8421}
8422
8423/**
8424 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8425 * @pdev: pointer to PCI device
8426 * @pid: pointer to PCI device identifier
8427 *
8428 * This routine is to be called to attach a device with SLI-3 interface spec
8429 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8430 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
8431 * information of the device and driver to see if the driver states that it can
8432 * support this kind of device. If the match is successful, the driver core
8433 * invokes this routine. If this routine determines it can claim the HBA, it
8434 * does all the initialization that it needs to do to handle the HBA properly.
8435 * 8436 * Return code 8437 * 0 - driver can claim the device 8438 * negative value - driver can not claim the device 8439 **/ 8440static int __devinit 8441lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 8442{ 8443 struct lpfc_hba *phba; 8444 struct lpfc_vport *vport = NULL; 8445 struct Scsi_Host *shost = NULL; 8446 int error; 8447 uint32_t cfg_mode, intr_mode; 8448 8449 /* Allocate memory for HBA structure */ 8450 phba = lpfc_hba_alloc(pdev); 8451 if (!phba) 8452 return -ENOMEM; 8453 8454 /* Perform generic PCI device enabling operation */ 8455 error = lpfc_enable_pci_dev(phba); 8456 if (error) 8457 goto out_free_phba; 8458 8459 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 8460 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 8461 if (error) 8462 goto out_disable_pci_dev; 8463 8464 /* Set up SLI-3 specific device PCI memory space */ 8465 error = lpfc_sli_pci_mem_setup(phba); 8466 if (error) { 8467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8468 "1402 Failed to set up pci memory space.\n"); 8469 goto out_disable_pci_dev; 8470 } 8471 8472 /* Set up phase-1 common device driver resources */ 8473 error = lpfc_setup_driver_resource_phase1(phba); 8474 if (error) { 8475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8476 "1403 Failed to set up driver resource.\n"); 8477 goto out_unset_pci_mem_s3; 8478 } 8479 8480 /* Set up SLI-3 specific device driver resources */ 8481 error = lpfc_sli_driver_resource_setup(phba); 8482 if (error) { 8483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8484 "1404 Failed to set up driver resource.\n"); 8485 goto out_unset_pci_mem_s3; 8486 } 8487 8488 /* Initialize and populate the iocb list per host */ 8489 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 8490 if (error) { 8491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8492 "1405 Failed to initialize iocb list.\n"); 8493 goto out_unset_driver_resource_s3; 8494 } 8495 8496 /* Set up common device driver resources */ 8497 error = lpfc_setup_driver_resource_phase2(phba); 8498 if (error) { 8499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8500 "1406 Failed to set up driver resource.\n"); 8501 goto out_free_iocb_list; 8502 } 8503 8504 /* Get the default values for Model Name and Description */ 8505 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 8506 8507 /* Create SCSI host to the physical port */ 8508 error = lpfc_create_shost(phba); 8509 if (error) { 8510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8511 "1407 Failed to create scsi host.\n"); 8512 goto out_unset_driver_resource; 8513 } 8514 8515 /* Configure sysfs attributes */ 8516 vport = phba->pport; 8517 error = lpfc_alloc_sysfs_attr(vport); 8518 if (error) { 8519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8520 "1476 Failed to allocate sysfs attr\n"); 8521 goto out_destroy_shost; 8522 } 8523 8524 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8525 /* Now, trying to enable interrupt and bring up the device */ 8526 cfg_mode = phba->cfg_use_msi; 8527 while (true) { 8528 /* Put device to a known state before enabling interrupt */ 8529 lpfc_stop_port(phba); 8530 /* Configure and enable interrupt */ 8531 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 8532 if (intr_mode == LPFC_INTR_ERROR) { 8533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8534 "0431 Failed to enable interrupt.\n"); 8535 error = -ENODEV; 8536 goto out_free_sysfs_attr; 8537 } 8538 /* SLI-3 HBA setup */ 8539 if (lpfc_sli_hba_setup(phba)) { 8540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8541 "1477 Failed to set up 
hba\n"); 8542 error = -ENODEV; 8543 goto out_remove_device; 8544 } 8545 8546 /* Wait 50ms for the interrupts of previous mailbox commands */ 8547 msleep(50); 8548 /* Check active interrupts on message signaled interrupts */ 8549 if (intr_mode == 0 || 8550 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 8551 /* Log the current active interrupt mode */ 8552 phba->intr_mode = intr_mode; 8553 lpfc_log_intr_mode(phba, intr_mode); 8554 break; 8555 } else { 8556 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8557 "0447 Configure interrupt mode (%d) " 8558 "failed active interrupt test.\n", 8559 intr_mode); 8560 /* Disable the current interrupt mode */ 8561 lpfc_sli_disable_intr(phba); 8562 /* Try next level of interrupt mode */ 8563 cfg_mode = --intr_mode; 8564 } 8565 } 8566 8567 /* Perform post initialization setup */ 8568 lpfc_post_init_setup(phba); 8569 8570 /* Check if there are static vports to be created. */ 8571 lpfc_create_static_vport(phba); 8572 8573 return 0; 8574 8575out_remove_device: 8576 lpfc_unset_hba(phba); 8577out_free_sysfs_attr: 8578 lpfc_free_sysfs_attr(vport); 8579out_destroy_shost: 8580 lpfc_destroy_shost(phba); 8581out_unset_driver_resource: 8582 lpfc_unset_driver_resource_phase2(phba); 8583out_free_iocb_list: 8584 lpfc_free_iocb_list(phba); 8585out_unset_driver_resource_s3: 8586 lpfc_sli_driver_resource_unset(phba); 8587out_unset_pci_mem_s3: 8588 lpfc_sli_pci_mem_unset(phba); 8589out_disable_pci_dev: 8590 lpfc_disable_pci_dev(phba); 8591 if (shost) 8592 scsi_host_put(shost); 8593out_free_phba: 8594 lpfc_hba_free(phba); 8595 return error; 8596} 8597 8598/** 8599 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 8600 * @pdev: pointer to PCI device 8601 * 8602 * This routine is to be called to disattach a device with SLI-3 interface 8603 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 8604 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8605 * device to be removed from the PCI subsystem properly. 8606 **/ 8607static void __devexit 8608lpfc_pci_remove_one_s3(struct pci_dev *pdev) 8609{ 8610 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8611 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8612 struct lpfc_vport **vports; 8613 struct lpfc_hba *phba = vport->phba; 8614 int i; 8615 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 8616 8617 spin_lock_irq(&phba->hbalock); 8618 vport->load_flag |= FC_UNLOADING; 8619 spin_unlock_irq(&phba->hbalock); 8620 8621 lpfc_free_sysfs_attr(vport); 8622 8623 /* Release all the vports against this physical port */ 8624 vports = lpfc_create_vport_work_array(phba); 8625 if (vports != NULL) 8626 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8627 fc_vport_terminate(vports[i]->fc_vport); 8628 lpfc_destroy_vport_work_array(phba, vports); 8629 8630 /* Remove FC host and then SCSI host with the physical port */ 8631 fc_remove_host(shost); 8632 scsi_remove_host(shost); 8633 lpfc_cleanup(vport); 8634 8635 /* 8636 * Bring down the SLI Layer. This step disable all interrupts, 8637 * clears the rings, discards all mailbox commands, and resets 8638 * the HBA. 
8639	 */
8640
8641	/* HBA interrupt will be disabled after this call */
8642	lpfc_sli_hba_down(phba);
8643	/* Stopping the kthread triggers work_done one more time */
8644	kthread_stop(phba->worker_thread);
8645	/* Final cleanup of txcmplq and reset the HBA */
8646	lpfc_sli_brdrestart(phba);
8647
8648	lpfc_stop_hba_timers(phba);
8649	spin_lock_irq(&phba->hbalock);
8650	list_del_init(&vport->listentry);
8651	spin_unlock_irq(&phba->hbalock);
8652
8653	lpfc_debugfs_terminate(vport);
8654
8655	/* Disable SR-IOV if enabled */
8656	if (phba->cfg_sriov_nr_virtfn)
8657		pci_disable_sriov(pdev);
8658
8659	/* Disable interrupt */
8660	lpfc_sli_disable_intr(phba);
8661
8662	pci_set_drvdata(pdev, NULL);
8663	scsi_host_put(shost);
8664
8665	/*
8666	 * Call scsi_free before mem_free since scsi bufs are released to their
8667	 * corresponding pools here.
8668	 */
8669	lpfc_scsi_free(phba);
8670	lpfc_mem_free_all(phba);
8671
8672	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8673			  phba->hbqslimp.virt, phba->hbqslimp.phys);
8674
8675	/* Free resources associated with SLI2 interface */
8676	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8677			  phba->slim2p.virt, phba->slim2p.phys);
8678
8679	/* unmap adapter SLIM and Control Registers */
8680	iounmap(phba->ctrl_regs_memmap_p);
8681	iounmap(phba->slim_memmap_p);
8682
8683	lpfc_hba_free(phba);
8684
8685	pci_release_selected_regions(pdev, bars);
8686	pci_disable_device(pdev);
8687}
8688
8689/**
8690 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8691 * @pdev: pointer to PCI device
8692 * @msg: power management message
8693 *
8694 * This routine is to be called from the kernel's PCI subsystem to support
8695 * system Power Management (PM) for a device with SLI-3 interface spec. When
8696 * PM invokes this method, it quiesces the device by stopping the driver's
8697 * worker thread for the device, turning off the device's interrupt and DMA,
8698 * and bringing the device offline. Note that because the driver implements
8699 * only the minimum PM requirements for a power-aware driver's suspend/resume
8700 * support -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
8701 * suspend() method call are treated as SUSPEND and the driver fully
8702 * reinitializes its device during the resume() method call -- the driver
8703 * sets the device to the PCI_D3hot state in PCI config space instead of
8704 * setting it according to the @msg provided by the PM.
8705 *
8706 * Return code
8707 * 	0 - driver suspended the device
8708 * 	Error otherwise
8709 **/
8710static int
8711lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8712{
8713	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8714	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8715
8716	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8717			"0473 PCI device Power Management suspend.\n");
8718
8719	/* Bring down the device */
8720	lpfc_offline_prep(phba);
8721	lpfc_offline(phba);
8722	kthread_stop(phba->worker_thread);
8723
8724	/* Disable interrupt from device */
8725	lpfc_sli_disable_intr(phba);
8726
8727	/* Save device state to PCI config space */
8728	pci_save_state(pdev);
8729	pci_set_power_state(pdev, PCI_D3hot);
8730
8731	return 0;
8732}
8733
8734/**
8735 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8736 * @pdev: pointer to PCI device
8737 *
8738 * This routine is to be called from the kernel's PCI subsystem to support
8739 * system Power Management (PM) for a device with SLI-3 interface spec.
When PM
8740 * invokes this method, it restores the device's PCI config space state and
8741 * fully reinitializes the device and brings it online. Note that because the
8742 * driver implements only the minimum PM requirements for a power-aware
8743 * driver's suspend/resume support -- all possible PM messages (SUSPEND,
8744 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND and
8745 * the driver fully reinitializes its device during the resume() method call --
8746 * the device will be set to PCI_D0 directly in PCI config space before
8747 * restoring the state.
8748 *
8749 * Return code
8750 * 	0 - driver resumed the device
8751 * 	Error otherwise
8752 **/
8753static int
8754lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8755{
8756	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8757	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8758	uint32_t intr_mode;
8759	int error;
8760
8761	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8762			"0452 PCI device Power Management resume.\n");
8763
8764	/* Restore device state from PCI config space */
8765	pci_set_power_state(pdev, PCI_D0);
8766	pci_restore_state(pdev);
8767
8768	/*
8769	 * As the new kernel behavior of pci_restore_state() API call clears
8770	 * device saved_state flag, need to save the restored state again.
8771	 */
8772	pci_save_state(pdev);
8773
8774	if (pdev->is_busmaster)
8775		pci_set_master(pdev);
8776
8777	/* Startup the kernel thread for this host adapter. */
8778	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8779				"lpfc_worker_%d", phba->brd_no);
8780	if (IS_ERR(phba->worker_thread)) {
8781		error = PTR_ERR(phba->worker_thread);
8782		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8783				"0434 PM resume failed to start worker "
8784				"thread: error=x%x.\n", error);
8785		return error;
8786	}
8787
8788	/* Configure and enable interrupt */
8789	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8790	if (intr_mode == LPFC_INTR_ERROR) {
8791		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8792				"0430 PM resume Failed to enable interrupt\n");
8793		return -EIO;
8794	} else
8795		phba->intr_mode = intr_mode;
8796
8797	/* Restart HBA and bring it online */
8798	lpfc_sli_brdrestart(phba);
8799	lpfc_online(phba);
8800
8801	/* Log the current active interrupt mode */
8802	lpfc_log_intr_mode(phba, phba->intr_mode);
8803
8804	return 0;
8805}
8806
8807/**
8808 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8809 * @phba: pointer to lpfc hba data structure.
8810 *
8811 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8812 * aborts all the outstanding SCSI I/Os to the pci device.
8813 **/
8814static void
8815lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8816{
8817	struct lpfc_sli *psli = &phba->sli;
8818	struct lpfc_sli_ring  *pring;
8819
8820	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8821			"2723 PCI channel I/O abort preparing for recovery\n");
8822
8823	/*
8824	 * There may be errored I/Os through the HBA; abort all I/Os on the
8825	 * txcmplq and let the SCSI mid-layer retry them to recover.
8826	 */
8827	pring = &psli->ring[psli->fcp_ring];
8828	lpfc_sli_abort_iocb_ring(phba, pring);
8829}
8830
8831/**
8832 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8833 * @phba: pointer to lpfc hba data structure.
8834 *
8835 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8836 * disables the device interrupt and pci device, and aborts the internal FCP
8837 * pending I/Os.
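 *
 * Compared with lpfc_sli_prep_dev_for_perm_failure() below, this path
 * additionally blocks management I/O and disables the interrupt and the
 * PCI device, since a slot reset is expected to follow (descriptive note
 * only).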
8838 **/ 8839static void 8840lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 8841{ 8842 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8843 "2710 PCI channel disable preparing for reset\n"); 8844 8845 /* Block any management I/Os to the device */ 8846 lpfc_block_mgmt_io(phba); 8847 8848 /* Block all SCSI devices' I/Os on the host */ 8849 lpfc_scsi_dev_block(phba); 8850 8851 /* stop all timers */ 8852 lpfc_stop_hba_timers(phba); 8853 8854 /* Disable interrupt and pci device */ 8855 lpfc_sli_disable_intr(phba); 8856 pci_disable_device(phba->pcidev); 8857 8858 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8859 lpfc_sli_flush_fcp_rings(phba); 8860} 8861 8862/** 8863 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 8864 * @phba: pointer to lpfc hba data structure. 8865 * 8866 * This routine is called to prepare the SLI3 device for PCI slot permanently 8867 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8868 * pending I/Os. 8869 **/ 8870static void 8871lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8872{ 8873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8874 "2711 PCI channel permanent disable for failure\n"); 8875 /* Block all SCSI devices' I/Os on the host */ 8876 lpfc_scsi_dev_block(phba); 8877 8878 /* stop all timers */ 8879 lpfc_stop_hba_timers(phba); 8880 8881 /* Clean up all driver's outstanding SCSI I/Os */ 8882 lpfc_sli_flush_fcp_rings(phba); 8883} 8884 8885/** 8886 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 8887 * @pdev: pointer to PCI device. 8888 * @state: the current PCI connection state. 8889 * 8890 * This routine is called from the PCI subsystem for I/O error handling to 8891 * device with SLI-3 interface spec. This function is called by the PCI 8892 * subsystem after a PCI bus error affecting this device has been detected. 8893 * When this function is invoked, it will need to stop all the I/Os and 8894 * interrupt(s) to the device. Once that is done, it will return 8895 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 8896 * as desired. 8897 * 8898 * Return codes 8899 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 8900 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8901 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8902 **/ 8903static pci_ers_result_t 8904lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 8905{ 8906 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8907 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8908 8909 switch (state) { 8910 case pci_channel_io_normal: 8911 /* Non-fatal error, prepare for recovery */ 8912 lpfc_sli_prep_dev_for_recover(phba); 8913 return PCI_ERS_RESULT_CAN_RECOVER; 8914 case pci_channel_io_frozen: 8915 /* Fatal error, prepare for slot reset */ 8916 lpfc_sli_prep_dev_for_reset(phba); 8917 return PCI_ERS_RESULT_NEED_RESET; 8918 case pci_channel_io_perm_failure: 8919 /* Permanent failure, prepare for device down */ 8920 lpfc_sli_prep_dev_for_perm_failure(phba); 8921 return PCI_ERS_RESULT_DISCONNECT; 8922 default: 8923 /* Unknown state, prepare and request slot reset */ 8924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8925 "0472 Unknown PCI error state: x%x\n", state); 8926 lpfc_sli_prep_dev_for_reset(phba); 8927 return PCI_ERS_RESULT_NEED_RESET; 8928 } 8929} 8930 8931/** 8932 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8933 * @pdev: pointer to PCI device. 
8934 * 8935 * This routine is called from the PCI subsystem for error handling to 8936 * device with SLI-3 interface spec. This is called after PCI bus has been 8937 * reset to restart the PCI card from scratch, as if from a cold-boot. 8938 * During the PCI subsystem error recovery, after driver returns 8939 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8940 * recovery and then call this routine before calling the .resume method 8941 * to recover the device. This function will initialize the HBA device, 8942 * enable the interrupt, but it will just put the HBA to offline state 8943 * without passing any I/O traffic. 8944 * 8945 * Return codes 8946 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8947 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8948 */ 8949static pci_ers_result_t 8950lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8951{ 8952 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8953 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8954 struct lpfc_sli *psli = &phba->sli; 8955 uint32_t intr_mode; 8956 8957 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8958 if (pci_enable_device_mem(pdev)) { 8959 printk(KERN_ERR "lpfc: Cannot re-enable " 8960 "PCI device after reset.\n"); 8961 return PCI_ERS_RESULT_DISCONNECT; 8962 } 8963 8964 pci_restore_state(pdev); 8965 8966 /* 8967 * As the new kernel behavior of pci_restore_state() API call clears 8968 * device saved_state flag, need to save the restored state again. 8969 */ 8970 pci_save_state(pdev); 8971 8972 if (pdev->is_busmaster) 8973 pci_set_master(pdev); 8974 8975 spin_lock_irq(&phba->hbalock); 8976 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8977 spin_unlock_irq(&phba->hbalock); 8978 8979 /* Configure and enable interrupt */ 8980 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8981 if (intr_mode == LPFC_INTR_ERROR) { 8982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8983 "0427 Cannot re-enable interrupt after " 8984 "slot reset.\n"); 8985 return PCI_ERS_RESULT_DISCONNECT; 8986 } else 8987 phba->intr_mode = intr_mode; 8988 8989 /* Take device offline, it will perform cleanup */ 8990 lpfc_offline_prep(phba); 8991 lpfc_offline(phba); 8992 lpfc_sli_brdrestart(phba); 8993 8994 /* Log the current active interrupt mode */ 8995 lpfc_log_intr_mode(phba, phba->intr_mode); 8996 8997 return PCI_ERS_RESULT_RECOVERED; 8998} 8999 9000/** 9001 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 9002 * @pdev: pointer to PCI device 9003 * 9004 * This routine is called from the PCI subsystem for error handling to device 9005 * with SLI-3 interface spec. It is called when kernel error recovery tells 9006 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 9007 * error recovery. After this call, traffic can start to flow from this device 9008 * again. 9009 */ 9010static void 9011lpfc_io_resume_s3(struct pci_dev *pdev) 9012{ 9013 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9014 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9015 9016 /* Bring device online, it will be no-op for non-fatal error resume */ 9017 lpfc_online(phba); 9018 9019 /* Clean up Advanced Error Reporting (AER) if needed */ 9020 if (phba->hba_flag & HBA_AER_ENABLED) 9021 pci_cleanup_aer_uncorrect_error_status(pdev); 9022} 9023 9024/** 9025 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 9026 * @phba: pointer to lpfc hba data structure. 
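 *
 * The reservation scales with the port's configured XRI count; SLI-3
 * ports reserve none. Summary of the SLI-4 scaling implemented below:
 *     max_xri <=  100 -> 10
 *     max_xri <=  256 -> 25
 *     max_xri <=  512 -> 50
 *     max_xri <= 1024 -> 100
 *     otherwise       -> 150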
9027 * 9028 * returns the number of ELS/CT IOCBs to reserve 9029 **/ 9030int 9031lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 9032{ 9033 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 9034 9035 if (phba->sli_rev == LPFC_SLI_REV4) { 9036 if (max_xri <= 100) 9037 return 10; 9038 else if (max_xri <= 256) 9039 return 25; 9040 else if (max_xri <= 512) 9041 return 50; 9042 else if (max_xri <= 1024) 9043 return 100; 9044 else 9045 return 150; 9046 } else 9047 return 0; 9048} 9049 9050/** 9051 * lpfc_write_firmware - attempt to write a firmware image to the port 9052 * @phba: pointer to lpfc hba data structure. 9053 * @fw: pointer to firmware image returned from request_firmware. 9054 * 9055 * returns the number of bytes written if write is successful. 9056 * returns a negative error value if there were errors. 9057 * returns 0 if firmware matches currently active firmware on port. 9058 **/ 9059int 9060lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) 9061{ 9062 char fwrev[32]; 9063 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; 9064 struct list_head dma_buffer_list; 9065 int i, rc = 0; 9066 struct lpfc_dmabuf *dmabuf, *next; 9067 uint32_t offset = 0, temp_offset = 0; 9068 9069 INIT_LIST_HEAD(&dma_buffer_list); 9070 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 9071 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 9072 LPFC_FILE_TYPE_GROUP) || 9073 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 9074 (be32_to_cpu(image->size) != fw->size)) { 9075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9076 "3022 Invalid FW image found. " 9077 "Magic:%x Type:%x ID:%x\n", 9078 be32_to_cpu(image->magic_number), 9079 bf_get_be32(lpfc_grp_hdr_file_type, image), 9080 bf_get_be32(lpfc_grp_hdr_id, image)); 9081 return -EINVAL; 9082 } 9083 lpfc_decode_firmware_rev(phba, fwrev, 1); 9084 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 9085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9086 "3023 Updating Firmware. Current Version:%s " 9087 "New Version:%s\n", 9088 fwrev, image->revision); 9089 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 9090 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 9091 GFP_KERNEL); 9092 if (!dmabuf) { 9093 rc = -ENOMEM; 9094 goto out; 9095 } 9096 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9097 SLI4_PAGE_SIZE, 9098 &dmabuf->phys, 9099 GFP_KERNEL); 9100 if (!dmabuf->virt) { 9101 kfree(dmabuf); 9102 rc = -ENOMEM; 9103 goto out; 9104 } 9105 list_add_tail(&dmabuf->list, &dma_buffer_list); 9106 } 9107 while (offset < fw->size) { 9108 temp_offset = offset; 9109 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 9110 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 9111 memcpy(dmabuf->virt, 9112 fw->data + temp_offset, 9113 fw->size - temp_offset); 9114 temp_offset = fw->size; 9115 break; 9116 } 9117 memcpy(dmabuf->virt, fw->data + temp_offset, 9118 SLI4_PAGE_SIZE); 9119 temp_offset += SLI4_PAGE_SIZE; 9120 } 9121 rc = lpfc_wr_object(phba, &dma_buffer_list, 9122 (fw->size - offset), &offset); 9123 if (rc) { 9124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9125 "3024 Firmware update failed. 
" 9126 "%d\n", rc); 9127 goto out; 9128 } 9129 } 9130 rc = offset; 9131 } 9132out: 9133 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 9134 list_del(&dmabuf->list); 9135 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 9136 dmabuf->virt, dmabuf->phys); 9137 kfree(dmabuf); 9138 } 9139 return rc; 9140} 9141 9142/** 9143 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 9144 * @pdev: pointer to PCI device 9145 * @pid: pointer to PCI device identifier 9146 * 9147 * This routine is called from the kernel's PCI subsystem to device with 9148 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 9149 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 9150 * information of the device and driver to see if the driver state that it 9151 * can support this kind of device. If the match is successful, the driver 9152 * core invokes this routine. If this routine determines it can claim the HBA, 9153 * it does all the initialization that it needs to do to handle the HBA 9154 * properly. 9155 * 9156 * Return code 9157 * 0 - driver can claim the device 9158 * negative value - driver can not claim the device 9159 **/ 9160static int __devinit 9161lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 9162{ 9163 struct lpfc_hba *phba; 9164 struct lpfc_vport *vport = NULL; 9165 struct Scsi_Host *shost = NULL; 9166 int error; 9167 uint32_t cfg_mode, intr_mode; 9168 int mcnt; 9169 int adjusted_fcp_eq_count; 9170 const struct firmware *fw; 9171 uint8_t file_name[16]; 9172 9173 /* Allocate memory for HBA structure */ 9174 phba = lpfc_hba_alloc(pdev); 9175 if (!phba) 9176 return -ENOMEM; 9177 9178 /* Perform generic PCI device enabling operation */ 9179 error = lpfc_enable_pci_dev(phba); 9180 if (error) 9181 goto out_free_phba; 9182 9183 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 9184 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 9185 if (error) 9186 goto out_disable_pci_dev; 9187 9188 /* Set up SLI-4 specific device PCI memory space */ 9189 error = lpfc_sli4_pci_mem_setup(phba); 9190 if (error) { 9191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9192 "1410 Failed to set up pci memory space.\n"); 9193 goto out_disable_pci_dev; 9194 } 9195 9196 /* Set up phase-1 common device driver resources */ 9197 error = lpfc_setup_driver_resource_phase1(phba); 9198 if (error) { 9199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9200 "1411 Failed to set up driver resource.\n"); 9201 goto out_unset_pci_mem_s4; 9202 } 9203 9204 /* Set up SLI-4 Specific device driver resources */ 9205 error = lpfc_sli4_driver_resource_setup(phba); 9206 if (error) { 9207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9208 "1412 Failed to set up driver resource.\n"); 9209 goto out_unset_pci_mem_s4; 9210 } 9211 9212 /* Initialize and populate the iocb list per host */ 9213 9214 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9215 "2821 initialize iocb list %d.\n", 9216 phba->cfg_iocb_cnt*1024); 9217 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 9218 9219 if (error) { 9220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9221 "1413 Failed to initialize iocb list.\n"); 9222 goto out_unset_driver_resource_s4; 9223 } 9224 9225 INIT_LIST_HEAD(&phba->active_rrq_list); 9226 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 9227 9228 /* Set up common device driver resources */ 9229 error = lpfc_setup_driver_resource_phase2(phba); 9230 if (error) { 9231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9232 "1414 Failed to set up driver 
resource.\n"); 9233 goto out_free_iocb_list; 9234 } 9235 9236 /* Get the default values for Model Name and Description */ 9237 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9238 9239 /* Create SCSI host to the physical port */ 9240 error = lpfc_create_shost(phba); 9241 if (error) { 9242 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9243 "1415 Failed to create scsi host.\n"); 9244 goto out_unset_driver_resource; 9245 } 9246 9247 /* Configure sysfs attributes */ 9248 vport = phba->pport; 9249 error = lpfc_alloc_sysfs_attr(vport); 9250 if (error) { 9251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9252 "1416 Failed to allocate sysfs attr\n"); 9253 goto out_destroy_shost; 9254 } 9255 9256 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9257 /* Now, trying to enable interrupt and bring up the device */ 9258 cfg_mode = phba->cfg_use_msi; 9259 while (true) { 9260 /* Put device to a known state before enabling interrupt */ 9261 lpfc_stop_port(phba); 9262 /* Configure and enable interrupt */ 9263 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 9264 if (intr_mode == LPFC_INTR_ERROR) { 9265 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9266 "0426 Failed to enable interrupt.\n"); 9267 error = -ENODEV; 9268 goto out_free_sysfs_attr; 9269 } 9270 /* Default to single EQ for non-MSI-X */ 9271 if (phba->intr_type != MSIX) 9272 adjusted_fcp_eq_count = 0; 9273 else if (phba->sli4_hba.msix_vec_nr < 9274 phba->cfg_fcp_eq_count + 1) 9275 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9276 else 9277 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; 9278 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; 9279 /* Set up SLI-4 HBA */ 9280 if (lpfc_sli4_hba_setup(phba)) { 9281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9282 "1421 Failed to set up hba\n"); 9283 error = -ENODEV; 9284 goto out_disable_intr; 9285 } 9286 9287 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 9288 if (intr_mode != 0) 9289 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 9290 LPFC_ACT_INTR_CNT); 9291 9292 /* Check active interrupts received only for MSI/MSI-X */ 9293 if (intr_mode == 0 || 9294 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 9295 /* Log the current active interrupt mode */ 9296 phba->intr_mode = intr_mode; 9297 lpfc_log_intr_mode(phba, intr_mode); 9298 break; 9299 } 9300 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9301 "0451 Configure interrupt mode (%d) " 9302 "failed active interrupt test.\n", 9303 intr_mode); 9304 /* Unset the previous SLI-4 HBA setup. */ 9305 /* 9306 * TODO: Is this operation compatible with IF TYPE 2 9307 * devices? All port state is deleted and cleared. 9308 */ 9309 lpfc_sli4_unset_hba(phba); 9310 /* Try next level of interrupt mode */ 9311 cfg_mode = --intr_mode; 9312 } 9313 9314 /* Perform post initialization setup */ 9315 lpfc_post_init_setup(phba); 9316 9317 /* check for firmware upgrade or downgrade (if_type 2 only) */ 9318 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9319 LPFC_SLI_INTF_IF_TYPE_2) { 9320 snprintf(file_name, 16, "%s.grp", phba->ModelName); 9321 error = request_firmware(&fw, file_name, &phba->pcidev->dev); 9322 if (!error) { 9323 lpfc_write_firmware(phba, fw); 9324 release_firmware(fw); 9325 } 9326 } 9327 9328 /* Check if there are static vports to be created. 
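	 * These are vports previously saved in the adapter configuration; the
	 * driver re-creates them itself rather than through the FC transport.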
*/ 9329 lpfc_create_static_vport(phba); 9330 return 0; 9331 9332out_disable_intr: 9333 lpfc_sli4_disable_intr(phba); 9334out_free_sysfs_attr: 9335 lpfc_free_sysfs_attr(vport); 9336out_destroy_shost: 9337 lpfc_destroy_shost(phba); 9338out_unset_driver_resource: 9339 lpfc_unset_driver_resource_phase2(phba); 9340out_free_iocb_list: 9341 lpfc_free_iocb_list(phba); 9342out_unset_driver_resource_s4: 9343 lpfc_sli4_driver_resource_unset(phba); 9344out_unset_pci_mem_s4: 9345 lpfc_sli4_pci_mem_unset(phba); 9346out_disable_pci_dev: 9347 lpfc_disable_pci_dev(phba); 9348 if (shost) 9349 scsi_host_put(shost); 9350out_free_phba: 9351 lpfc_hba_free(phba); 9352 return error; 9353} 9354 9355/** 9356 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 9357 * @pdev: pointer to PCI device 9358 * 9359 * This routine is called from the kernel's PCI subsystem to device with 9360 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 9361 * removed from PCI bus, it performs all the necessary cleanup for the HBA 9362 * device to be removed from the PCI subsystem properly. 9363 **/ 9364static void __devexit 9365lpfc_pci_remove_one_s4(struct pci_dev *pdev) 9366{ 9367 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9368 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 9369 struct lpfc_vport **vports; 9370 struct lpfc_hba *phba = vport->phba; 9371 int i; 9372 9373 /* Mark the device unloading flag */ 9374 spin_lock_irq(&phba->hbalock); 9375 vport->load_flag |= FC_UNLOADING; 9376 spin_unlock_irq(&phba->hbalock); 9377 9378 /* Free the HBA sysfs attributes */ 9379 lpfc_free_sysfs_attr(vport); 9380 9381 /* Release all the vports against this physical port */ 9382 vports = lpfc_create_vport_work_array(phba); 9383 if (vports != NULL) 9384 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 9385 fc_vport_terminate(vports[i]->fc_vport); 9386 lpfc_destroy_vport_work_array(phba, vports); 9387 9388 /* Remove FC host and then SCSI host with the physical port */ 9389 fc_remove_host(shost); 9390 scsi_remove_host(shost); 9391 9392 /* Perform cleanup on the physical port */ 9393 lpfc_cleanup(vport); 9394 9395 /* 9396 * Bring down the SLI Layer. This step disables all interrupts, 9397 * clears the rings, discards all mailbox commands, and resets 9398 * the HBA FCoE function. 9399 */ 9400 lpfc_debugfs_terminate(vport); 9401 lpfc_sli4_hba_unset(phba); 9402 9403 spin_lock_irq(&phba->hbalock); 9404 list_del_init(&vport->listentry); 9405 spin_unlock_irq(&phba->hbalock); 9406 9407 /* Perform scsi free before driver resource_unset since scsi 9408 * buffers are released to their corresponding pools here. 9409 */ 9410 lpfc_scsi_free(phba); 9411 lpfc_sli4_driver_resource_unset(phba); 9412 9413 /* Unmap adapter Control and Doorbell registers */ 9414 lpfc_sli4_pci_mem_unset(phba); 9415 9416 /* Release PCI resources and disable device's PCI function */ 9417 scsi_host_put(shost); 9418 lpfc_disable_pci_dev(phba); 9419 9420 /* Finally, free the driver's device data structure */ 9421 lpfc_hba_free(phba); 9422 9423 return; 9424} 9425 9426/** 9427 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 9428 * @pdev: pointer to PCI device 9429 * @msg: power management message 9430 * 9431 * This routine is called from the kernel's PCI subsystem to support system 9432 * Power Management (PM) to device with SLI-4 interface spec. 
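 * Relative to the SLI-3 variant, this path additionally tears down the
 * SLI-4 queue set (lpfc_sli4_queue_destroy()) after disabling the
 * interrupt.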
When PM invokes 9433 * this method, it quiesces the device by stopping the driver's worker 9434 * thread for the device, turning off device's interrupt and DMA, and bringing 9435 * the device offline. Note that, as the driver implements the minimum PM 9436 * requirements to a power-aware driver's PM support for suspend/resume -- all 9437 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 9438 * method call will be treated as SUSPEND and the driver will fully 9439 * reinitialize its device during the resume() method call -- the driver will 9440 * set the device to PCI_D3hot state in PCI config space instead of setting it 9441 * according to the @msg provided by the PM. 9442 * 9443 * Return code 9444 * 0 - driver suspended the device 9445 * Error otherwise 9446 **/ 9447static int 9448lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 9449{ 9450 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9451 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9452 9453 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9454 "2843 PCI device Power Management suspend.\n"); 9455 9456 /* Bring down the device */ 9457 lpfc_offline_prep(phba); 9458 lpfc_offline(phba); 9459 kthread_stop(phba->worker_thread); 9460 9461 /* Disable interrupt from device */ 9462 lpfc_sli4_disable_intr(phba); 9463 lpfc_sli4_queue_destroy(phba); 9464 9465 /* Save device state to PCI config space */ 9466 pci_save_state(pdev); 9467 pci_set_power_state(pdev, PCI_D3hot); 9468 9469 return 0; 9470} 9471 9472/** 9473 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 9474 * @pdev: pointer to PCI device 9475 * 9476 * This routine is called from the kernel's PCI subsystem to support system 9477 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 9478 * this method, it restores the device's PCI config space state and fully 9479 * reinitializes the device and brings it online. Note that, as the driver 9480 * implements the minimum PM requirements to a power-aware driver's PM for 9481 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 9482 * to the suspend() method call will be treated as SUSPEND and the driver 9483 * will fully reinitialize its device during the resume() method call -- the 9484 * device will be set to PCI_D0 directly in PCI config space before restoring 9485 * the state. 9486 * 9487 * Return code 9488 * 0 - driver resumed the device 9489 * Error otherwise 9490 **/ 9491static int 9492lpfc_pci_resume_one_s4(struct pci_dev *pdev) 9493{ 9494 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9495 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9496 uint32_t intr_mode; 9497 int error; 9498 9499 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9500 "0292 PCI device Power Management resume.\n"); 9501 9502 /* Restore device state from PCI config space */ 9503 pci_set_power_state(pdev, PCI_D0); 9504 pci_restore_state(pdev); 9505 9506 /* 9507 * As the new kernel behavior of pci_restore_state() API call clears 9508 * device saved_state flag, need to save the restored state again. 9509 */ 9510 pci_save_state(pdev); 9511 9512 if (pdev->is_busmaster) 9513 pci_set_master(pdev); 9514 9515 /* Startup the kernel thread for this host adapter.
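	 * The thread was stopped in lpfc_pci_suspend_one_s4(); kthread_run()
	 * both creates and wakes a fresh instance running lpfc_do_work().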
*/ 9516 phba->worker_thread = kthread_run(lpfc_do_work, phba, 9517 "lpfc_worker_%d", phba->brd_no); 9518 if (IS_ERR(phba->worker_thread)) { 9519 error = PTR_ERR(phba->worker_thread); 9520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9521 "0293 PM resume failed to start worker " 9522 "thread: error=x%x.\n", error); 9523 return error; 9524 } 9525 9526 /* Configure and enable interrupt */ 9527 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 9528 if (intr_mode == LPFC_INTR_ERROR) { 9529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9530 "0294 PM resume Failed to enable interrupt\n"); 9531 return -EIO; 9532 } else 9533 phba->intr_mode = intr_mode; 9534 9535 /* Restart HBA and bring it online */ 9536 lpfc_sli_brdrestart(phba); 9537 lpfc_online(phba); 9538 9539 /* Log the current active interrupt mode */ 9540 lpfc_log_intr_mode(phba, phba->intr_mode); 9541 9542 return 0; 9543} 9544 9545/** 9546 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 9547 * @phba: pointer to lpfc hba data structure. 9548 * 9549 * This routine is called to prepare the SLI4 device for PCI slot recover. It 9550 * aborts all the outstanding SCSI I/Os to the pci device. 9551 **/ 9552static void 9553lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 9554{ 9555 struct lpfc_sli *psli = &phba->sli; 9556 struct lpfc_sli_ring *pring; 9557 9558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9559 "2828 PCI channel I/O abort preparing for recovery\n"); 9560 /* 9561 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 9562 * and let the SCSI mid-layer retry them to recover. 9563 */ 9564 pring = &psli->ring[psli->fcp_ring]; 9565 lpfc_sli_abort_iocb_ring(phba, pring); 9566} 9567 9568/** 9569 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 9570 * @phba: pointer to lpfc hba data structure. 9571 * 9572 * This routine is called to prepare the SLI4 device for PCI slot reset. It 9573 * disables the device interrupt and pci device, and aborts the internal FCP 9574 * pending I/Os. 9575 **/ 9576static void 9577lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 9578{ 9579 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9580 "2826 PCI channel disable preparing for reset\n"); 9581 9582 /* Block any management I/Os to the device */ 9583 lpfc_block_mgmt_io(phba); 9584 9585 /* Block all SCSI devices' I/Os on the host */ 9586 lpfc_scsi_dev_block(phba); 9587 9588 /* stop all timers */ 9589 lpfc_stop_hba_timers(phba); 9590 9591 /* Disable interrupt and pci device */ 9592 lpfc_sli4_disable_intr(phba); 9593 lpfc_sli4_queue_destroy(phba); 9594 pci_disable_device(phba->pcidev); 9595 9596 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 9597 lpfc_sli_flush_fcp_rings(phba); 9598} 9599 9600/** 9601 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 9602 * @phba: pointer to lpfc hba data structure. 9603 * 9604 * This routine is called to prepare the SLI4 device for PCI slot permanently 9605 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 9606 * pending I/Os.
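 * Unlike the reset preparation above, the interrupt and the PCI device
 * are left untouched here; the outstanding I/Os are simply flushed back
 * to the SCSI midlayer as failed.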
9607 **/ 9608static void 9609lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 9610{ 9611 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9612 "2827 PCI channel permanent disable for failure\n"); 9613 9614 /* Block all SCSI devices' I/Os on the host */ 9615 lpfc_scsi_dev_block(phba); 9616 9617 /* stop all timers */ 9618 lpfc_stop_hba_timers(phba); 9619 9620 /* Clean up all driver's outstanding SCSI I/Os */ 9621 lpfc_sli_flush_fcp_rings(phba); 9622} 9623 9624/** 9625 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 9626 * @pdev: pointer to PCI device. 9627 * @state: the current PCI connection state. 9628 * 9629 * This routine is called from the PCI subsystem for error handling to device 9630 * with SLI-4 interface spec. This function is called by the PCI subsystem 9631 * after a PCI bus error affecting this device has been detected. When this 9632 * function is invoked, it will need to stop all the I/Os and interrupt(s) 9633 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 9634 * for the PCI subsystem to perform proper recovery as desired. 9635 * 9636 * Return codes 9637 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9638 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9639 **/ 9640static pci_ers_result_t 9641lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 9642{ 9643 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9644 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9645 9646 switch (state) { 9647 case pci_channel_io_normal: 9648 /* Non-fatal error, prepare for recovery */ 9649 lpfc_sli4_prep_dev_for_recover(phba); 9650 return PCI_ERS_RESULT_CAN_RECOVER; 9651 case pci_channel_io_frozen: 9652 /* Fatal error, prepare for slot reset */ 9653 lpfc_sli4_prep_dev_for_reset(phba); 9654 return PCI_ERS_RESULT_NEED_RESET; 9655 case pci_channel_io_perm_failure: 9656 /* Permanent failure, prepare for device down */ 9657 lpfc_sli4_prep_dev_for_perm_failure(phba); 9658 return PCI_ERS_RESULT_DISCONNECT; 9659 default: 9660 /* Unknown state, prepare and request slot reset */ 9661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9662 "2825 Unknown PCI error state: x%x\n", state); 9663 lpfc_sli4_prep_dev_for_reset(phba); 9664 return PCI_ERS_RESULT_NEED_RESET; 9665 } 9666} 9667 9668/** 9669 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 9670 * @pdev: pointer to PCI device. 9671 * 9672 * This routine is called from the PCI subsystem for error handling to device 9673 * with SLI-4 interface spec. It is called after PCI bus has been reset to 9674 * restart the PCI card from scratch, as if from a cold-boot. During the 9675 * PCI subsystem error recovery, after the driver returns 9676 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 9677 * recovery and then call this routine before calling the .resume method to 9678 * recover the device. This function will initialize the HBA device, enable 9679 * the interrupt, but it will just put the HBA to offline state without 9680 * passing any I/O traffic. 
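 * The function reset itself is deferred to lpfc_io_resume_s4(), since it
 * is issued through a mailbox command that requires DMA to be enabled
 * again first.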
9681 * 9682 * Return codes 9683 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9684 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9685 */ 9686static pci_ers_result_t 9687lpfc_io_slot_reset_s4(struct pci_dev *pdev) 9688{ 9689 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9690 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9691 struct lpfc_sli *psli = &phba->sli; 9692 uint32_t intr_mode; 9693 9694 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 9695 if (pci_enable_device_mem(pdev)) { 9696 printk(KERN_ERR "lpfc: Cannot re-enable " 9697 "PCI device after reset.\n"); 9698 return PCI_ERS_RESULT_DISCONNECT; 9699 } 9700 9701 pci_restore_state(pdev); 9702 9703 /* 9704 * As the new kernel behavior of pci_restore_state() API call clears 9705 * device saved_state flag, need to save the restored state again. 9706 */ 9707 pci_save_state(pdev); 9708 9709 if (pdev->is_busmaster) 9710 pci_set_master(pdev); 9711 9712 spin_lock_irq(&phba->hbalock); 9713 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 9714 spin_unlock_irq(&phba->hbalock); 9715 9716 /* Configure and enable interrupt */ 9717 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 9718 if (intr_mode == LPFC_INTR_ERROR) { 9719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9720 "2824 Cannot re-enable interrupt after " 9721 "slot reset.\n"); 9722 return PCI_ERS_RESULT_DISCONNECT; 9723 } else 9724 phba->intr_mode = intr_mode; 9725 9726 /* Log the current active interrupt mode */ 9727 lpfc_log_intr_mode(phba, phba->intr_mode); 9728 9729 return PCI_ERS_RESULT_RECOVERED; 9730} 9731 9732/** 9733 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 9734 * @pdev: pointer to PCI device 9735 * 9736 * This routine is called from the PCI subsystem for error handling to device 9737 * with SLI-4 interface spec. It is called when kernel error recovery tells 9738 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 9739 * error recovery. After this call, traffic can start to flow from this device 9740 * again. 9741 **/ 9742static void 9743lpfc_io_resume_s4(struct pci_dev *pdev) 9744{ 9745 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9746 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9747 9748 /* 9749 * In case of slot reset, as the function reset is performed through 9750 * a mailbox command which needs DMA to be enabled, this operation 9751 * has to be moved to the io resume phase. Taking device offline 9752 * will perform the necessary cleanup. 9753 */ 9754 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 9755 /* Perform device reset */ 9756 lpfc_offline_prep(phba); 9757 lpfc_offline(phba); 9758 lpfc_sli_brdrestart(phba); 9759 /* Bring the device back online */ 9760 lpfc_online(phba); 9761 } 9762 9763 /* Clean up Advanced Error Reporting (AER) if needed */ 9764 if (phba->hba_flag & HBA_AER_ENABLED) 9765 pci_cleanup_aer_uncorrect_error_status(pdev); 9766} 9767 9768/** 9769 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 9770 * @pdev: pointer to PCI device 9771 * @pid: pointer to PCI device identifier 9772 * 9773 * This routine is to be registered to the kernel's PCI subsystem. When an 9774 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 9775 * at PCI device-specific information of the device and driver to see if the 9776 * driver states that it can support this kind of device. If the match is 9777 * successful, the driver core invokes this routine.
This routine dispatches 9778 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 9779 * do all the initialization that it needs to do to handle the HBA device 9780 * properly. 9781 * 9782 * Return code 9783 * 0 - driver can claim the device 9784 * negative value - driver can not claim the device 9785 **/ 9786static int __devinit 9787lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 9788{ 9789 int rc; 9790 struct lpfc_sli_intf intf; 9791 9792 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 9793 return -ENODEV; 9794 9795 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 9796 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 9797 rc = lpfc_pci_probe_one_s4(pdev, pid); 9798 else 9799 rc = lpfc_pci_probe_one_s3(pdev, pid); 9800 9801 return rc; 9802} 9803 9804/** 9805 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 9806 * @pdev: pointer to PCI device 9807 * 9808 * This routine is to be registered to the kernel's PCI subsystem. When an 9809 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 9810 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 9811 * remove routine, which will perform all the necessary cleanup for the 9812 * device to be removed from the PCI subsystem properly. 9813 **/ 9814static void __devexit 9815lpfc_pci_remove_one(struct pci_dev *pdev) 9816{ 9817 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9818 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9819 9820 switch (phba->pci_dev_grp) { 9821 case LPFC_PCI_DEV_LP: 9822 lpfc_pci_remove_one_s3(pdev); 9823 break; 9824 case LPFC_PCI_DEV_OC: 9825 lpfc_pci_remove_one_s4(pdev); 9826 break; 9827 default: 9828 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9829 "1424 Invalid PCI device group: 0x%x\n", 9830 phba->pci_dev_grp); 9831 break; 9832 } 9833 return; 9834} 9835 9836/** 9837 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 9838 * @pdev: pointer to PCI device 9839 * @msg: power management message 9840 * 9841 * This routine is to be registered to the kernel's PCI subsystem to support 9842 * system Power Management (PM). When PM invokes this method, it dispatches 9843 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 9844 * suspend the device. 9845 * 9846 * Return code 9847 * 0 - driver suspended the device 9848 * Error otherwise 9849 **/ 9850static int 9851lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 9852{ 9853 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9854 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9855 int rc = -ENODEV; 9856 9857 switch (phba->pci_dev_grp) { 9858 case LPFC_PCI_DEV_LP: 9859 rc = lpfc_pci_suspend_one_s3(pdev, msg); 9860 break; 9861 case LPFC_PCI_DEV_OC: 9862 rc = lpfc_pci_suspend_one_s4(pdev, msg); 9863 break; 9864 default: 9865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9866 "1425 Invalid PCI device group: 0x%x\n", 9867 phba->pci_dev_grp); 9868 break; 9869 } 9870 return rc; 9871} 9872 9873/** 9874 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 9875 * @pdev: pointer to PCI device 9876 * 9877 * This routine is to be registered to the kernel's PCI subsystem to support 9878 * system Power Management (PM). When PM invokes this method, it dispatches 9879 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 9880 * resume the device. 
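 * The dispatch keys off phba->pci_dev_grp, chosen at probe time when the
 * SLI API jump table was set up (LPFC_PCI_DEV_LP for SLI-3 HBAs,
 * LPFC_PCI_DEV_OC for SLI-4 HBAs).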
9881 * 9882 * Return code 9883 * 0 - driver resumed the device 9884 * Error otherwise 9885 **/ 9886static int 9887lpfc_pci_resume_one(struct pci_dev *pdev) 9888{ 9889 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9890 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9891 int rc = -ENODEV; 9892 9893 switch (phba->pci_dev_grp) { 9894 case LPFC_PCI_DEV_LP: 9895 rc = lpfc_pci_resume_one_s3(pdev); 9896 break; 9897 case LPFC_PCI_DEV_OC: 9898 rc = lpfc_pci_resume_one_s4(pdev); 9899 break; 9900 default: 9901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9902 "1426 Invalid PCI device group: 0x%x\n", 9903 phba->pci_dev_grp); 9904 break; 9905 } 9906 return rc; 9907} 9908 9909/** 9910 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 9911 * @pdev: pointer to PCI device. 9912 * @state: the current PCI connection state. 9913 * 9914 * This routine is registered to the PCI subsystem for error handling. This 9915 * function is called by the PCI subsystem after a PCI bus error affecting 9916 * this device has been detected. When this routine is invoked, it dispatches 9917 * the action to the proper SLI-3 or SLI-4 device error detected handling 9918 * routine, which will perform the proper error detected operation. 9919 * 9920 * Return codes 9921 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9922 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9923 **/ 9924static pci_ers_result_t 9925lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9926{ 9927 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9928 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9929 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9930 9931 switch (phba->pci_dev_grp) { 9932 case LPFC_PCI_DEV_LP: 9933 rc = lpfc_io_error_detected_s3(pdev, state); 9934 break; 9935 case LPFC_PCI_DEV_OC: 9936 rc = lpfc_io_error_detected_s4(pdev, state); 9937 break; 9938 default: 9939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9940 "1427 Invalid PCI device group: 0x%x\n", 9941 phba->pci_dev_grp); 9942 break; 9943 } 9944 return rc; 9945} 9946 9947/** 9948 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 9949 * @pdev: pointer to PCI device. 9950 * 9951 * This routine is registered to the PCI subsystem for error handling. This 9952 * function is called after PCI bus has been reset to restart the PCI card 9953 * from scratch, as if from a cold-boot. When this routine is invoked, it 9954 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 9955 * routine, which will perform the proper device reset.
9956 * 9957 * Return codes 9958 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9959 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9960 **/ 9961static pci_ers_result_t 9962lpfc_io_slot_reset(struct pci_dev *pdev) 9963{ 9964 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9965 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9966 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9967 9968 switch (phba->pci_dev_grp) { 9969 case LPFC_PCI_DEV_LP: 9970 rc = lpfc_io_slot_reset_s3(pdev); 9971 break; 9972 case LPFC_PCI_DEV_OC: 9973 rc = lpfc_io_slot_reset_s4(pdev); 9974 break; 9975 default: 9976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9977 "1428 Invalid PCI device group: 0x%x\n", 9978 phba->pci_dev_grp); 9979 break; 9980 } 9981 return rc; 9982} 9983 9984/** 9985 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 9986 * @pdev: pointer to PCI device 9987 * 9988 * This routine is registered to the PCI subsystem for error handling. It 9989 * is called when kernel error recovery tells the lpfc driver that it is 9990 * OK to resume normal PCI operation after PCI bus error recovery. When 9991 * this routine is invoked, it dispatches the action to the proper SLI-3 9992 * or SLI-4 device io_resume routine, which will resume the device operation. 9993 **/ 9994static void 9995lpfc_io_resume(struct pci_dev *pdev) 9996{ 9997 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9998 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9999 10000 switch (phba->pci_dev_grp) { 10001 case LPFC_PCI_DEV_LP: 10002 lpfc_io_resume_s3(pdev); 10003 break; 10004 case LPFC_PCI_DEV_OC: 10005 lpfc_io_resume_s4(pdev); 10006 break; 10007 default: 10008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10009 "1429 Invalid PCI device group: 0x%x\n", 10010 phba->pci_dev_grp); 10011 break; 10012 } 10013 return; 10014} 10015 10016/** 10017 * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace 10018 * @inode: pointer to the inode representing the lpfcmgmt device 10019 * @filep: pointer to the file representing the open lpfcmgmt device 10020 * 10021 * This routine puts a reference count on the lpfc module whenever the 10022 * character device is opened 10023 **/ 10024static int 10025lpfc_mgmt_open(struct inode *inode, struct file *filep) 10026{ 10027 try_module_get(THIS_MODULE); 10028 return 0; 10029} 10030 10031/** 10032 * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace 10033 * @inode: pointer to the inode representing the lpfcmgmt device 10034 * @filep: pointer to the file representing the open lpfcmgmt device 10035 * 10036 * This routine removes a reference count from the lpfc module when the 10037 * character device is closed 10038 **/ 10039static int 10040lpfc_mgmt_release(struct inode *inode, struct file *filep) 10041{ 10042 module_put(THIS_MODULE); 10043 return 0; 10044} 10045 10046static struct pci_device_id lpfc_id_table[] = { 10047 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 10048 PCI_ANY_ID, PCI_ANY_ID, }, 10049 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 10050 PCI_ANY_ID, PCI_ANY_ID, }, 10051 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 10052 PCI_ANY_ID, PCI_ANY_ID, }, 10053 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 10054 PCI_ANY_ID, PCI_ANY_ID, }, 10055 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 10056 PCI_ANY_ID, PCI_ANY_ID, }, 10057 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 10058 PCI_ANY_ID, PCI_ANY_ID, }, 10059 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 10060 PCI_ANY_ID, PCI_ANY_ID, }, 10061 
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 10062 PCI_ANY_ID, PCI_ANY_ID, }, 10063 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 10064 PCI_ANY_ID, PCI_ANY_ID, }, 10065 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 10066 PCI_ANY_ID, PCI_ANY_ID, }, 10067 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 10068 PCI_ANY_ID, PCI_ANY_ID, }, 10069 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 10070 PCI_ANY_ID, PCI_ANY_ID, }, 10071 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 10072 PCI_ANY_ID, PCI_ANY_ID, }, 10073 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 10074 PCI_ANY_ID, PCI_ANY_ID, }, 10075 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 10076 PCI_ANY_ID, PCI_ANY_ID, }, 10077 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 10078 PCI_ANY_ID, PCI_ANY_ID, }, 10079 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 10080 PCI_ANY_ID, PCI_ANY_ID, }, 10081 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 10082 PCI_ANY_ID, PCI_ANY_ID, }, 10083 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 10084 PCI_ANY_ID, PCI_ANY_ID, }, 10085 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 10086 PCI_ANY_ID, PCI_ANY_ID, }, 10087 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 10088 PCI_ANY_ID, PCI_ANY_ID, }, 10089 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 10090 PCI_ANY_ID, PCI_ANY_ID, }, 10091 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 10092 PCI_ANY_ID, PCI_ANY_ID, }, 10093 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 10094 PCI_ANY_ID, PCI_ANY_ID, }, 10095 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 10096 PCI_ANY_ID, PCI_ANY_ID, }, 10097 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 10098 PCI_ANY_ID, PCI_ANY_ID, }, 10099 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 10100 PCI_ANY_ID, PCI_ANY_ID, }, 10101 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 10102 PCI_ANY_ID, PCI_ANY_ID, }, 10103 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 10104 PCI_ANY_ID, PCI_ANY_ID, }, 10105 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 10106 PCI_ANY_ID, PCI_ANY_ID, }, 10107 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 10108 PCI_ANY_ID, PCI_ANY_ID, }, 10109 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 10110 PCI_ANY_ID, PCI_ANY_ID, }, 10111 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 10112 PCI_ANY_ID, PCI_ANY_ID, }, 10113 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 10114 PCI_ANY_ID, PCI_ANY_ID, }, 10115 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 10116 PCI_ANY_ID, PCI_ANY_ID, }, 10117 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 10118 PCI_ANY_ID, PCI_ANY_ID, }, 10119 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 10120 PCI_ANY_ID, PCI_ANY_ID, }, 10121 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 10122 PCI_ANY_ID, PCI_ANY_ID, }, 10123 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 10124 PCI_ANY_ID, PCI_ANY_ID, }, 10125 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 10126 PCI_ANY_ID, PCI_ANY_ID, }, 10127 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 10128 PCI_ANY_ID, PCI_ANY_ID, }, 10129 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, 10130 PCI_ANY_ID, PCI_ANY_ID, }, 10131 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 10132 PCI_ANY_ID, PCI_ANY_ID, }, 10133 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, 10134 PCI_ANY_ID, PCI_ANY_ID, }, 10135 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, 10136 PCI_ANY_ID, PCI_ANY_ID, }, 10137 { 0 } 10138}; 10139 10140MODULE_DEVICE_TABLE(pci, lpfc_id_table); 10141 10142static struct pci_error_handlers lpfc_err_handler = { 10143 .error_detected = lpfc_io_error_detected, 10144 .slot_reset = lpfc_io_slot_reset, 10145 .resume = lpfc_io_resume, 10146}; 10147 10148static struct 
pci_driver lpfc_driver = { 10149 .name = LPFC_DRIVER_NAME, 10150 .id_table = lpfc_id_table, 10151 .probe = lpfc_pci_probe_one, 10152 .remove = __devexit_p(lpfc_pci_remove_one), 10153 .suspend = lpfc_pci_suspend_one, 10154 .resume = lpfc_pci_resume_one, 10155 .err_handler = &lpfc_err_handler, 10156}; 10157 10158static const struct file_operations lpfc_mgmt_fop = { 10159 .open = lpfc_mgmt_open, 10160 .release = lpfc_mgmt_release, 10161}; 10162 10163static struct miscdevice lpfc_mgmt_dev = { 10164 .minor = MISC_DYNAMIC_MINOR, 10165 .name = "lpfcmgmt", 10166 .fops = &lpfc_mgmt_fop, 10167}; 10168 10169/** 10170 * lpfc_init - lpfc module initialization routine 10171 * 10172 * This routine is to be invoked when the lpfc module is loaded into the 10173 * kernel. The special kernel macro module_init() is used to indicate the 10174 * role of this routine to the kernel as lpfc module entry point. 10175 * 10176 * Return codes 10177 * 0 - successful 10178 * -ENOMEM - FC attach transport failed 10179 * all others - failed 10180 */ 10181static int __init 10182lpfc_init(void) 10183{ 10184 int error = 0; 10185 10186 printk(LPFC_MODULE_DESC "\n"); 10187 printk(LPFC_COPYRIGHT "\n"); 10188 10189 error = misc_register(&lpfc_mgmt_dev); 10190 if (error) 10191 printk(KERN_ERR "Could not register lpfcmgmt device, " 10192 "misc_register returned with status %d\n", error); 10193 10194 if (lpfc_enable_npiv) { 10195 lpfc_transport_functions.vport_create = lpfc_vport_create; 10196 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 10197 } 10198 lpfc_transport_template = 10199 fc_attach_transport(&lpfc_transport_functions); 10200 if (lpfc_transport_template == NULL) 10201 return -ENOMEM; 10202 if (lpfc_enable_npiv) { 10203 lpfc_vport_transport_template = 10204 fc_attach_transport(&lpfc_vport_transport_functions); 10205 if (lpfc_vport_transport_template == NULL) { 10206 fc_release_transport(lpfc_transport_template); 10207 return -ENOMEM; 10208 } 10209 } 10210 error = pci_register_driver(&lpfc_driver); 10211 if (error) { 10212 fc_release_transport(lpfc_transport_template); 10213 if (lpfc_enable_npiv) 10214 fc_release_transport(lpfc_vport_transport_template); 10215 } 10216 10217 return error; 10218} 10219 10220/** 10221 * lpfc_exit - lpfc module removal routine 10222 * 10223 * This routine is invoked when the lpfc module is removed from the kernel. 10224 * The special kernel macro module_exit() is used to indicate the role of 10225 * this routine to the kernel as lpfc module exit point.
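 * It unregisters the lpfcmgmt misc device and the PCI driver, releases
 * the FC transport template(s), and frees the BlockGuard dump buffers if
 * they were left allocated.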
10226 */ 10227static void __exit 10228lpfc_exit(void) 10229{ 10230 misc_deregister(&lpfc_mgmt_dev); 10231 pci_unregister_driver(&lpfc_driver); 10232 fc_release_transport(lpfc_transport_template); 10233 if (lpfc_enable_npiv) 10234 fc_release_transport(lpfc_vport_transport_template); 10235 if (_dump_buf_data) { 10236 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 10237 "_dump_buf_data at 0x%p\n", 10238 (1L << _dump_buf_data_order), _dump_buf_data); 10239 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 10240 } 10241 10242 if (_dump_buf_dif) { 10243 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 10244 "_dump_buf_dif at 0x%p\n", 10245 (1L << _dump_buf_dif_order), _dump_buf_dif); 10246 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 10247 } 10248} 10249 10250module_init(lpfc_init); 10251module_exit(lpfc_exit); 10252MODULE_LICENSE("GPL"); 10253MODULE_DESCRIPTION(LPFC_MODULE_DESC); 10254MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 10255MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 10256
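
/*
 * Usage note (illustrative, not part of the driver): MODULE_DEVICE_TABLE()
 * above exports lpfc_id_table as module aliases, so udev normally loads
 * this module automatically when a matching Emulex HBA is discovered. It
 * can also be loaded and unloaded by hand:
 *
 *     # modprobe lpfc
 *     # rmmod lpfc
 *
 * module_init()/module_exit() bind these events to lpfc_init() and
 * lpfc_exit() respectively.
 */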