lpfc_init.c revision 72100cc43262fa39821b3debc04be1a7a17b3428
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
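/*
 * Added note (not in the original source): the _dump_buf_* globals above are
 * scratch buffers used elsewhere in the driver for BlockGuard (T10 DIF)
 * debugging dumps, guarded by _dump_buf_lock. lpfc_hba_index is an IDR that
 * hands out a small unique instance number per HBA; with the IDR API of this
 * kernel generation, an allocation sketch would be:
 *
 *	int brd_no = -1;
 *	if (idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
 *		idr_get_new(&lpfc_hba_index, NULL, &brd_no);
 */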
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

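/*
 * Added illustration (hedged, not original driver code): the completion
 * handlers below follow the driver's standard asynchronous mailbox pattern.
 * The issuing path allocates an LPFC_MBOXQ_t from the mempool, points
 * mbox_cmpl at a handler and issues with MBX_NOWAIT; the handler then owns
 * the buffer and must free it back to the pool on every path:
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
 *	pmb->mbox_cmpl = lpfc_config_async_cmpl;  (the handler frees pmb)
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */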
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it sets the internal async event support
 * flag to 1; otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake-up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human-readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

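/*
 * Added worked example (hedged): with the sprintf formats above, a
 * hypothetical decode of ver=10, rev=2, lev=7, dist=1 ('a') and num=12
 * produces the string "10.27a12", while the dist=3/num=0 release case
 * drops the suffix and produces "10.27".
 */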
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	lpfc_read_sparam(phba, pmb, 0);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

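	/*
	 * Added note (not in the original source): the check below validates
	 * the user-requested link speed against the link-speed mask (lmt)
	 * reported by READ_CONFIG; e.g. LINK_SPEED_4G is only honored when
	 * the LMT_4Gb bit is set, otherwise the setting falls back to
	 * LINK_SPEED_AUTO.
	 */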
457 "Reset link speed to auto: x%x\n", 458 phba->cfg_link_speed); 459 phba->cfg_link_speed = LINK_SPEED_AUTO; 460 } 461 462 phba->link_state = LPFC_LINK_DOWN; 463 464 /* Only process IOCBs on ELS ring till hba_state is READY */ 465 if (psli->ring[psli->extra_ring].cmdringaddr) 466 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 467 if (psli->ring[psli->fcp_ring].cmdringaddr) 468 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 469 if (psli->ring[psli->next_ring].cmdringaddr) 470 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 471 472 /* Post receive buffers for desired rings */ 473 if (phba->sli_rev != 3) 474 lpfc_post_rcv_buf(phba); 475 476 /* 477 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 478 */ 479 if (phba->intr_type == MSIX) { 480 rc = lpfc_config_msi(phba, pmb); 481 if (rc) { 482 mempool_free(pmb, phba->mbox_mem_pool); 483 return -EIO; 484 } 485 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 486 if (rc != MBX_SUCCESS) { 487 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 488 "0352 Config MSI mailbox command " 489 "failed, mbxCmd x%x, mbxStatus x%x\n", 490 pmb->u.mb.mbxCommand, 491 pmb->u.mb.mbxStatus); 492 mempool_free(pmb, phba->mbox_mem_pool); 493 return -EIO; 494 } 495 } 496 497 spin_lock_irq(&phba->hbalock); 498 /* Initialize ERATT handling flag */ 499 phba->hba_flag &= ~HBA_ERATT_HANDLED; 500 501 /* Enable appropriate host interrupts */ 502 status = readl(phba->HCregaddr); 503 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 504 if (psli->num_rings > 0) 505 status |= HC_R0INT_ENA; 506 if (psli->num_rings > 1) 507 status |= HC_R1INT_ENA; 508 if (psli->num_rings > 2) 509 status |= HC_R2INT_ENA; 510 if (psli->num_rings > 3) 511 status |= HC_R3INT_ENA; 512 513 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 514 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 515 status &= ~(HC_R0INT_ENA); 516 517 writel(status, phba->HCregaddr); 518 readl(phba->HCregaddr); /* flush */ 519 spin_unlock_irq(&phba->hbalock); 520 521 /* Set up ring-0 (ELS) timer */ 522 timeout = phba->fc_ratov * 2; 523 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 524 /* Set up heart beat (HB) timer */ 525 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 526 phba->hb_outstanding = 0; 527 phba->last_completion_time = jiffies; 528 /* Set up error attention (ERATT) polling timer */ 529 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 530 531 if (phba->hba_flag & LINK_DISABLED) { 532 lpfc_printf_log(phba, 533 KERN_ERR, LOG_INIT, 534 "2598 Adapter Link is disabled.\n"); 535 lpfc_down_link(phba, pmb); 536 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 537 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 538 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 539 lpfc_printf_log(phba, 540 KERN_ERR, LOG_INIT, 541 "2599 Adapter failed to issue DOWN_LINK" 542 " mbox command rc 0x%x\n", rc); 543 544 mempool_free(pmb, phba->mbox_mem_pool); 545 return -EIO; 546 } 547 } else if (phba->cfg_suppress_link_up == 0) { 548 lpfc_init_link(phba, pmb, phba->cfg_topology, 549 phba->cfg_link_speed); 550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 551 lpfc_set_loopback_flag(phba); 552 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 553 if (rc != MBX_SUCCESS) { 554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 555 "0454 Adapter failed to init, mbxCmd x%x " 556 "INIT_LINK, mbxStatus x%x\n", 557 mb->mbxCommand, mb->mbxStatus); 558 559 /* Clear all interrupt enable conditions */ 560 writel(0, phba->HCregaddr); 561 readl(phba->HCregaddr); /* flush */ 562 /* Clear all 
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology,
		phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = 0;

	return 0;
}

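/*
 * Added usage sketch (hedged, not driver code): with the
 * lpfc_suppress_link_up module parameter set, lpfc_config_port_post()
 * skips INIT_LINK, and a later management request can drive the link
 * through the two helpers made available via the lpfc_hba structure:
 *
 *	if (lpfc_hba_init_link(phba))	(bring the link up)
 *		return -EIO;
 *	...
 *	if (lpfc_hba_down_link(phba))	(take the link down)
 *		return -EIO;
 */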
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use in stopping the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2522 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

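/*
 * Added note: lpfc_hba_down_post() above dispatches through the driver's
 * SLI-3/SLI-4 API jump table; during API table setup the driver points the
 * member at the matching routine, conceptually:
 *
 *	phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;	(SLI-3 port)
 *	phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;	(SLI-4 port)
 */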
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expires
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

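/*
 * Added timing sketch: the heartbeat logic below runs on two windows. While
 * idle the timer re-arms every LPFC_HB_MBOX_INTERVAL (5) seconds; once a
 * heartbeat mailbox is outstanding it re-arms at LPFC_HB_MBOX_TIMEOUT (30)
 * seconds. The conversion from seconds to timer ticks is:
 *
 *	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
 *
 * i.e. with HZ == 250 the timer fires 5 * 250 = 1250 jiffies from now.
 */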
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer has fired and an HBA-timeout event has been
 * posted. This handler performs any periodic operations needed for the
 * device. If such a periodic event has already been attended to either in
 * the interrupt handler or by processing slow-ring or fast-ring events
 * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
 * simply resets the timer for the next timeout period. If the lpfc
 * heart-beat mailbox command is configured and there is no heart-beat
 * mailbox command outstanding, a heart-beat mailbox is issued and the timer
 * set properly. Otherwise, if there has been a heart-beat mailbox command
 * outstanding, the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If the heartbeat timeout is called with
			 * hb_outstanding set, we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

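/*
 * Added note: the error-attention helpers below share one offline sequence:
 * lpfc_offline_prep() blocks management I/O and stops discovery,
 * lpfc_offline() takes the port down, the board is reset, the down-post
 * cleanup runs, and management I/O is unblocked with link_state left at
 * LPFC_HBA_ERROR.
 */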
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * The firmware stops when it triggers erratt; that can cause I/Os to
	 * be dropped by the firmware. Error the iocbs (I/O) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * The firmware stops when it triggers erratt with HS_FFER6.
		 * That can cause I/Os to be dropped by the firmware. Error
		 * the iocbs (I/O) on the txcmplq and let the SCSI layer
		 * retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine through the API jump table function pointer in the lpfc_hba
 * struct. (Note: this wrapper returns void, so the stale return-code list
 * has been dropped from this comment.)
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

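/*
 * Added reference sketch (hedged): lpfc_parse_vpd() below walks standard PCI
 * VPD resource tags. A minimal image it accepts looks like:
 *
 *	0x82 len_lo len_hi <product string>	identifier resource
 *	0x90 len_lo len_hi			read-only keyword fields:
 *	    'S' 'N' len <serial number>
 *	    'V' '1' len <model description>
 *	    'V' '2' len <model name>
 *	    'V' '3' len <program type>
 *	    'V' '4' len <port identifier>
 *	0x78					end tag
 */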
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				}
				else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars.
 * It shall be returned with the model name, maximum speed, and the host bus
 * type. The @mdp passed into this function points to an array of 80 chars.
 * When the function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
Adapter"}; 1730 break; 1731 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1732 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1733 GE = 1; 1734 break; 1735 case PCI_DEVICE_ID_ZMID: 1736 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1737 break; 1738 case PCI_DEVICE_ID_ZSMB: 1739 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1740 break; 1741 case PCI_DEVICE_ID_LP101: 1742 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1743 break; 1744 case PCI_DEVICE_ID_LP10000S: 1745 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1746 break; 1747 case PCI_DEVICE_ID_LP11000S: 1748 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1749 break; 1750 case PCI_DEVICE_ID_LPE11000S: 1751 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1752 break; 1753 case PCI_DEVICE_ID_SAT: 1754 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1755 break; 1756 case PCI_DEVICE_ID_SAT_MID: 1757 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1758 break; 1759 case PCI_DEVICE_ID_SAT_SMB: 1760 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1761 break; 1762 case PCI_DEVICE_ID_SAT_DCSP: 1763 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1764 break; 1765 case PCI_DEVICE_ID_SAT_SCSP: 1766 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1767 break; 1768 case PCI_DEVICE_ID_SAT_S: 1769 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1770 break; 1771 case PCI_DEVICE_ID_HORNET: 1772 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1773 GE = 1; 1774 break; 1775 case PCI_DEVICE_ID_PROTEUS_VF: 1776 m = (typeof(m)){"LPev12000", "PCIe IOV", 1777 "Fibre Channel Adapter"}; 1778 break; 1779 case PCI_DEVICE_ID_PROTEUS_PF: 1780 m = (typeof(m)){"LPev12000", "PCIe IOV", 1781 "Fibre Channel Adapter"}; 1782 break; 1783 case PCI_DEVICE_ID_PROTEUS_S: 1784 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1785 "Fibre Channel Adapter"}; 1786 break; 1787 case PCI_DEVICE_ID_TIGERSHARK: 1788 oneConnect = 1; 1789 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1790 break; 1791 case PCI_DEVICE_ID_TOMCAT: 1792 oneConnect = 1; 1793 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1794 break; 1795 case PCI_DEVICE_ID_FALCON: 1796 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1797 "EmulexSecure Fibre"}; 1798 break; 1799 default: 1800 m = (typeof(m)){"Unknown", "", ""}; 1801 break; 1802 } 1803 1804 if (mdp && mdp[0] == '\0') 1805 snprintf(mdp, 79,"%s", m.name); 1806 /* oneConnect hba requires special processing, they are all initiators 1807 * and we put the port number on the end 1808 */ 1809 if (descp && descp[0] == '\0') { 1810 if (oneConnect) 1811 snprintf(descp, 255, 1812 "Emulex OneConnect %s, %s Initiator, Port %s", 1813 m.name, m.function, 1814 phba->Port); 1815 else 1816 snprintf(descp, 255, 1817 "Emulex %s %d%s %s %s", 1818 m.name, max_speed, (GE) ? "GE" : "Gb", 1819 m.bus, m.function); 1820 } 1821} 1822 1823/** 1824 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1825 * @phba: pointer to lpfc hba data structure. 1826 * @pring: pointer to a IOCB ring. 1827 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1828 * 1829 * This routine posts a given number of IOCBs with the associated DMA buffer 1830 * descriptors specified by the cnt argument to the given IOCB ring. 1831 * 1832 * Return codes 1833 * The number of IOCBs NOT able to be posted to the IOCB ring. 
1834 **/ 1835int 1836lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1837{ 1838 IOCB_t *icmd; 1839 struct lpfc_iocbq *iocb; 1840 struct lpfc_dmabuf *mp1, *mp2; 1841 1842 cnt += pring->missbufcnt; 1843 1844 /* While there are buffers to post */ 1845 while (cnt > 0) { 1846 /* Allocate buffer for command iocb */ 1847 iocb = lpfc_sli_get_iocbq(phba); 1848 if (iocb == NULL) { 1849 pring->missbufcnt = cnt; 1850 return cnt; 1851 } 1852 icmd = &iocb->iocb; 1853 1854 /* 2 buffers can be posted per command */ 1855 /* Allocate buffer to post */ 1856 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1857 if (mp1) 1858 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1859 if (!mp1 || !mp1->virt) { 1860 kfree(mp1); 1861 lpfc_sli_release_iocbq(phba, iocb); 1862 pring->missbufcnt = cnt; 1863 return cnt; 1864 } 1865 1866 INIT_LIST_HEAD(&mp1->list); 1867 /* Allocate buffer to post */ 1868 if (cnt > 1) { 1869 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1870 if (mp2) 1871 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1872 &mp2->phys); 1873 if (!mp2 || !mp2->virt) { 1874 kfree(mp2); 1875 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1876 kfree(mp1); 1877 lpfc_sli_release_iocbq(phba, iocb); 1878 pring->missbufcnt = cnt; 1879 return cnt; 1880 } 1881 1882 INIT_LIST_HEAD(&mp2->list); 1883 } else { 1884 mp2 = NULL; 1885 } 1886 1887 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1888 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1889 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1890 icmd->ulpBdeCount = 1; 1891 cnt--; 1892 if (mp2) { 1893 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1894 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1895 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1896 cnt--; 1897 icmd->ulpBdeCount = 2; 1898 } 1899 1900 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1901 icmd->ulpLe = 1; 1902 1903 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 1904 IOCB_ERROR) { 1905 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1906 kfree(mp1); 1907 cnt++; 1908 if (mp2) { 1909 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1910 kfree(mp2); 1911 cnt++; 1912 } 1913 lpfc_sli_release_iocbq(phba, iocb); 1914 pring->missbufcnt = cnt; 1915 return cnt; 1916 } 1917 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1918 if (mp2) 1919 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1920 } 1921 pring->missbufcnt = 0; 1922 return 0; 1923} 1924 1925/** 1926 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 1927 * @phba: pointer to lpfc hba data structure. 1928 * 1929 * This routine posts initial receive IOCB buffers to the ELS ring. The 1930 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 1931 * set to 64 IOCBs. 1932 * 1933 * Return codes 1934 * 0 - success (currently always success) 1935 **/ 1936static int 1937lpfc_post_rcv_buf(struct lpfc_hba *phba) 1938{ 1939 struct lpfc_sli *psli = &phba->sli; 1940 1941 /* Ring 0, ELS / CT buffers */ 1942 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 1943 /* Ring 2 - FCP no buffers needed */ 1944 1945 return 0; 1946} 1947 1948#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1949 1950/** 1951 * lpfc_sha_init - Set up initial array of hash table entries 1952 * @HashResultPointer: pointer to an array as hash table. 1953 * 1954 * This routine sets up the initial values to the array of hash table entries 1955 * for the LC HBAs. 
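 * * The five constants written below are the standard SHA-1 initialization * vector (H0 through H4). Illustrative use (sketch only): * uint32_t hash[5]; * lpfc_sha_init(hash); * hash[] now holds 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476 and 0xC3D2E1F0.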
1956 **/ 1957static void 1958lpfc_sha_init(uint32_t * HashResultPointer) 1959{ 1960 HashResultPointer[0] = 0x67452301; 1961 HashResultPointer[1] = 0xEFCDAB89; 1962 HashResultPointer[2] = 0x98BADCFE; 1963 HashResultPointer[3] = 0x10325476; 1964 HashResultPointer[4] = 0xC3D2E1F0; 1965} 1966 1967/** 1968 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 1969 * @HashResultPointer: pointer to an initial/result hash table. 1970 * @HashWorkingPointer: pointer to a working hash table. 1971 * 1972 * This routine iterates an initial hash table pointed to by @HashResultPointer 1973 * with the values from the working hash table pointed to by @HashWorkingPointer. 1974 * The results are put back into the initial hash table and returned through 1975 * the @HashResultPointer as the result hash table. 1976 **/ 1977static void 1978lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1979{ 1980 int t; 1981 uint32_t TEMP; 1982 uint32_t A, B, C, D, E; 1983 t = 16; 1984 do { 1985 HashWorkingPointer[t] = 1986 S(1, 1987 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1988 8] ^ 1989 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1990 } while (++t <= 79); 1991 t = 0; 1992 A = HashResultPointer[0]; 1993 B = HashResultPointer[1]; 1994 C = HashResultPointer[2]; 1995 D = HashResultPointer[3]; 1996 E = HashResultPointer[4]; 1997 1998 do { 1999 if (t < 20) { 2000 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2001 } else if (t < 40) { 2002 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2003 } else if (t < 60) { 2004 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2005 } else { 2006 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2007 } 2008 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2009 E = D; 2010 D = C; 2011 C = S(30, B); 2012 B = A; 2013 A = TEMP; 2014 } while (++t <= 79); 2015 2016 HashResultPointer[0] += A; 2017 HashResultPointer[1] += B; 2018 HashResultPointer[2] += C; 2019 HashResultPointer[3] += D; 2020 HashResultPointer[4] += E; 2021 2022} 2023 2024/** 2025 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2026 * @RandomChallenge: pointer to the entry of host challenge random number array. 2027 * @HashWorking: pointer to the entry of the working hash array. 2028 * 2029 * This routine calculates the working hash array referred to by @HashWorking 2030 * from the challenge random numbers associated with the host, referred to by 2031 * @RandomChallenge. The result is put into the entry of the working hash 2032 * array and returned by reference through @HashWorking. 2033 **/ 2034static void 2035lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2036{ 2037 *HashWorking = (*RandomChallenge ^ *HashWorking); 2038} 2039 2040/** 2041 * lpfc_hba_init - Perform special handling for LC HBA initialization 2042 * @phba: pointer to lpfc hba data structure. 2043 * @hbainit: pointer to an array of unsigned 32-bit integers. 2044 * 2045 * This routine performs the special handling for LC HBA initialization.
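 * * Illustrative call (a sketch; @hbainit is assumed to be a 5-word array, * matching the digest produced by lpfc_sha_init()/lpfc_sha_iterate()): * uint32_t hbainit[5]; * lpfc_hba_init(phba, hbainit); * On return, hbainit[] holds the digest of the WWNN-seeded working array * XORed with the host's RandomData challenge words.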
2046 **/ 2047void 2048lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2049{ 2050 int t; 2051 uint32_t *HashWorking; 2052 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2053 2054 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2055 if (!HashWorking) 2056 return; 2057 2058 HashWorking[0] = HashWorking[78] = *pwwnn++; 2059 HashWorking[1] = HashWorking[79] = *pwwnn; 2060 2061 for (t = 0; t < 7; t++) 2062 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2063 2064 lpfc_sha_init(hbainit); 2065 lpfc_sha_iterate(hbainit, HashWorking); 2066 kfree(HashWorking); 2067} 2068 2069/** 2070 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2071 * @vport: pointer to a virtual N_Port data structure. 2072 * 2073 * This routine performs the necessary cleanups before deleting the @vport. 2074 * It invokes the discovery state machine to perform necessary state 2075 * transitions and to release the ndlps associated with the @vport. Note, 2076 * the physical port is treated as @vport 0. 2077 **/ 2078void 2079lpfc_cleanup(struct lpfc_vport *vport) 2080{ 2081 struct lpfc_hba *phba = vport->phba; 2082 struct lpfc_nodelist *ndlp, *next_ndlp; 2083 int i = 0; 2084 2085 if (phba->link_state > LPFC_LINK_DOWN) 2086 lpfc_port_link_failure(vport); 2087 2088 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2089 if (!NLP_CHK_NODE_ACT(ndlp)) { 2090 ndlp = lpfc_enable_node(vport, ndlp, 2091 NLP_STE_UNUSED_NODE); 2092 if (!ndlp) 2093 continue; 2094 spin_lock_irq(&phba->ndlp_lock); 2095 NLP_SET_FREE_REQ(ndlp); 2096 spin_unlock_irq(&phba->ndlp_lock); 2097 /* Trigger the release of the ndlp memory */ 2098 lpfc_nlp_put(ndlp); 2099 continue; 2100 } 2101 spin_lock_irq(&phba->ndlp_lock); 2102 if (NLP_CHK_FREE_REQ(ndlp)) { 2103 /* The ndlp should not already be in memory free mode */ 2104 spin_unlock_irq(&phba->ndlp_lock); 2105 continue; 2106 } else 2107 /* Indicate request for freeing ndlp memory */ 2108 NLP_SET_FREE_REQ(ndlp); 2109 spin_unlock_irq(&phba->ndlp_lock); 2110 2111 if (vport->port_type != LPFC_PHYSICAL_PORT && 2112 ndlp->nlp_DID == Fabric_DID) { 2113 /* Just free up ndlp with Fabric_DID for vports */ 2114 lpfc_nlp_put(ndlp); 2115 continue; 2116 } 2117 2118 if (ndlp->nlp_type & NLP_FABRIC) 2119 lpfc_disc_state_machine(vport, ndlp, NULL, 2120 NLP_EVT_DEVICE_RECOVERY); 2121 2122 lpfc_disc_state_machine(vport, ndlp, NULL, 2123 NLP_EVT_DEVICE_RM); 2124 2125 } 2126 2127 /* At this point, ALL ndlps should be gone 2128 * because of the previous NLP_EVT_DEVICE_RM. 2129 * Let's wait for this to happen, if needed. 2130 */ 2131 while (!list_empty(&vport->fc_nodes)) { 2132 if (i++ > 3000) { 2133 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2134 "0233 Nodelist not empty\n"); 2135 list_for_each_entry_safe(ndlp, next_ndlp, 2136 &vport->fc_nodes, nlp_listp) { 2137 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2138 LOG_NODE, 2139 "0282 did:x%x ndlp:x%p " 2140 "usgmap:x%x refcnt:%d\n", 2141 ndlp->nlp_DID, (void *)ndlp, 2142 ndlp->nlp_usg_map, 2143 atomic_read( 2144 &ndlp->kref.refcount)); 2145 } 2146 break; 2147 } 2148 2149 /* Wait for any activity on ndlps to settle */ 2150 msleep(10); 2151 } 2152} 2153 2154/** 2155 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2156 * @vport: pointer to a virtual N_Port data structure. 2157 * 2158 * This routine stops all the timers associated with a @vport. This function 2159 * is invoked before disabling or deleting a @vport. Note that the physical 2160 * port is treated as @vport 0.
2161 **/ 2162void 2163lpfc_stop_vport_timers(struct lpfc_vport *vport) 2164{ 2165 del_timer_sync(&vport->els_tmofunc); 2166 del_timer_sync(&vport->fc_fdmitmo); 2167 lpfc_can_disctmo(vport); 2168 return; 2169} 2170 2171/** 2172 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2173 * @phba: pointer to lpfc hba data structure. 2174 * 2175 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2176 * caller of this routine should already hold the host lock. 2177 **/ 2178void 2179__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2180{ 2181 /* Clear pending FCF rediscovery wait timer */ 2182 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2183 /* Now, try to stop the timer */ 2184 del_timer(&phba->fcf.redisc_wait); 2185} 2186 2187/** 2188 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2189 * @phba: pointer to lpfc hba data structure. 2190 * 2191 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2192 * checks whether the FCF rediscovery wait timer is pending with the host 2193 * lock held before proceeding with disabling the timer and clearing the 2194 * wait timer pending flag. 2195 **/ 2196void 2197lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2198{ 2199 spin_lock_irq(&phba->hbalock); 2200 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2201 /* FCF rediscovery timer already fired or stopped */ 2202 spin_unlock_irq(&phba->hbalock); 2203 return; 2204 } 2205 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2206 spin_unlock_irq(&phba->hbalock); 2207} 2208 2209/** 2210 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2211 * @phba: pointer to lpfc hba data structure. 2212 * 2213 * This routine stops all the timers associated with a HBA. This function is 2214 * invoked before either putting a HBA offline or unloading the driver. 2215 **/ 2216void 2217lpfc_stop_hba_timers(struct lpfc_hba *phba) 2218{ 2219 lpfc_stop_vport_timers(phba->pport); 2220 del_timer_sync(&phba->sli.mbox_tmo); 2221 del_timer_sync(&phba->fabric_block_timer); 2222 del_timer_sync(&phba->eratt_poll); 2223 del_timer_sync(&phba->hb_tmofunc); 2224 phba->hb_outstanding = 0; 2225 2226 switch (phba->pci_dev_grp) { 2227 case LPFC_PCI_DEV_LP: 2228 /* Stop any LightPulse device specific driver timers */ 2229 del_timer_sync(&phba->fcp_poll_timer); 2230 break; 2231 case LPFC_PCI_DEV_OC: 2232 /* Stop any OneConnect device specific driver timers */ 2233 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2234 break; 2235 default: 2236 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2237 "0297 Invalid device group (x%x)\n", 2238 phba->pci_dev_grp); 2239 break; 2240 } 2241 return; 2242} 2243 2244/** 2245 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2246 * @phba: pointer to lpfc hba data structure. 2247 * 2248 * This routine marks a HBA's management interface as blocked. Once the HBA's 2249 * management interface is marked as blocked, all user space access to 2250 * the HBA, whether through the sysfs interface or the libdfc interface, 2251 * is blocked. The HBA is set to block the management interface when the 2252 * driver prepares the HBA interface for online or offline.
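 * * Typical pairing, as used by the online/offline paths below (sketch): * lpfc_block_mgmt_io(phba); * reconfigure or reset the HBA here * lpfc_unblock_mgmt_io(phba);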
2253 **/ 2254static void 2255lpfc_block_mgmt_io(struct lpfc_hba * phba) 2256{ 2257 unsigned long iflag; 2258 2259 spin_lock_irqsave(&phba->hbalock, iflag); 2260 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2261 spin_unlock_irqrestore(&phba->hbalock, iflag); 2262} 2263 2264/** 2265 * lpfc_online - Initialize and bring a HBA online 2266 * @phba: pointer to lpfc hba data structure. 2267 * 2268 * This routine initializes the HBA and brings a HBA online. During this 2269 * process, the management interface is blocked to prevent user space access 2270 * to the HBA interfering with the driver initialization. 2271 * 2272 * Return codes 2273 * 0 - successful 2274 * 1 - failed 2275 **/ 2276int 2277lpfc_online(struct lpfc_hba *phba) 2278{ 2279 struct lpfc_vport *vport; 2280 struct lpfc_vport **vports; 2281 int i; 2282 2283 if (!phba) 2284 return 0; 2285 vport = phba->pport; 2286 2287 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2288 return 0; 2289 2290 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2291 "0458 Bring Adapter online\n"); 2292 2293 lpfc_block_mgmt_io(phba); 2294 2295 if (!lpfc_sli_queue_setup(phba)) { 2296 lpfc_unblock_mgmt_io(phba); 2297 return 1; 2298 } 2299 2300 if (phba->sli_rev == LPFC_SLI_REV4) { 2301 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2302 lpfc_unblock_mgmt_io(phba); 2303 return 1; 2304 } 2305 } else { 2306 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2307 lpfc_unblock_mgmt_io(phba); 2308 return 1; 2309 } 2310 } 2311 2312 vports = lpfc_create_vport_work_array(phba); 2313 if (vports != NULL) 2314 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2315 struct Scsi_Host *shost; 2316 shost = lpfc_shost_from_vport(vports[i]); 2317 spin_lock_irq(shost->host_lock); 2318 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2319 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2320 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2321 if (phba->sli_rev == LPFC_SLI_REV4) 2322 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2323 spin_unlock_irq(shost->host_lock); 2324 } 2325 lpfc_destroy_vport_work_array(phba, vports); 2326 2327 lpfc_unblock_mgmt_io(phba); 2328 return 0; 2329} 2330 2331/** 2332 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2333 * @phba: pointer to lpfc hba data structure. 2334 * 2335 * This routine marks a HBA's management interface as not blocked. Once the 2336 * HBA's management interface is marked as not blocked, all user space 2337 * access to the HBA, whether through the sysfs interface or the libdfc 2338 * interface, is allowed. The HBA is set to block the management interface 2339 * when the driver prepares the HBA interface for online or offline and then 2340 * set to unblock the management interface afterwards. 2341 **/ 2342void 2343lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2344{ 2345 unsigned long iflag; 2346 2347 spin_lock_irqsave(&phba->hbalock, iflag); 2348 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2349 spin_unlock_irqrestore(&phba->hbalock, iflag); 2350} 2351 2352/** 2353 * lpfc_offline_prep - Prepare a HBA to be brought offline 2354 * @phba: pointer to lpfc hba data structure. 2355 * 2356 * This routine is invoked to prepare a HBA to be brought offline. It issues 2357 * an unreg_login to all the nodes on all vports and flushes the mailbox 2358 * queue to make the HBA ready to be brought offline.
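 * * A full reset built from these primitives looks like this (this is exactly * the sequence lpfc_reset_hba() uses): * lpfc_offline_prep(phba); * lpfc_offline(phba); * lpfc_sli_brdrestart(phba); * lpfc_online(phba); * lpfc_unblock_mgmt_io(phba);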
2359 **/ 2360void 2361lpfc_offline_prep(struct lpfc_hba * phba) 2362{ 2363 struct lpfc_vport *vport = phba->pport; 2364 struct lpfc_nodelist *ndlp, *next_ndlp; 2365 struct lpfc_vport **vports; 2366 struct Scsi_Host *shost; 2367 int i; 2368 2369 if (vport->fc_flag & FC_OFFLINE_MODE) 2370 return; 2371 2372 lpfc_block_mgmt_io(phba); 2373 2374 lpfc_linkdown(phba); 2375 2376 /* Issue an unreg_login to all nodes on all vports */ 2377 vports = lpfc_create_vport_work_array(phba); 2378 if (vports != NULL) { 2379 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2380 if (vports[i]->load_flag & FC_UNLOADING) 2381 continue; 2382 shost = lpfc_shost_from_vport(vports[i]); 2383 spin_lock_irq(shost->host_lock); 2384 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2385 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2386 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2387 spin_unlock_irq(shost->host_lock); 2388 2389 shost = lpfc_shost_from_vport(vports[i]); 2390 list_for_each_entry_safe(ndlp, next_ndlp, 2391 &vports[i]->fc_nodes, 2392 nlp_listp) { 2393 if (!NLP_CHK_NODE_ACT(ndlp)) 2394 continue; 2395 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2396 continue; 2397 if (ndlp->nlp_type & NLP_FABRIC) { 2398 lpfc_disc_state_machine(vports[i], ndlp, 2399 NULL, NLP_EVT_DEVICE_RECOVERY); 2400 lpfc_disc_state_machine(vports[i], ndlp, 2401 NULL, NLP_EVT_DEVICE_RM); 2402 } 2403 spin_lock_irq(shost->host_lock); 2404 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2405 spin_unlock_irq(shost->host_lock); 2406 lpfc_unreg_rpi(vports[i], ndlp); 2407 } 2408 } 2409 } 2410 lpfc_destroy_vport_work_array(phba, vports); 2411 2412 lpfc_sli_mbox_sys_shutdown(phba); 2413} 2414 2415/** 2416 * lpfc_offline - Bring a HBA offline 2417 * @phba: pointer to lpfc hba data structure. 2418 * 2419 * This routine actually brings a HBA offline. It stops all the timers 2420 * associated with the HBA, brings down the SLI layer, and eventually 2421 * marks the HBA as in offline state for the upper layer protocol. 2422 **/ 2423void 2424lpfc_offline(struct lpfc_hba *phba) 2425{ 2426 struct Scsi_Host *shost; 2427 struct lpfc_vport **vports; 2428 int i; 2429 2430 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2431 return; 2432 2433 /* stop port and all timers associated with this hba */ 2434 lpfc_stop_port(phba); 2435 vports = lpfc_create_vport_work_array(phba); 2436 if (vports != NULL) 2437 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2438 lpfc_stop_vport_timers(vports[i]); 2439 lpfc_destroy_vport_work_array(phba, vports); 2440 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2441 "0460 Bring Adapter offline\n"); 2442 /* Bring down the SLI Layer and cleanup. The HBA is offline 2443 now. */ 2444 lpfc_sli_hba_down(phba); 2445 spin_lock_irq(&phba->hbalock); 2446 phba->work_ha = 0; 2447 spin_unlock_irq(&phba->hbalock); 2448 vports = lpfc_create_vport_work_array(phba); 2449 if (vports != NULL) 2450 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2451 shost = lpfc_shost_from_vport(vports[i]); 2452 spin_lock_irq(shost->host_lock); 2453 vports[i]->work_port_events = 0; 2454 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2455 spin_unlock_irq(shost->host_lock); 2456 } 2457 lpfc_destroy_vport_work_array(phba, vports); 2458} 2459 2460/** 2461 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2462 * @phba: pointer to lpfc hba data structure. 2463 * 2464 * This routine is to free all the SCSI buffers and IOCBs from the driver 2465 * list back to kernel. 
It is called from lpfc_pci_remove_one to free 2466 * the internal resources before the device is removed from the system. 2467 * 2468 * Return codes 2469 * 0 - successful (for now, it always returns 0) 2470 **/ 2471static int 2472lpfc_scsi_free(struct lpfc_hba *phba) 2473{ 2474 struct lpfc_scsi_buf *sb, *sb_next; 2475 struct lpfc_iocbq *io, *io_next; 2476 2477 spin_lock_irq(&phba->hbalock); 2478 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2479 spin_lock(&phba->scsi_buf_list_lock); 2480 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2481 list_del(&sb->list); 2482 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2483 sb->dma_handle); 2484 kfree(sb); 2485 phba->total_scsi_bufs--; 2486 } 2487 spin_unlock(&phba->scsi_buf_list_lock); 2488 2489 /* Release all the lpfc_iocbq entries maintained by this host. */ 2490 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2491 list_del(&io->list); 2492 kfree(io); 2493 phba->total_iocbq_bufs--; 2494 } 2495 spin_unlock_irq(&phba->hbalock); 2496 return 0; 2497} 2498 2499/** 2500 * lpfc_create_port - Create an FC port 2501 * @phba: pointer to lpfc hba data structure. 2502 * @instance: a unique integer ID for this FC port. 2503 * @dev: pointer to the device data structure. 2504 * 2505 * This routine creates an FC port for the upper layer protocol. The FC port 2506 * can be created on top of either a physical port or a virtual port provided 2507 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2508 * and associates it with the newly created FC port before adding the shost 2509 * into the SCSI layer. 2510 * 2511 * Return codes 2512 * @vport - pointer to the virtual N_Port data structure. 2513 * NULL - port create failed. 2514 **/ 2515struct lpfc_vport * 2516lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2517{ 2518 struct lpfc_vport *vport; 2519 struct Scsi_Host *shost; 2520 int error = 0; 2521 2522 if (dev != &phba->pcidev->dev) 2523 shost = scsi_host_alloc(&lpfc_vport_template, 2524 sizeof(struct lpfc_vport)); 2525 else 2526 shost = scsi_host_alloc(&lpfc_template, 2527 sizeof(struct lpfc_vport)); 2528 if (!shost) 2529 goto out; 2530 2531 vport = (struct lpfc_vport *) shost->hostdata; 2532 vport->phba = phba; 2533 vport->load_flag |= FC_LOADING; 2534 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2535 vport->fc_rscn_flush = 0; 2536 2537 lpfc_get_vport_cfgparam(vport); 2538 shost->unique_id = instance; 2539 shost->max_id = LPFC_MAX_TARGET; 2540 shost->max_lun = vport->cfg_max_luns; 2541 shost->this_id = -1; 2542 shost->max_cmd_len = 16; 2543 if (phba->sli_rev == LPFC_SLI_REV4) { 2544 shost->dma_boundary = 2545 phba->sli4_hba.pc_sli4_params.sge_supp_len; 2546 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2547 } 2548 2549 /* 2550 * Set initial can_queue value since 0 is no longer supported and 2551 * scsi_add_host will fail. This will be adjusted later based on the 2552 * max xri value determined in hba setup. 2553 */ 2554 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2555 if (dev != &phba->pcidev->dev) { 2556 shost->transportt = lpfc_vport_transport_template; 2557 vport->port_type = LPFC_NPIV_PORT; 2558 } else { 2559 shost->transportt = lpfc_transport_template; 2560 vport->port_type = LPFC_PHYSICAL_PORT; 2561 } 2562 2563 /* Initialize all internally managed lists.
*/ 2564 INIT_LIST_HEAD(&vport->fc_nodes); 2565 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2566 spin_lock_init(&vport->work_port_lock); 2567 2568 init_timer(&vport->fc_disctmo); 2569 vport->fc_disctmo.function = lpfc_disc_timeout; 2570 vport->fc_disctmo.data = (unsigned long)vport; 2571 2572 init_timer(&vport->fc_fdmitmo); 2573 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2574 vport->fc_fdmitmo.data = (unsigned long)vport; 2575 2576 init_timer(&vport->els_tmofunc); 2577 vport->els_tmofunc.function = lpfc_els_timeout; 2578 vport->els_tmofunc.data = (unsigned long)vport; 2579 2580 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2581 if (error) 2582 goto out_put_shost; 2583 2584 spin_lock_irq(&phba->hbalock); 2585 list_add_tail(&vport->listentry, &phba->port_list); 2586 spin_unlock_irq(&phba->hbalock); 2587 return vport; 2588 2589out_put_shost: 2590 scsi_host_put(shost); 2591out: 2592 return NULL; 2593} 2594 2595/** 2596 * destroy_port - destroy an FC port 2597 * @vport: pointer to an lpfc virtual N_Port data structure. 2598 * 2599 * This routine destroys a FC port from the upper layer protocol. All the 2600 * resources associated with the port are released. 2601 **/ 2602void 2603destroy_port(struct lpfc_vport *vport) 2604{ 2605 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2606 struct lpfc_hba *phba = vport->phba; 2607 2608 lpfc_debugfs_terminate(vport); 2609 fc_remove_host(shost); 2610 scsi_remove_host(shost); 2611 2612 spin_lock_irq(&phba->hbalock); 2613 list_del_init(&vport->listentry); 2614 spin_unlock_irq(&phba->hbalock); 2615 2616 lpfc_cleanup(vport); 2617 return; 2618} 2619 2620/** 2621 * lpfc_get_instance - Get a unique integer ID 2622 * 2623 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2624 * uses the kernel idr facility to perform the task. 2625 * 2626 * Return codes: 2627 * instance - a unique integer ID allocated as the new instance. 2628 * -1 - lpfc get instance failed. 2629 **/ 2630int 2631lpfc_get_instance(void) 2632{ 2633 int instance = 0; 2634 2635 /* Assign an unused number */ 2636 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2637 return -1; 2638 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2639 return -1; 2640 return instance; 2641} 2642 2643/** 2644 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2645 * @shost: pointer to SCSI host data structure. 2646 * @time: elapsed time of the scan in jiffies. 2647 * 2648 * This routine is called by the SCSI layer with a SCSI host to determine 2649 * whether the scan host is finished. 2650 * 2651 * Note: there is no scan_start function as adapter initialization will have 2652 * asynchronously kicked off the link initialization. 2653 * 2654 * Return codes 2655 * 0 - SCSI host scan is not over yet. 2656 * 1 - SCSI host scan is over. 2657 **/ 2658int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2659{ 2660 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2661 struct lpfc_hba *phba = vport->phba; 2662 int stat = 0; 2663 2664 spin_lock_irq(shost->host_lock); 2665 2666 if (vport->load_flag & FC_UNLOADING) { 2667 stat = 1; 2668 goto finished; 2669 } 2670 if (time >= 30 * HZ) { 2671 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2672 "0461 Scanning longer than 30 " 2673 "seconds. Continuing initialization\n"); 2674 stat = 1; 2675 goto finished; 2676 } 2677 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2678 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2679 "0465 Link down longer than 15 " 2680 "seconds. 
Continuing initialization\n"); 2681 stat = 1; 2682 goto finished; 2683 } 2684 2685 if (vport->port_state != LPFC_VPORT_READY) 2686 goto finished; 2687 if (vport->num_disc_nodes || vport->fc_prli_sent) 2688 goto finished; 2689 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2690 goto finished; 2691 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2692 goto finished; 2693 2694 stat = 1; 2695 2696finished: 2697 spin_unlock_irq(shost->host_lock); 2698 return stat; 2699} 2700 2701/** 2702 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port 2703 * @shost: pointer to SCSI host data structure. 2704 * 2705 * This routine initializes the SCSI host attributes for a given FC port. The 2706 * SCSI host can be either on top of a physical port or a virtual port. 2707 **/ 2708void lpfc_host_attrib_init(struct Scsi_Host *shost) 2709{ 2710 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2711 struct lpfc_hba *phba = vport->phba; 2712 /* 2713 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup(). 2714 */ 2715 2716 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2717 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2718 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2719 2720 memset(fc_host_supported_fc4s(shost), 0, 2721 sizeof(fc_host_supported_fc4s(shost))); 2722 fc_host_supported_fc4s(shost)[2] = 1; 2723 fc_host_supported_fc4s(shost)[7] = 1; 2724 2725 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2726 sizeof fc_host_symbolic_name(shost)); 2727 2728 fc_host_supported_speeds(shost) = 0; 2729 if (phba->lmt & LMT_10Gb) 2730 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2731 if (phba->lmt & LMT_8Gb) 2732 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2733 if (phba->lmt & LMT_4Gb) 2734 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2735 if (phba->lmt & LMT_2Gb) 2736 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2737 if (phba->lmt & LMT_1Gb) 2738 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2739 2740 fc_host_maxframe_size(shost) = 2741 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2742 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2743 2744 /* This value is also unchanging */ 2745 memset(fc_host_active_fc4s(shost), 0, 2746 sizeof(fc_host_active_fc4s(shost))); 2747 fc_host_active_fc4s(shost)[2] = 1; 2748 fc_host_active_fc4s(shost)[7] = 1; 2749 2750 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2751 spin_lock_irq(shost->host_lock); 2752 vport->load_flag &= ~FC_LOADING; 2753 spin_unlock_irq(shost->host_lock); 2754} 2755 2756/** 2757 * lpfc_stop_port_s3 - Stop SLI3 device port 2758 * @phba: pointer to lpfc hba data structure. 2759 * 2760 * This routine is invoked to stop an SLI3 device port; it stops the device 2761 * from generating interrupts and stops the device driver's timers for the 2762 * device. 2763 **/ 2764static void 2765lpfc_stop_port_s3(struct lpfc_hba *phba) 2766{ 2767 /* Clear all interrupt enable conditions */ 2768 writel(0, phba->HCregaddr); 2769 readl(phba->HCregaddr); /* flush */ 2770 /* Clear all pending interrupts */ 2771 writel(0xffffffff, phba->HAregaddr); 2772 readl(phba->HAregaddr); /* flush */ 2773 2774 /* Reset some HBA SLI setup states */ 2775 lpfc_stop_hba_timers(phba); 2776 phba->pport->work_port_events = 0; 2777} 2778 2779/** 2780 * lpfc_stop_port_s4 - Stop SLI4 device port 2781 * @phba: pointer to lpfc hba data structure.
2782 * 2783 * This routine is invoked to stop an SLI4 device port; it stops the device 2784 * from generating interrupts and stops the device driver's timers for the 2785 * device. 2786 **/ 2787static void 2788lpfc_stop_port_s4(struct lpfc_hba *phba) 2789{ 2790 /* Reset some HBA SLI4 setup states */ 2791 lpfc_stop_hba_timers(phba); 2792 phba->pport->work_port_events = 0; 2793 phba->sli4_hba.intr_enable = 0; 2794} 2795 2796/** 2797 * lpfc_stop_port - Wrapper function for stopping hba port 2798 * @phba: Pointer to HBA context object. 2799 * 2800 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via 2801 * the API jump table function pointer in the lpfc_hba struct. 2802 **/ 2803void 2804lpfc_stop_port(struct lpfc_hba *phba) 2805{ 2806 phba->lpfc_stop_port(phba); 2807} 2808 2809/** 2810 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port. 2811 * @phba: pointer to lpfc hba data structure. 2812 * 2813 * This routine is invoked to remove the driver default fcf record from 2814 * the port. This routine currently acts on FCF Index 0. 2815 * 2816 **/ 2817void 2818lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) 2819{ 2820 int rc = 0; 2821 LPFC_MBOXQ_t *mboxq; 2822 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; 2823 uint32_t mbox_tmo, req_len; 2824 uint32_t shdr_status, shdr_add_status; 2825 2826 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2827 if (!mboxq) { 2828 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2829 "2020 Failed to allocate mbox for DEL_FCF cmd\n"); 2830 return; 2831 } 2832 2833 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - 2834 sizeof(struct lpfc_sli4_cfg_mhdr); 2835 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2836 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, 2837 req_len, LPFC_SLI4_MBX_EMBED); 2838 /* 2839 * In phase 1, there is a single FCF index, 0. In phase 2, the driver 2840 * supports multiple FCF indices. 2841 */ 2842 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; 2843 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); 2844 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, 2845 phba->fcf.current_rec.fcf_indx); 2846 2847 if (!phba->sli4_hba.intr_enable) 2848 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2849 else { 2850 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 2851 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 2852 } 2853 /* The IOCTL status is embedded in the mailbox subheader. */ 2854 shdr_status = bf_get(lpfc_mbox_hdr_status, 2855 &del_fcf_record->header.cfg_shdr.response); 2856 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 2857 &del_fcf_record->header.cfg_shdr.response); 2858 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 2859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2860 "2516 DEL FCF of default FCF Index failed " 2861 "mbx status x%x, status x%x add_status x%x\n", 2862 rc, shdr_status, shdr_add_status); 2863 } 2864 if (rc != MBX_TIMEOUT) 2865 mempool_free(mboxq, phba->mbox_mem_pool); 2866} 2867 2868/** 2869 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2870 * @phba: Pointer to hba for which this call is being executed. 2871 * 2872 * This routine starts the timer waiting for the FCF rediscovery to complete.
2873 **/ 2874void 2875lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 2876{ 2877 unsigned long fcf_redisc_wait_tmo = 2878 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 2879 /* Start fcf rediscovery wait period timer */ 2880 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 2881 spin_lock_irq(&phba->hbalock); 2882 /* Allow action on a new fcf asynchronous event */ 2883 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2884 /* Mark the FCF rediscovery pending state */ 2885 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 2886 spin_unlock_irq(&phba->hbalock); 2887} 2888 2889/** 2890 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 2891 * @ptr: unsigned long holding a pointer to the lpfc hba data structure. 2892 * 2893 * This routine is invoked when the wait for FCF table rediscovery times 2894 * out. If new FCF record(s) have been discovered during the 2895 * wait period, a new FCF event is added to the FCOE async event 2896 * list, and the worker thread is then woken up to process it from the 2897 * worker thread context. 2898 **/ 2899void 2900lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 2901{ 2902 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 2903 2904 /* Don't send FCF rediscovery event if timer cancelled */ 2905 spin_lock_irq(&phba->hbalock); 2906 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2907 spin_unlock_irq(&phba->hbalock); 2908 return; 2909 } 2910 /* Clear FCF rediscovery timer pending flag */ 2911 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2912 /* FCF rediscovery event to worker thread */ 2913 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2914 spin_unlock_irq(&phba->hbalock); 2915 /* wake up worker thread */ 2916 lpfc_worker_wake_up(phba); 2917} 2918 2919/** 2920 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support 2921 * @phba: pointer to lpfc hba data structure. 2922 * 2923 * This function uses the QUERY_FW_CFG mailbox command to determine if the 2924 * firmware loaded supports FCoE. A return of zero indicates that the mailbox 2925 * was successful and the firmware supports FCoE. Any other return indicates 2926 * an error. It is assumed that this function will be called before interrupts 2927 * are enabled. 2928 **/ 2929static int 2930lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) 2931{ 2932 int rc = 0; 2933 LPFC_MBOXQ_t *mboxq; 2934 struct lpfc_mbx_query_fw_cfg *query_fw_cfg; 2935 uint32_t length; 2936 uint32_t shdr_status, shdr_add_status; 2937 2938 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2939 if (!mboxq) { 2940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2941 "2621 Failed to allocate mbox for " 2942 "query firmware config cmd\n"); 2943 return -ENOMEM; 2944 } 2945 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; 2946 length = (sizeof(struct lpfc_mbx_query_fw_cfg) - 2947 sizeof(struct lpfc_sli4_cfg_mhdr)); 2948 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 2949 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 2950 length, LPFC_SLI4_MBX_EMBED); 2951 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2952 /* The IOCTL status is embedded in the mailbox subheader.
*/ 2953 shdr_status = bf_get(lpfc_mbox_hdr_status, 2954 &query_fw_cfg->header.cfg_shdr.response); 2955 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 2956 &query_fw_cfg->header.cfg_shdr.response); 2957 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 2958 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2959 "2622 Query Firmware Config failed " 2960 "mbx status x%x, status x%x add_status x%x\n", 2961 rc, shdr_status, shdr_add_status); /* Free the mailbox before bailing out; it would otherwise leak */ if (rc != MBX_TIMEOUT) mempool_free(mboxq, phba->mbox_mem_pool); 2962 return -EINVAL; 2963 } 2964 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { 2965 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2966 "2623 FCoE Function not supported by firmware. " 2967 "Function mode = %08x\n", 2968 query_fw_cfg->function_mode); /* Done with the mailbox; free it on this error path too */ if (rc != MBX_TIMEOUT) mempool_free(mboxq, phba->mbox_mem_pool); 2969 return -EINVAL; 2970 } 2971 if (rc != MBX_TIMEOUT) 2972 mempool_free(mboxq, phba->mbox_mem_pool); 2973 return 0; 2974} 2975 2976/** 2977 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 2978 * @phba: pointer to lpfc hba data structure. 2979 * @acqe_link: pointer to the async link completion queue entry. 2980 * 2981 * This routine is to parse the SLI4 link-attention link fault code and 2982 * translate it into the base driver's read link attention mailbox command 2983 * status. 2984 * 2985 * Return: Link-attention status in terms of base driver's coding. 2986 **/ 2987static uint16_t 2988lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 2989 struct lpfc_acqe_link *acqe_link) 2990{ 2991 uint16_t latt_fault; 2992 2993 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 2994 case LPFC_ASYNC_LINK_FAULT_NONE: 2995 case LPFC_ASYNC_LINK_FAULT_LOCAL: 2996 case LPFC_ASYNC_LINK_FAULT_REMOTE: 2997 latt_fault = 0; 2998 break; 2999 default: 3000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3001 "0398 Invalid link fault code: x%x\n", 3002 bf_get(lpfc_acqe_link_fault, acqe_link)); 3003 latt_fault = MBXERR_ERROR; 3004 break; 3005 } 3006 return latt_fault; 3007} 3008 3009/** 3010 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3011 * @phba: pointer to lpfc hba data structure. 3012 * @acqe_link: pointer to the async link completion queue entry. 3013 * 3014 * This routine is to parse the SLI4 link attention type and translate it 3015 * into the base driver's link attention type coding. 3016 * 3017 * Return: Link attention type in terms of base driver's coding. 3018 **/ 3019static uint8_t 3020lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3021 struct lpfc_acqe_link *acqe_link) 3022{ 3023 uint8_t att_type; 3024 3025 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3026 case LPFC_ASYNC_LINK_STATUS_DOWN: 3027 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3028 att_type = AT_LINK_DOWN; 3029 break; 3030 case LPFC_ASYNC_LINK_STATUS_UP: 3031 /* Ignore physical link up events - wait for logical link up */ 3032 att_type = AT_RESERVED; 3033 break; 3034 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3035 att_type = AT_LINK_UP; 3036 break; 3037 default: 3038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3039 "0399 Invalid link attention type: x%x\n", 3040 bf_get(lpfc_acqe_link_status, acqe_link)); 3041 att_type = AT_RESERVED; 3042 break; 3043 } 3044 return att_type; 3045} 3046 3047/** 3048 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3049 * @phba: pointer to lpfc hba data structure. 3050 * @acqe_link: pointer to the async link completion queue entry. 3051 * 3052 * This routine is to parse the SLI4 link-attention link speed and translate 3053 * it into the base driver's link-attention link speed coding. 3054 * 3055 * Return: Link-attention link speed in terms of base driver's coding.
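 * * Mapping performed by the switch below: * LPFC_ASYNC_LINK_SPEED_ZERO/10MBPS/100MBPS -> LA_UNKNW_LINK * LPFC_ASYNC_LINK_SPEED_1GBPS -> LA_1GHZ_LINK * LPFC_ASYNC_LINK_SPEED_10GBPS -> LA_10GHZ_LINK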
3056 **/ 3057static uint8_t 3058lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3059 struct lpfc_acqe_link *acqe_link) 3060{ 3061 uint8_t link_speed; 3062 3063 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3064 case LPFC_ASYNC_LINK_SPEED_ZERO: 3065 link_speed = LA_UNKNW_LINK; 3066 break; 3067 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3068 link_speed = LA_UNKNW_LINK; 3069 break; 3070 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3071 link_speed = LA_UNKNW_LINK; 3072 break; 3073 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3074 link_speed = LA_1GHZ_LINK; 3075 break; 3076 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3077 link_speed = LA_10GHZ_LINK; 3078 break; 3079 default: 3080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3081 "0483 Invalid link-attention link speed: x%x\n", 3082 bf_get(lpfc_acqe_link_speed, acqe_link)); 3083 link_speed = LA_UNKNW_LINK; 3084 break; 3085 } 3086 return link_speed; 3087} 3088 3089/** 3090 * lpfc_sli4_async_link_evt - Process the asynchronous link event 3091 * @phba: pointer to lpfc hba data structure. 3092 * @acqe_link: pointer to the async link completion queue entry. 3093 * 3094 * This routine is to handle the SLI4 asynchronous link event. 3095 **/ 3096static void 3097lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3098 struct lpfc_acqe_link *acqe_link) 3099{ 3100 struct lpfc_dmabuf *mp; 3101 LPFC_MBOXQ_t *pmb; 3102 MAILBOX_t *mb; 3103 READ_LA_VAR *la; 3104 uint8_t att_type; 3105 3106 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3107 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 3108 return; 3109 phba->fcoe_eventtag = acqe_link->event_tag; 3110 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3111 if (!pmb) { 3112 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3113 "0395 The mboxq allocation failed\n"); 3114 return; 3115 } 3116 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3117 if (!mp) { 3118 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3119 "0396 The lpfc_dmabuf allocation failed\n"); 3120 goto out_free_pmb; 3121 } 3122 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3123 if (!mp->virt) { 3124 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3125 "0397 The mbuf allocation failed\n"); 3126 goto out_free_dmabuf; 3127 } 3128 3129 /* Cleanup any outstanding ELS commands */ 3130 lpfc_els_flush_all_cmd(phba); 3131 3132 /* Block ELS IOCBs until we are done processing the link event */ 3133 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3134 3135 /* Update link event statistics */ 3136 phba->sli.slistat.link_event++; 3137 3138 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ 3139 lpfc_read_la(phba, pmb, mp); 3140 pmb->vport = phba->pport; 3141 3142 /* Parse and translate status field */ 3143 mb = &pmb->u.mb; 3144 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3145 3146 /* Parse and translate link attention fields */ 3147 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 3148 la->eventTag = acqe_link->event_tag; 3149 la->attType = att_type; 3150 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); 3151 3152 /* Fake the following irrelevant fields */ 3153 la->topology = TOPOLOGY_PT_PT; 3154 la->granted_AL_PA = 0; 3155 la->il = 0; 3156 la->pb = 0; 3157 la->fa = 0; 3158 la->mm = 0; 3159 3160 /* Keep the link status for extra SLI4 state machine reference */ 3161 phba->sli4_hba.link_state.speed = 3162 bf_get(lpfc_acqe_link_speed, acqe_link); 3163 phba->sli4_hba.link_state.duplex = 3164 bf_get(lpfc_acqe_link_duplex, acqe_link); 3165 phba->sli4_hba.link_state.status = 3166 bf_get(lpfc_acqe_link_status, acqe_link); 3167
phba->sli4_hba.link_state.physical = 3168 bf_get(lpfc_acqe_link_physical, acqe_link); 3169 phba->sli4_hba.link_state.fault = 3170 bf_get(lpfc_acqe_link_fault, acqe_link); 3171 phba->sli4_hba.link_state.logical_speed = 3172 bf_get(lpfc_acqe_qos_link_speed, acqe_link); 3173 3174 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3175 lpfc_mbx_cmpl_read_la(phba, pmb); 3176 3177 return; 3178 3179out_free_dmabuf: 3180 kfree(mp); 3181out_free_pmb: 3182 mempool_free(pmb, phba->mbox_mem_pool); 3183} 3184 3185/** 3186 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3187 * @phba: pointer to lpfc hba data structure. 3188 * @acqe_fcoe: pointer to the async fcoe completion queue entry. 3189 * 3190 * This routine is to handle the SLI4 asynchronous fcoe event. 3191 **/ 3192static void 3193lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, 3194 struct lpfc_acqe_fcoe *acqe_fcoe) 3195{ 3196 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3197 int rc; 3198 struct lpfc_vport *vport; 3199 struct lpfc_nodelist *ndlp; 3200 struct Scsi_Host *shost; 3201 uint32_t link_state; 3202 int active_vlink_present; 3203 struct lpfc_vport **vports; 3204 int i; 3205 3206 phba->fc_eventTag = acqe_fcoe->event_tag; 3207 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3208 switch (event_type) { 3209 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3210 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3211 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3212 "2546 New FCF found index 0x%x tag 0x%x\n", 3213 acqe_fcoe->index, 3214 acqe_fcoe->event_tag); 3215 spin_lock_irq(&phba->hbalock); 3216 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3217 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3218 /* 3219 * If the current FCF is in discovered state or 3220 * FCF discovery is in progress, do nothing. 3221 */ 3222 spin_unlock_irq(&phba->hbalock); 3223 break; 3224 } 3225 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3226 /* 3227 * If fast FCF failover rescan event is pending, 3228 * do nothing. 3229 */ 3230 spin_unlock_irq(&phba->hbalock); 3231 break; 3232 } 3233 spin_unlock_irq(&phba->hbalock); 3234 3235 /* Read the FCF table and re-discover SAN. */ 3236 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3237 if (rc) 3238 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3239 "2547 Read FCF record failed 0x%x\n", 3240 rc); 3241 break; 3242 3243 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3244 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3245 "2548 FCF Table full count 0x%x tag 0x%x\n", 3246 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 3247 acqe_fcoe->event_tag); 3248 break; 3249 3250 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3251 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3252 "2549 FCF disconnected from network index 0x%x" 3253 " tag 0x%x\n", acqe_fcoe->index, 3254 acqe_fcoe->event_tag); 3255 /* If the event is not for the currently used FCF, do nothing */ 3256 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3257 break; 3258 /* 3259 * Currently, the driver supports only one FCF - so treat this 3260 * as a link down, but save the link state because we don't want 3261 * it to be changed to Link Down unless it is already down.
3262 */ 3263 link_state = phba->link_state; 3264 lpfc_linkdown(phba); 3265 phba->link_state = link_state; 3266 /* Unregister FCF if no devices connected to it */ 3267 lpfc_unregister_unused_fcf(phba); 3268 break; 3269 case LPFC_FCOE_EVENT_TYPE_CVL: 3270 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3271 "2718 Clear Virtual Link Received for VPI 0x%x" 3272 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3273 vport = lpfc_find_vport_by_vpid(phba, 3274 acqe_fcoe->index - phba->vpi_base); 3275 if (!vport) 3276 break; 3277 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3278 if (!ndlp) 3279 break; 3280 shost = lpfc_shost_from_vport(vport); 3281 if (phba->pport->port_state <= LPFC_FLOGI) 3282 break; 3283 /* If virtual link is not yet instantiated ignore CVL */ 3284 if (vport->port_state <= LPFC_FDISC) 3285 break; 3286 3287 lpfc_linkdown_port(vport); 3288 lpfc_cleanup_pending_mbox(vport); 3289 spin_lock_irq(shost->host_lock); 3290 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3291 spin_unlock_irq(shost->host_lock); 3292 active_vlink_present = 0; 3293 3294 vports = lpfc_create_vport_work_array(phba); 3295 if (vports) { 3296 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3297 i++) { 3298 if ((!(vports[i]->fc_flag & 3299 FC_VPORT_CVL_RCVD)) && 3300 (vports[i]->port_state > LPFC_FDISC)) { 3301 active_vlink_present = 1; 3302 break; 3303 } 3304 } 3305 lpfc_destroy_vport_work_array(phba, vports); 3306 } 3307 3308 if (active_vlink_present) { 3309 /* 3310 * If there are other active VLinks present, 3311 * re-instantiate the Vlink using FDISC. 3312 */ 3313 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3314 spin_lock_irq(shost->host_lock); 3315 ndlp->nlp_flag |= NLP_DELAY_TMO; 3316 spin_unlock_irq(shost->host_lock); 3317 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3318 vport->port_state = LPFC_FDISC; 3319 } else { 3320 /* 3321 * Otherwise, request the port to rediscover 3322 * the entire FCF table for a fast recovery, 3323 * in case the current FCF 3324 * is no longer valid. 3325 */ 3326 rc = lpfc_sli4_redisc_fcf_table(phba); 3327 if (rc) 3328 /* 3329 * As a last resort, retry on the 3330 * currently registered FCF entry. 3331 */ 3332 lpfc_retry_pport_discovery(phba); 3333 } 3334 break; 3335 default: 3336 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3337 "0288 Unknown FCoE event type 0x%x event tag " 3338 "0x%x\n", event_type, acqe_fcoe->event_tag); 3339 break; 3340 } 3341} 3342 3343/** 3344 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 3345 * @phba: pointer to lpfc hba data structure. 3346 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 3347 * 3348 * This routine is to handle the SLI4 asynchronous dcbx event. 3349 **/ 3350static void 3351lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3352 struct lpfc_acqe_dcbx *acqe_dcbx) 3353{ 3354 phba->fc_eventTag = acqe_dcbx->event_tag; 3355 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3356 "0290 The SLI4 DCBX asynchronous event is not " 3357 "handled yet\n"); 3358} 3359 3360/** 3361 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events 3362 * @phba: pointer to lpfc hba data structure. 3363 * 3364 * This routine is invoked by the worker thread to process all the pending 3365 * SLI4 asynchronous events.
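 * * Events are drained from the sli4_hba.sp_asynce_work_queue list and * dispatched on the CQE trailer code, as the switch below shows: * LPFC_TRAILER_CODE_LINK -> lpfc_sli4_async_link_evt() * LPFC_TRAILER_CODE_FCOE -> lpfc_sli4_async_fcoe_evt() * LPFC_TRAILER_CODE_DCBX -> lpfc_sli4_async_dcbx_evt()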
3366 **/ 3367void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 3368{ 3369 struct lpfc_cq_event *cq_event; 3370 3371 /* First, declare the async event has been handled */ 3372 spin_lock_irq(&phba->hbalock); 3373 phba->hba_flag &= ~ASYNC_EVENT; 3374 spin_unlock_irq(&phba->hbalock); 3375 /* Now, handle all the async events */ 3376 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 3377 /* Get the first event from the head of the event queue */ 3378 spin_lock_irq(&phba->hbalock); 3379 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 3380 cq_event, struct lpfc_cq_event, list); 3381 spin_unlock_irq(&phba->hbalock); 3382 /* Process the asynchronous event */ 3383 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 3384 case LPFC_TRAILER_CODE_LINK: 3385 lpfc_sli4_async_link_evt(phba, 3386 &cq_event->cqe.acqe_link); 3387 break; 3388 case LPFC_TRAILER_CODE_FCOE: 3389 lpfc_sli4_async_fcoe_evt(phba, 3390 &cq_event->cqe.acqe_fcoe); 3391 break; 3392 case LPFC_TRAILER_CODE_DCBX: 3393 lpfc_sli4_async_dcbx_evt(phba, 3394 &cq_event->cqe.acqe_dcbx); 3395 break; 3396 default: 3397 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3398 "1804 Invalid asynchronous event code: " 3399 "x%x\n", bf_get(lpfc_trailer_code, 3400 &cq_event->cqe.mcqe_cmpl)); 3401 break; 3402 } 3403 /* Free the completion event processed to the free pool */ 3404 lpfc_sli4_cq_event_release(phba, cq_event); 3405 } 3406} 3407 3408/** 3409 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 3410 * @phba: pointer to lpfc hba data structure. 3411 * 3412 * This routine is invoked by the worker thread to process the FCF table 3413 * rediscovery pending completion event. 3414 **/ 3415void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 3416{ 3417 int rc; 3418 3419 spin_lock_irq(&phba->hbalock); 3420 /* Clear FCF rediscovery timeout event */ 3421 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 3422 /* Clear driver fast failover FCF record flag */ 3423 phba->fcf.failover_rec.flag = 0; 3424 /* Set state for FCF fast failover */ 3425 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 3426 spin_unlock_irq(&phba->hbalock); 3427 3428 /* Scan FCF table from the first entry to re-discover SAN */ 3429 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3430 if (rc) 3431 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3432 "2747 Post FCF rediscovery read FCF record " 3433 "failed 0x%x\n", rc); 3434} 3435 3436/** 3437 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3438 * @phba: pointer to lpfc hba data structure. 3439 * @dev_grp: The HBA PCI-Device group number. 3440 * 3441 * This routine is invoked to set up the per HBA PCI-Device group function 3442 * API jump table entries.
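 * * Illustrative call (LPFC_PCI_DEV_OC selects the SLI4 jump tables): * rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); * if (rc) * return -ENODEV;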
3443 * 3444 * Return: 0 if success, otherwise -ENODEV 3445 **/ 3446int 3447lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3448{ 3449 int rc; 3450 3451 /* Set up lpfc PCI-device group */ 3452 phba->pci_dev_grp = dev_grp; 3453 3454 /* The LPFC_PCI_DEV_OC uses SLI4 */ 3455 if (dev_grp == LPFC_PCI_DEV_OC) 3456 phba->sli_rev = LPFC_SLI_REV4; 3457 3458 /* Set up device INIT API function jump table */ 3459 rc = lpfc_init_api_table_setup(phba, dev_grp); 3460 if (rc) 3461 return -ENODEV; 3462 /* Set up SCSI API function jump table */ 3463 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 3464 if (rc) 3465 return -ENODEV; 3466 /* Set up SLI API function jump table */ 3467 rc = lpfc_sli_api_table_setup(phba, dev_grp); 3468 if (rc) 3469 return -ENODEV; 3470 /* Set up MBOX API function jump table */ 3471 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 3472 if (rc) 3473 return -ENODEV; 3474 3475 return 0; 3476} 3477 3478/** 3479 * lpfc_log_intr_mode - Log the active interrupt mode 3480 * @phba: pointer to lpfc hba data structure. 3481 * @intr_mode: active interrupt mode adopted. 3482 * 3483 * This routine is invoked to log the interrupt mode currently in use by 3484 * the device. 3485 **/ 3486static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3487{ 3488 switch (intr_mode) { 3489 case 0: 3490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3491 "0470 Enabled INTx interrupt mode.\n"); 3492 break; 3493 case 1: 3494 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3495 "0481 Enabled MSI interrupt mode.\n"); 3496 break; 3497 case 2: 3498 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3499 "0480 Enabled MSI-X interrupt mode.\n"); 3500 break; 3501 default: 3502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3503 "0482 Illegal interrupt mode.\n"); 3504 break; 3505 } 3506 return; 3507} 3508 3509/** 3510 * lpfc_enable_pci_dev - Enable a generic PCI device. 3511 * @phba: pointer to lpfc hba data structure. 3512 * 3513 * This routine is invoked to enable the PCI device that is common to all 3514 * PCI devices. 3515 * 3516 * Return codes 3517 * 0 - successful 3518 * other values - error 3519 **/ 3520static int 3521lpfc_enable_pci_dev(struct lpfc_hba *phba) 3522{ 3523 struct pci_dev *pdev; 3524 int bars; 3525 3526 /* Obtain PCI device reference */ 3527 if (!phba->pcidev) 3528 goto out_error; 3529 else 3530 pdev = phba->pcidev; 3531 /* Select PCI BARs */ 3532 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3533 /* Enable PCI device */ 3534 if (pci_enable_device_mem(pdev)) 3535 goto out_error; 3536 /* Request PCI resource for the device */ 3537 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3538 goto out_disable_device; 3539 /* Set up device as PCI master and save state for EEH */ 3540 pci_set_master(pdev); 3541 pci_try_set_mwi(pdev); 3542 pci_save_state(pdev); 3543 3544 return 0; 3545 3546out_disable_device: 3547 pci_disable_device(pdev); 3548out_error: 3549 return -ENODEV; 3550} 3551 3552/** 3553 * lpfc_disable_pci_dev - Disable a generic PCI device. 3554 * @phba: pointer to lpfc hba data structure. 3555 * 3556 * This routine is invoked to disable the PCI device that is common to all 3557 * PCI devices.
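 * * Pairs with lpfc_enable_pci_dev() above; a probe-path sketch: * if (lpfc_enable_pci_dev(phba)) * return -ENODEV; * device setup runs here * lpfc_disable_pci_dev(phba); on teardown or on an error path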
3558 **/ 3559static void 3560lpfc_disable_pci_dev(struct lpfc_hba *phba) 3561{ 3562 struct pci_dev *pdev; 3563 int bars; 3564 3565 /* Obtain PCI device reference */ 3566 if (!phba->pcidev) 3567 return; 3568 else 3569 pdev = phba->pcidev; 3570 /* Select PCI BARs */ 3571 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3572 /* Release PCI resource and disable PCI device */ 3573 pci_release_selected_regions(pdev, bars); 3574 pci_disable_device(pdev); 3575 /* Null out PCI private reference to driver */ 3576 pci_set_drvdata(pdev, NULL); 3577 3578 return; 3579} 3580 3581/** 3582 * lpfc_reset_hba - Reset a hba 3583 * @phba: pointer to lpfc hba data structure. 3584 * 3585 * This routine is invoked to reset a hba device. It brings the HBA 3586 * offline, performs a board restart, and then brings the board back 3587 * online. lpfc_offline calls lpfc_sli_hba_down, which cleans up any 3588 * outstanding mailbox commands. 3589 **/ 3590void 3591lpfc_reset_hba(struct lpfc_hba *phba) 3592{ 3593 /* If resets are disabled then set error state and return. */ 3594 if (!phba->cfg_enable_hba_reset) { 3595 phba->link_state = LPFC_HBA_ERROR; 3596 return; 3597 } 3598 lpfc_offline_prep(phba); 3599 lpfc_offline(phba); 3600 lpfc_sli_brdrestart(phba); 3601 lpfc_online(phba); 3602 lpfc_unblock_mgmt_io(phba); 3603} 3604 3605/** 3606 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3607 * @phba: pointer to lpfc hba data structure. 3608 * 3609 * This routine is invoked to set up the driver internal resources specific to 3610 * support the SLI-3 HBA device it is attached to. 3611 * 3612 * Return codes 3613 * 0 - successful 3614 * other values - error 3615 **/ 3616static int 3617lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3618{ 3619 struct lpfc_sli *psli; 3620 3621 /* 3622 * Initialize timers used by driver 3623 */ 3624 3625 /* Heartbeat timer */ 3626 init_timer(&phba->hb_tmofunc); 3627 phba->hb_tmofunc.function = lpfc_hb_timeout; 3628 phba->hb_tmofunc.data = (unsigned long)phba; 3629 3630 psli = &phba->sli; 3631 /* MBOX heartbeat timer */ 3632 init_timer(&psli->mbox_tmo); 3633 psli->mbox_tmo.function = lpfc_mbox_timeout; 3634 psli->mbox_tmo.data = (unsigned long) phba; 3635 /* FCP polling mode timer */ 3636 init_timer(&phba->fcp_poll_timer); 3637 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3638 phba->fcp_poll_timer.data = (unsigned long) phba; 3639 /* Fabric block timer */ 3640 init_timer(&phba->fabric_block_timer); 3641 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3642 phba->fabric_block_timer.data = (unsigned long) phba; 3643 /* EA polling mode timer */ 3644 init_timer(&phba->eratt_poll); 3645 phba->eratt_poll.function = lpfc_poll_eratt; 3646 phba->eratt_poll.data = (unsigned long) phba; 3647 3648 /* Host attention work mask setup */ 3649 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3650 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3651 3652 /* Get all the module params for configuring this host */ 3653 lpfc_get_cfgparam(phba); 3654 /* 3655 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size 3656 * used to create the sg_dma_buf_pool must be dynamically calculated. 3657 * 2 segments are added since the IOCB needs a command and response bde.
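 * For example (count illustrative): with cfg_sg_seg_cnt == 64 the pool buffer * must hold the FCP_CMND, the FCP_RSP and 64 + 2 BDEs, i.e. * sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 66 * sizeof(struct ulp_bde64).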
3658 */
3659 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3660 sizeof(struct fcp_rsp) +
3661 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3662
3663 if (phba->cfg_enable_bg) {
3664 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3665 phba->cfg_sg_dma_buf_size +=
3666 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3667 }
3668
3669 /* Also reinitialize the host templates with new values. */
3670 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3671 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3672
3673 phba->max_vpi = LPFC_MAX_VPI;
3674 /* This will be set to correct value after config_port mbox */
3675 phba->max_vports = 0;
3676
3677 /*
3678 * Initialize the SLI Layer to run with lpfc HBAs.
3679 */
3680 lpfc_sli_setup(phba);
3681 lpfc_sli_queue_setup(phba);
3682
3683 /* Allocate device driver memory */
3684 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3685 return -ENOMEM;
3686
3687 return 0;
3688}
3689
3690/**
3691 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3692 * @phba: pointer to lpfc hba data structure.
3693 *
3694 * This routine is invoked to unset the driver internal resources set up
3695 * specific for supporting the SLI-3 HBA device it is attached to.
3696 **/
3697static void
3698lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3699{
3700 /* Free device driver memory allocated */
3701 lpfc_mem_free_all(phba);
3702
3703 return;
3704}
3705
3706/**
3707 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3708 * @phba: pointer to lpfc hba data structure.
3709 *
3710 * This routine is invoked to set up the driver internal resources specific to
3711 * support the SLI-4 HBA device it is attached to.
3712 *
3713 * Return codes
3714 * 0 - successful
3715 * other values - error
3716 **/
3717static int
3718lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3719{
3720 struct lpfc_sli *psli;
3721 LPFC_MBOXQ_t *mboxq;
3722 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3723 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3724 struct lpfc_mqe *mqe;
3725
3726 /* Before proceeding, wait for POST completion and device readiness */
3727 rc = lpfc_sli4_post_status_check(phba);
3728 if (rc)
3729 return -ENODEV;
3730
3731 /*
3732 * Initialize timers used by driver
3733 */
3734
3735 /* Heartbeat timer */
3736 init_timer(&phba->hb_tmofunc);
3737 phba->hb_tmofunc.function = lpfc_hb_timeout;
3738 phba->hb_tmofunc.data = (unsigned long)phba;
3739
3740 psli = &phba->sli;
3741 /* MBOX heartbeat timer */
3742 init_timer(&psli->mbox_tmo);
3743 psli->mbox_tmo.function = lpfc_mbox_timeout;
3744 psli->mbox_tmo.data = (unsigned long) phba;
3745 /* Fabric block timer */
3746 init_timer(&phba->fabric_block_timer);
3747 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3748 phba->fabric_block_timer.data = (unsigned long) phba;
3749 /* EA polling mode timer */
3750 init_timer(&phba->eratt_poll);
3751 phba->eratt_poll.function = lpfc_poll_eratt;
3752 phba->eratt_poll.data = (unsigned long) phba;
3753 /* FCF rediscover timer */
3754 init_timer(&phba->fcf.redisc_wait);
3755 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3756 phba->fcf.redisc_wait.data = (unsigned long)phba;
3757
3758 /*
3759 * We need to do a READ_CONFIG mailbox command here before
3760 * calling lpfc_get_cfgparam. For VFs this will report the
3761 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3762 * All of the resources allocated
3763 * for this Port are tied to these values.
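 * (The READ_CONFIG command itself is issued below through
 * lpfc_sli4_read_config(), once the bootstrap mailbox has been created.)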
3764 */
3765 /* Get all the module params for configuring this host */
3766 lpfc_get_cfgparam(phba);
3767 phba->max_vpi = LPFC_MAX_VPI;
3768 /* This will be set to correct value after the read_config mbox */
3769 phba->max_vports = 0;
3770
3771 /* Program the default value of vlan_id and fc_map */
3772 phba->valid_vlan = 0;
3773 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3774 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3775 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3776
3777 /*
3778 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3779 * used to create the sg_dma_buf_pool must be dynamically calculated.
3780 * 2 segments are added since the IOCB needs a command and response bde.
3781 * To ensure that the SCSI sgl does not cross a 4K page boundary, only
3782 * sgl sizes that are a power of 2 are used.
3783 */
3784 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3785 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3786 /* Feature Level 1 hardware is limited to 2 pages */
3787 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3788 LPFC_SLI_INTF_FEATURELEVEL1_1))
3789 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3790 else
3791 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3792 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3793 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3794 dma_buf_size = dma_buf_size << 1)
3795 ;
3796 if (dma_buf_size == max_buf_size)
3797 phba->cfg_sg_seg_cnt = (dma_buf_size -
3798 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3799 (2 * sizeof(struct sli4_sge))) /
3800 sizeof(struct sli4_sge);
3801 phba->cfg_sg_dma_buf_size = dma_buf_size;
3802
3803 /* Initialize buffer queue management fields */
3804 hbq_count = lpfc_sli_hbq_count();
3805 for (i = 0; i < hbq_count; ++i)
3806 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3807 INIT_LIST_HEAD(&phba->rb_pend_list);
3808 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3809 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3810
3811 /*
3812 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3813 */
3814 /* Initialize the Abort scsi buffer list used by driver */
3815 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3816 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3817 /* This abort list used by worker thread */
3818 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3819
3820 /*
3821 * Initialize driver internal slow-path work queues
3822 */
3823
3824 /* Driver internal slow-path CQ Event pool */
3825 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3826 /* Response IOCB work queue list */
3827 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3828 /* Asynchronous event CQ Event work queue list */
3829 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3830 /* Fast-path XRI aborted CQ Event work queue list */
3831 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3832 /* Slow-path XRI aborted CQ Event work queue list */
3833 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3834 /* Receive queue CQ Event work queue list */
3835 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3836
3837 /* Initialize the driver internal SLI layer lists.
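 * The same lpfc_sli_setup() and lpfc_sli_queue_setup() helpers used by
 * the SLI-3 path perform this initialization.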
*/
3838 lpfc_sli_setup(phba);
3839 lpfc_sli_queue_setup(phba);
3840
3841 /* Allocate device driver memory */
3842 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3843 if (rc)
3844 return -ENOMEM;
3845
3846 /* Create the bootstrap mailbox command */
3847 rc = lpfc_create_bootstrap_mbox(phba);
3848 if (unlikely(rc))
3849 goto out_free_mem;
3850
3851 /* Set up the host's endian order with the device. */
3852 rc = lpfc_setup_endian_order(phba);
3853 if (unlikely(rc))
3854 goto out_free_bsmbx;
3855
3856 rc = lpfc_sli4_fw_cfg_check(phba);
3857 if (unlikely(rc))
3858 goto out_free_bsmbx;
3859
3860 /* Set up the hba's configuration parameters. */
3861 rc = lpfc_sli4_read_config(phba);
3862 if (unlikely(rc))
3863 goto out_free_bsmbx;
3864
3865 /* Perform a function reset */
3866 rc = lpfc_pci_function_reset(phba);
3867 if (unlikely(rc))
3868 goto out_free_bsmbx;
3869
3870 /* Create all the SLI4 queues */
3871 rc = lpfc_sli4_queue_create(phba);
3872 if (rc)
3873 goto out_free_bsmbx;
3874
3875 /* Create driver internal CQE event pool */
3876 rc = lpfc_sli4_cq_event_pool_create(phba);
3877 if (rc)
3878 goto out_destroy_queue;
3879
3880 /* Initialize and populate the sgl list per host */
3881 rc = lpfc_init_sgl_list(phba);
3882 if (rc) {
3883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3884 "1400 Failed to initialize sgl list.\n");
3885 goto out_destroy_cq_event_pool;
3886 }
3887 rc = lpfc_init_active_sgl_array(phba);
3888 if (rc) {
3889 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3890 "1430 Failed to initialize sgl list.\n");
3891 goto out_free_sgl_list;
3892 }
3893
3894 rc = lpfc_sli4_init_rpi_hdrs(phba);
3895 if (rc) {
3896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3897 "1432 Failed to initialize rpi headers.\n");
3898 goto out_free_active_sgl;
3899 }
3900
3901 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3902 phba->cfg_fcp_eq_count), GFP_KERNEL);
3903 if (!phba->sli4_hba.fcp_eq_hdl) {
3904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3905 "2572 Failed allocate memory for fast-path "
3906 "per-EQ handle array\n");
3907 goto out_remove_rpi_hdrs;
3908 }
3909
3910 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3911 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3912 if (!phba->sli4_hba.msix_entries) {
3913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3914 "2573 Failed allocate memory for msi-x "
3915 "interrupt vector entries\n");
3916 goto out_free_fcp_eq_hdl;
3917 }
3918
3919 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3920 GFP_KERNEL);
3921 if (!mboxq) {
3922 rc = -ENOMEM;
3923 goto out_free_fcp_eq_hdl;
3924 }
3925
3926 /* Get the Supported Pages. It is always available. */
3927 lpfc_supported_pages(mboxq);
3928 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3929 if (unlikely(rc)) {
3930 rc = -EIO;
3931 mempool_free(mboxq, phba->mbox_mem_pool);
3932 goto out_free_fcp_eq_hdl;
3933 }
3934
3935 mqe = &mboxq->u.mqe;
3936 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
3937 LPFC_MAX_SUPPORTED_PAGES);
3938 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
3939 switch (pn_page[i]) {
3940 case LPFC_SLI4_PARAMETERS:
3941 phba->sli4_hba.pc_sli4_params.supported = 1;
3942 break;
3943 default:
3944 break;
3945 }
3946 }
3947
3948 /* Read the port's SLI4 Parameters capabilities if supported.
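 * The mailbox allocated above is reused for this query and is freed
 * once the command completes, whether or not it succeeds.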
*/
3949 if (phba->sli4_hba.pc_sli4_params.supported)
3950 rc = lpfc_pc_sli4_params_get(phba, mboxq);
3951 mempool_free(mboxq, phba->mbox_mem_pool);
3952 if (rc) {
3953 rc = -EIO;
3954 goto out_free_fcp_eq_hdl;
3955 }
3956 return rc;
3957
3958out_free_fcp_eq_hdl:
3959 kfree(phba->sli4_hba.fcp_eq_hdl);
3960out_remove_rpi_hdrs:
3961 lpfc_sli4_remove_rpi_hdrs(phba);
3962out_free_active_sgl:
3963 lpfc_free_active_sgl(phba);
3964out_free_sgl_list:
3965 lpfc_free_sgl_list(phba);
3966out_destroy_cq_event_pool:
3967 lpfc_sli4_cq_event_pool_destroy(phba);
3968out_destroy_queue:
3969 lpfc_sli4_queue_destroy(phba);
3970out_free_bsmbx:
3971 lpfc_destroy_bootstrap_mbox(phba);
3972out_free_mem:
3973 lpfc_mem_free(phba);
3974 return rc;
3975}
3976
3977/**
3978 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3979 * @phba: pointer to lpfc hba data structure.
3980 *
3981 * This routine is invoked to unset the driver internal resources set up
3982 * specific for supporting the SLI-4 HBA device it is attached to.
3983 **/
3984static void
3985lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3986{
3987 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3988
3989 /* unregister default FCFI from the HBA */
3990 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3991
3992 /* Free the default FCF table */
3993 lpfc_sli_remove_dflt_fcf(phba);
3994
3995 /* Free memory allocated for msi-x interrupt vector entries */
3996 kfree(phba->sli4_hba.msix_entries);
3997
3998 /* Free memory allocated for fast-path work queue handles */
3999 kfree(phba->sli4_hba.fcp_eq_hdl);
4000
4001 /* Free the allocated rpi headers. */
4002 lpfc_sli4_remove_rpi_hdrs(phba);
4003 lpfc_sli4_remove_rpis(phba);
4004
4005 /* Free the ELS sgl list */
4006 lpfc_free_active_sgl(phba);
4007 lpfc_free_sgl_list(phba);
4008
4009 /* Free the SCSI sgl management array */
4010 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4011
4012 /* Free the SLI4 queues */
4013 lpfc_sli4_queue_destroy(phba);
4014
4015 /* Free the completion queue EQ event pool */
4016 lpfc_sli4_cq_event_release_all(phba);
4017 lpfc_sli4_cq_event_pool_destroy(phba);
4018
4019 /* Reset SLI4 HBA FCoE function */
4020 lpfc_pci_function_reset(phba);
4021
4022 /* Free the bsmbx region. */
4023 lpfc_destroy_bootstrap_mbox(phba);
4024
4025 /* Free the SLI Layer memory with SLI4 HBAs */
4026 lpfc_mem_free_all(phba);
4027
4028 /* Free the current connect table */
4029 list_for_each_entry_safe(conn_entry, next_conn_entry,
4030 &phba->fcf_conn_rec_list, list) {
4031 list_del_init(&conn_entry->list);
4032 kfree(conn_entry);
4033 }
4034
4035 return;
4036}
4037
4038/**
4039 * lpfc_init_api_table_setup - Set up init api function jump table
4040 * @phba: The hba struct for which this call is being executed.
4041 * @dev_grp: The HBA PCI-Device group number.
4042 *
4043 * This routine sets up the device INIT interface API function jump table
4044 * in @phba struct.
4045 *
4046 * Returns: 0 - success, -ENODEV - failure.
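 * -ENODEV is returned when @dev_grp is neither LPFC_PCI_DEV_LP (SLI-3)
 * nor LPFC_PCI_DEV_OC (SLI-4).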
4047 **/
4048int
4049lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4050{
4051 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4052 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4053 switch (dev_grp) {
4054 case LPFC_PCI_DEV_LP:
4055 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4056 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4057 phba->lpfc_stop_port = lpfc_stop_port_s3;
4058 break;
4059 case LPFC_PCI_DEV_OC:
4060 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4061 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4062 phba->lpfc_stop_port = lpfc_stop_port_s4;
4063 break;
4064 default:
4065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4066 "1431 Invalid HBA PCI-device group: 0x%x\n",
4067 dev_grp);
4068 return -ENODEV;
4069 break;
4070 }
4071 return 0;
4072}
4073
4074/**
4075 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4076 * @phba: pointer to lpfc hba data structure.
4077 *
4078 * This routine is invoked to set up the driver internal resources before the
4079 * device specific resource setup to support the HBA device it is attached to.
4080 *
4081 * Return codes
4082 * 0 - successful
4083 * other values - error
4084 **/
4085static int
4086lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4087{
4088 /*
4089 * Driver resources common to all SLI revisions
4090 */
4091 atomic_set(&phba->fast_event_count, 0);
4092 spin_lock_init(&phba->hbalock);
4093
4094 /* Initialize ndlp management spinlock */
4095 spin_lock_init(&phba->ndlp_lock);
4096
4097 INIT_LIST_HEAD(&phba->port_list);
4098 INIT_LIST_HEAD(&phba->work_list);
4099 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4100
4101 /* Initialize the wait queue head for the kernel thread */
4102 init_waitqueue_head(&phba->work_waitq);
4103
4104 /* Initialize the scsi buffer list used by driver for scsi IO */
4105 spin_lock_init(&phba->scsi_buf_list_lock);
4106 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4107
4108 /* Initialize the fabric iocb list */
4109 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4110
4111 /* Initialize list to save ELS buffers */
4112 INIT_LIST_HEAD(&phba->elsbuf);
4113
4114 /* Initialize FCF connection rec list */
4115 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4116
4117 return 0;
4118}
4119
4120/**
4121 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4122 * @phba: pointer to lpfc hba data structure.
4123 *
4124 * This routine is invoked to set up the driver internal resources after the
4125 * device specific resource setup to support the HBA device it is attached to.
4126 *
4127 * Return codes
4128 * 0 - successful
4129 * other values - error
4130 **/
4131static int
4132lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4133{
4134 int error;
4135
4136 /* Startup the kernel thread for this host adapter. */
4137 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4138 "lpfc_worker_%d", phba->brd_no);
4139 if (IS_ERR(phba->worker_thread)) {
4140 error = PTR_ERR(phba->worker_thread);
4141 return error;
4142 }
4143
4144 return 0;
4145}
4146
4147/**
4148 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4149 * @phba: pointer to lpfc hba data structure.
4150 *
4151 * This routine is invoked to unset the driver internal resources set up after
4152 * the device specific resource setup for supporting the HBA device it is
4153 * attached to.
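 * Currently this amounts to stopping the worker thread started by
 * lpfc_setup_driver_resource_phase2().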
4154 **/
4155static void
4156lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4157{
4158 /* Stop kernel worker thread */
4159 kthread_stop(phba->worker_thread);
4160}
4161
4162/**
4163 * lpfc_free_iocb_list - Free iocb list.
4164 * @phba: pointer to lpfc hba data structure.
4165 *
4166 * This routine is invoked to free the driver's IOCB list and memory.
4167 **/
4168static void
4169lpfc_free_iocb_list(struct lpfc_hba *phba)
4170{
4171 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4172
4173 spin_lock_irq(&phba->hbalock);
4174 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4175 &phba->lpfc_iocb_list, list) {
4176 list_del(&iocbq_entry->list);
4177 kfree(iocbq_entry);
4178 phba->total_iocbq_bufs--;
4179 }
4180 spin_unlock_irq(&phba->hbalock);
4181
4182 return;
4183}
4184
4185/**
4186 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4187 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCB entries to allocate and post to the list.
4188 *
4189 * This routine is invoked to allocate and initialize the driver's IOCB
4190 * list and set up the IOCB tag array accordingly.
4191 *
4192 * Return codes
4193 * 0 - successful
4194 * other values - error
4195 **/
4196static int
4197lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4198{
4199 struct lpfc_iocbq *iocbq_entry = NULL;
4200 uint16_t iotag;
4201 int i;
4202
4203 /* Initialize and populate the iocb list per host. */
4204 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4205 for (i = 0; i < iocb_count; i++) {
4206 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4207 if (iocbq_entry == NULL) {
4208 printk(KERN_ERR "%s: only allocated %d iocbs of "
4209 "expected %d count. Unloading driver.\n",
4210 __func__, i, iocb_count);
4211 goto out_free_iocbq;
4212 }
4213
4214 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4215 if (iotag == 0) {
4216 kfree(iocbq_entry);
4217 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4218 "Unloading driver.\n", __func__);
4219 goto out_free_iocbq;
4220 }
4221 iocbq_entry->sli4_xritag = NO_XRI;
4222
4223 spin_lock_irq(&phba->hbalock);
4224 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4225 phba->total_iocbq_bufs++;
4226 spin_unlock_irq(&phba->hbalock);
4227 }
4228
4229 return 0;
4230
4231out_free_iocbq:
4232 lpfc_free_iocb_list(phba);
4233
4234 return -ENOMEM;
4235}
4236
4237/**
4238 * lpfc_free_sgl_list - Free sgl list.
4239 * @phba: pointer to lpfc hba data structure.
4240 *
4241 * This routine is invoked to free the driver's sgl list and memory.
4242 **/
4243static void
4244lpfc_free_sgl_list(struct lpfc_hba *phba)
4245{
4246 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4247 LIST_HEAD(sglq_list);
4248 int rc = 0;
4249
4250 spin_lock_irq(&phba->hbalock);
4251 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4252 spin_unlock_irq(&phba->hbalock);
4253
4254 list_for_each_entry_safe(sglq_entry, sglq_next,
4255 &sglq_list, list) {
4256 list_del(&sglq_entry->list);
4257 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4258 kfree(sglq_entry);
4259 phba->sli4_hba.total_sglq_bufs--;
4260 }
4261 rc = lpfc_sli4_remove_all_sgl_pages(phba);
4262 if (rc) {
4263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4264 "2005 Unable to deregister pages from HBA: %x\n", rc);
4265 }
4266 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4267}
4268
4269/**
4270 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4271 * @phba: pointer to lpfc hba data structure.
4272 *
4273 * This routine is invoked to allocate the driver's active sgl memory.
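 * It is sized at one sglq pointer per XRI (max_cfg_param.max_xri entries).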
4274 * This array will hold the sglq_entry's for active IOs.
4275 **/
4276static int
4277lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4278{
4279 int size;
4280 size = sizeof(struct lpfc_sglq *);
4281 size *= phba->sli4_hba.max_cfg_param.max_xri;
4282
4283 phba->sli4_hba.lpfc_sglq_active_list =
4284 kzalloc(size, GFP_KERNEL);
4285 if (!phba->sli4_hba.lpfc_sglq_active_list)
4286 return -ENOMEM;
4287 return 0;
4288}
4289
4290/**
4291 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4292 * @phba: pointer to lpfc hba data structure.
4293 *
4294 * This routine is invoked to walk through the array of active sglq entries
4295 * and free all of the resources.
4296 * This is just a place holder for now.
4297 **/
4298static void
4299lpfc_free_active_sgl(struct lpfc_hba *phba)
4300{
4301 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4302}
4303
4304/**
4305 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4306 * @phba: pointer to lpfc hba data structure.
4307 *
4308 * This routine is invoked to allocate and initialize the driver's sgl
4309 * list and set up the sgl xritag array accordingly.
4310 *
4311 * Return codes
4312 * 0 - successful
4313 * other values - error
4314 **/
4315static int
4316lpfc_init_sgl_list(struct lpfc_hba *phba)
4317{
4318 struct lpfc_sglq *sglq_entry = NULL;
4319 int i;
4320 int els_xri_cnt;
4321
4322 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4323 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4324 "2400 lpfc_init_sgl_list els %d.\n",
4325 els_xri_cnt);
4326 /* Initialize and populate the sglq list per host/VF. */
4327 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4328 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4329
4330 /* Sanity check on XRI management */
4331 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4332 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4333 "2562 No room left for SCSI XRI allocation: "
4334 "max_xri=%d, els_xri=%d\n",
4335 phba->sli4_hba.max_cfg_param.max_xri,
4336 els_xri_cnt);
4337 return -ENOMEM;
4338 }
4339
4340 /* Allocate memory for the ELS XRI management array */
4341 phba->sli4_hba.lpfc_els_sgl_array =
4342 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4343 GFP_KERNEL);
4344
4345 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4346 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4347 "2401 Failed to allocate memory for ELS "
4348 "XRI management array of size %d.\n",
4349 els_xri_cnt);
4350 return -ENOMEM;
4351 }
4352
4353 /* Keep the SCSI XRI into the XRI management array */
4354 phba->sli4_hba.scsi_xri_max =
4355 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4356 phba->sli4_hba.scsi_xri_cnt = 0;
4357
4358 phba->sli4_hba.lpfc_scsi_psb_array =
4359 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4360 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4361
4362 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4363 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4364 "2563 Failed to allocate memory for SCSI "
4365 "XRI management array of size %d.\n",
4366 phba->sli4_hba.scsi_xri_max);
4367 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4368 return -ENOMEM;
4369 }
4370
4371 for (i = 0; i < els_xri_cnt; i++) {
4372 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4373 if (sglq_entry == NULL) {
4374 printk(KERN_ERR "%s: only allocated %d sgls of "
4375 "expected %d count. Unloading driver.\n",
4376 __func__, i, els_xri_cnt);
4377 goto out_free_mem;
4378 }
4379
4380 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4381 if (sglq_entry->sli4_xritag == NO_XRI) {
4382 kfree(sglq_entry);
4383 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4384 "Unloading driver.\n", __func__);
4385 goto out_free_mem;
4386 }
4387 sglq_entry->buff_type = GEN_BUFF_TYPE;
4388 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4389 if (sglq_entry->virt == NULL) {
4390 kfree(sglq_entry);
4391 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4392 "Unloading driver.\n", __func__);
4393 goto out_free_mem;
4394 }
4395 sglq_entry->sgl = sglq_entry->virt;
4396 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4397
4398 /* The list order is used by later block SGL registration */
4399 spin_lock_irq(&phba->hbalock);
4400 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4401 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4402 phba->sli4_hba.total_sglq_bufs++;
4403 spin_unlock_irq(&phba->hbalock);
4404 }
4405 return 0;
4406
4407out_free_mem:
4408 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4409 lpfc_free_sgl_list(phba);
4410 return -ENOMEM;
4411}
4412
4413/**
4414 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4415 * @phba: pointer to lpfc hba data structure.
4416 *
4417 * This routine is invoked to post rpi header templates to the
4418 * HBA consistent with the SLI-4 interface spec. This routine
4419 * posts a PAGE_SIZE memory region to the port to hold up to
4420 * PAGE_SIZE modulo 64 rpi context headers.
4421 * No locks are held here because this is an initialization routine
4422 * called only from probe or lpfc_online when interrupts are not
4423 * enabled and the driver is reinitializing the device.
4424 *
4425 * Return codes
4426 * 0 - successful
4427 * ENOMEM - No available memory
4428 * EIO - The mailbox failed to complete successfully.
4429 **/
4430int
4431lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4432{
4433 int rc = 0;
4434 int longs;
4435 uint16_t rpi_count;
4436 struct lpfc_rpi_hdr *rpi_hdr;
4437
4438 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4439
4440 /*
4441 * Provision an rpi bitmask range for discovery. The total count
4442 * is the difference between max and base + 1.
4443 */
4444 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4445 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4446
4447 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4448 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4449 GFP_KERNEL);
4450 if (!phba->sli4_hba.rpi_bmask)
4451 return -ENOMEM;
4452
4453 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4454 if (!rpi_hdr) {
4455 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4456 "0391 Error during rpi post operation\n");
4457 lpfc_sli4_remove_rpis(phba);
4458 rc = -ENODEV;
4459 }
4460
4461 return rc;
4462}
4463
4464/**
4465 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4466 * @phba: pointer to lpfc hba data structure.
4467 *
4468 * This routine is invoked to allocate a single 4KB memory region to
4469 * support rpis and stores them in the phba. This single region
4470 * provides support for up to 64 rpis. The region is used globally
4471 * by the device.
4472 *
4473 * Returns:
4474 * A valid rpi hdr on success.
4475 * A NULL pointer on any failure.
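 * On success the header is also linked onto lpfc_rpi_hdr_list so that
 * lpfc_sli4_remove_rpi_hdrs() can free it later.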
4476 **/
4477struct lpfc_rpi_hdr *
4478lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4479{
4480 uint16_t rpi_limit, curr_rpi_range;
4481 struct lpfc_dmabuf *dmabuf;
4482 struct lpfc_rpi_hdr *rpi_hdr;
4483
4484 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4485 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4486
4487 spin_lock_irq(&phba->hbalock);
4488 curr_rpi_range = phba->sli4_hba.next_rpi;
4489 spin_unlock_irq(&phba->hbalock);
4490
4491 /*
4492 * The port has a limited number of rpis. The increment here
4493 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4494 * and to allow the full max_rpi range per port.
4495 */
4496 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4497 return NULL;
4498
4499 /*
4500 * First allocate the protocol header region for the port. The
4501 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4502 */
4503 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4504 if (!dmabuf)
4505 return NULL;
4506
4507 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4508 LPFC_HDR_TEMPLATE_SIZE,
4509 &dmabuf->phys,
4510 GFP_KERNEL);
4511 if (!dmabuf->virt) {
4512 rpi_hdr = NULL;
4513 goto err_free_dmabuf;
4514 }
4515
4516 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4517 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4518 rpi_hdr = NULL;
4519 goto err_free_coherent;
4520 }
4521
4522 /* Save the rpi header data for cleanup later. */
4523 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4524 if (!rpi_hdr)
4525 goto err_free_coherent;
4526
4527 rpi_hdr->dmabuf = dmabuf;
4528 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4529 rpi_hdr->page_count = 1;
4530 spin_lock_irq(&phba->hbalock);
4531 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4532 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4533
4534 /*
4535 * The next_rpi stores the next modulo-64 rpi value to post
4536 * in any subsequent rpi memory region postings.
4537 */
4538 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4539 spin_unlock_irq(&phba->hbalock);
4540 return rpi_hdr;
4541
4542 err_free_coherent:
4543 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4544 dmabuf->virt, dmabuf->phys);
4545 err_free_dmabuf:
4546 kfree(dmabuf);
4547 return NULL;
4548}
4549
4550/**
4551 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4552 * @phba: pointer to lpfc hba data structure.
4553 *
4554 * This routine is invoked to remove all memory resources allocated
4555 * to support rpis. This routine presumes the caller has released all
4556 * rpis consumed by fabric or port logins and is prepared to have
4557 * the header pages removed.
4558 **/
4559void
4560lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4561{
4562 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4563
4564 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4565 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4566 list_del(&rpi_hdr->list);
4567 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4568 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4569 kfree(rpi_hdr->dmabuf);
4570 kfree(rpi_hdr);
4571 }
4572
4573 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4574 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4575}
4576
4577/**
4578 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4579 * @pdev: pointer to pci device data structure.
4580 *
4581 * This routine is invoked to allocate the driver hba data structure for an
4582 * HBA device.
If the allocation is successful, the phba reference to the
4583 * PCI device data structure is set.
4584 *
4585 * Return codes
4586 * pointer to @phba - successful
4587 * NULL - error
4588 **/
4589static struct lpfc_hba *
4590lpfc_hba_alloc(struct pci_dev *pdev)
4591{
4592 struct lpfc_hba *phba;
4593
4594 /* Allocate memory for HBA structure */
4595 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4596 if (!phba) {
4597 dev_err(&pdev->dev, "failed to allocate hba struct\n");
4598 return NULL;
4599 }
4600
4601 /* Set reference to PCI device in HBA structure */
4602 phba->pcidev = pdev;
4603
4604 /* Assign an unused board number */
4605 phba->brd_no = lpfc_get_instance();
4606 if (phba->brd_no < 0) {
4607 kfree(phba);
4608 return NULL;
4609 }
4610
4611 spin_lock_init(&phba->ct_ev_lock);
4612 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4613
4614 return phba;
4615}
4616
4617/**
4618 * lpfc_hba_free - Free driver hba data structure for a device.
4619 * @phba: pointer to lpfc hba data structure.
4620 *
4621 * This routine is invoked to free the driver hba data structure for an
4622 * HBA device.
4623 **/
4624static void
4625lpfc_hba_free(struct lpfc_hba *phba)
4626{
4627 /* Release the driver assigned board number */
4628 idr_remove(&lpfc_hba_index, phba->brd_no);
4629
4630 kfree(phba);
4631 return;
4632}
4633
4634/**
4635 * lpfc_create_shost - Create hba physical port with associated scsi host.
4636 * @phba: pointer to lpfc hba data structure.
4637 *
4638 * This routine is invoked to create HBA physical port and associate a SCSI
4639 * host with it.
4640 *
4641 * Return codes
4642 * 0 - successful
4643 * other values - error
4644 **/
4645static int
4646lpfc_create_shost(struct lpfc_hba *phba)
4647{
4648 struct lpfc_vport *vport;
4649 struct Scsi_Host *shost;
4650
4651 /* Initialize HBA FC structure */
4652 phba->fc_edtov = FF_DEF_EDTOV;
4653 phba->fc_ratov = FF_DEF_RATOV;
4654 phba->fc_altov = FF_DEF_ALTOV;
4655 phba->fc_arbtov = FF_DEF_ARBTOV;
4656
4657 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4658 if (!vport)
4659 return -ENODEV;
4660
4661 shost = lpfc_shost_from_vport(vport);
4662 phba->pport = vport;
4663 lpfc_debugfs_initialize(vport);
4664 /* Put reference to SCSI host to driver's device private data */
4665 pci_set_drvdata(phba->pcidev, shost);
4666
4667 return 0;
4668}
4669
4670/**
4671 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4672 * @phba: pointer to lpfc hba data structure.
4673 *
4674 * This routine is invoked to destroy HBA physical port and the associated
4675 * SCSI host.
4676 **/
4677static void
4678lpfc_destroy_shost(struct lpfc_hba *phba)
4679{
4680 struct lpfc_vport *vport = phba->pport;
4681
4682 /* Destroy physical port that is associated with the SCSI host */
4683 destroy_port(vport);
4684
4685 return;
4686}
4687
4688/**
4689 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4690 * @phba: pointer to lpfc hba data structure.
4691 * @shost: the shost to be used to detect Block guard settings.
4692 *
4693 * This routine sets up the local Block guard protocol settings for @shost.
4694 * This routine also allocates memory for debugging bg buffers.
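 * The dump buffers (_dump_buf_data/_dump_buf_dif) are driver-global, so
 * they are only allocated once, by the first HBA to reach this point.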
4695 **/ 4696static void 4697lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 4698{ 4699 int pagecnt = 10; 4700 if (lpfc_prot_mask && lpfc_prot_guard) { 4701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4702 "1478 Registering BlockGuard with the " 4703 "SCSI layer\n"); 4704 scsi_host_set_prot(shost, lpfc_prot_mask); 4705 scsi_host_set_guard(shost, lpfc_prot_guard); 4706 } 4707 if (!_dump_buf_data) { 4708 while (pagecnt) { 4709 spin_lock_init(&_dump_buf_lock); 4710 _dump_buf_data = 4711 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4712 if (_dump_buf_data) { 4713 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4714 "9043 BLKGRD: allocated %d pages for " 4715 "_dump_buf_data at 0x%p\n", 4716 (1 << pagecnt), _dump_buf_data); 4717 _dump_buf_data_order = pagecnt; 4718 memset(_dump_buf_data, 0, 4719 ((1 << PAGE_SHIFT) << pagecnt)); 4720 break; 4721 } else 4722 --pagecnt; 4723 } 4724 if (!_dump_buf_data_order) 4725 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4726 "9044 BLKGRD: ERROR unable to allocate " 4727 "memory for hexdump\n"); 4728 } else 4729 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4730 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 4731 "\n", _dump_buf_data); 4732 if (!_dump_buf_dif) { 4733 while (pagecnt) { 4734 _dump_buf_dif = 4735 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4736 if (_dump_buf_dif) { 4737 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4738 "9046 BLKGRD: allocated %d pages for " 4739 "_dump_buf_dif at 0x%p\n", 4740 (1 << pagecnt), _dump_buf_dif); 4741 _dump_buf_dif_order = pagecnt; 4742 memset(_dump_buf_dif, 0, 4743 ((1 << PAGE_SHIFT) << pagecnt)); 4744 break; 4745 } else 4746 --pagecnt; 4747 } 4748 if (!_dump_buf_dif_order) 4749 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4750 "9047 BLKGRD: ERROR unable to allocate " 4751 "memory for hexdump\n"); 4752 } else 4753 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4754 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 4755 _dump_buf_dif); 4756} 4757 4758/** 4759 * lpfc_post_init_setup - Perform necessary device post initialization setup. 4760 * @phba: pointer to lpfc hba data structure. 4761 * 4762 * This routine is invoked to perform all the necessary post initialization 4763 * setup for the device. 4764 **/ 4765static void 4766lpfc_post_init_setup(struct lpfc_hba *phba) 4767{ 4768 struct Scsi_Host *shost; 4769 struct lpfc_adapter_event_header adapter_event; 4770 4771 /* Get the default values for Model Name and Description */ 4772 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 4773 4774 /* 4775 * hba setup may have changed the hba_queue_depth so we need to 4776 * adjust the value of can_queue. 4777 */ 4778 shost = pci_get_drvdata(phba->pcidev); 4779 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4780 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4781 lpfc_setup_bg(phba, shost); 4782 4783 lpfc_host_attrib_init(shost); 4784 4785 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 4786 spin_lock_irq(shost->host_lock); 4787 lpfc_poll_start_timer(phba); 4788 spin_unlock_irq(shost->host_lock); 4789 } 4790 4791 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4792 "0428 Perform SCSI scan\n"); 4793 /* Send board arrival event to upper layer */ 4794 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 4795 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 4796 fc_host_post_vendor_event(shost, fc_get_event_number(), 4797 sizeof(adapter_event), 4798 (char *) &adapter_event, 4799 LPFC_NL_VENDOR_ID); 4800 return; 4801} 4802 4803/** 4804 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 
4805 * @phba: pointer to lpfc hba data structure. 4806 * 4807 * This routine is invoked to set up the PCI device memory space for device 4808 * with SLI-3 interface spec. 4809 * 4810 * Return codes 4811 * 0 - successful 4812 * other values - error 4813 **/ 4814static int 4815lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 4816{ 4817 struct pci_dev *pdev; 4818 unsigned long bar0map_len, bar2map_len; 4819 int i, hbq_count; 4820 void *ptr; 4821 int error = -ENODEV; 4822 4823 /* Obtain PCI device reference */ 4824 if (!phba->pcidev) 4825 return error; 4826 else 4827 pdev = phba->pcidev; 4828 4829 /* Set the device DMA mask size */ 4830 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 4831 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 4832 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 4833 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 4834 return error; 4835 } 4836 } 4837 4838 /* Get the bus address of Bar0 and Bar2 and the number of bytes 4839 * required by each mapping. 4840 */ 4841 phba->pci_bar0_map = pci_resource_start(pdev, 0); 4842 bar0map_len = pci_resource_len(pdev, 0); 4843 4844 phba->pci_bar2_map = pci_resource_start(pdev, 2); 4845 bar2map_len = pci_resource_len(pdev, 2); 4846 4847 /* Map HBA SLIM to a kernel virtual address. */ 4848 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 4849 if (!phba->slim_memmap_p) { 4850 dev_printk(KERN_ERR, &pdev->dev, 4851 "ioremap failed for SLIM memory.\n"); 4852 goto out; 4853 } 4854 4855 /* Map HBA Control Registers to a kernel virtual address. */ 4856 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 4857 if (!phba->ctrl_regs_memmap_p) { 4858 dev_printk(KERN_ERR, &pdev->dev, 4859 "ioremap failed for HBA control registers.\n"); 4860 goto out_iounmap_slim; 4861 } 4862 4863 /* Allocate memory for SLI-2 structures */ 4864 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 4865 SLI2_SLIM_SIZE, 4866 &phba->slim2p.phys, 4867 GFP_KERNEL); 4868 if (!phba->slim2p.virt) 4869 goto out_iounmap; 4870 4871 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 4872 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 4873 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 4874 phba->IOCBs = (phba->slim2p.virt + 4875 offsetof(struct lpfc_sli2_slim, IOCBs)); 4876 4877 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 4878 lpfc_sli_hbq_size(), 4879 &phba->hbqslimp.phys, 4880 GFP_KERNEL); 4881 if (!phba->hbqslimp.virt) 4882 goto out_free_slim; 4883 4884 hbq_count = lpfc_sli_hbq_count(); 4885 ptr = phba->hbqslimp.virt; 4886 for (i = 0; i < hbq_count; ++i) { 4887 phba->hbqs[i].hbq_virt = ptr; 4888 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4889 ptr += (lpfc_hbq_defs[i]->entry_count * 4890 sizeof(struct lpfc_hbq_entry)); 4891 } 4892 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 4893 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 4894 4895 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 4896 4897 INIT_LIST_HEAD(&phba->rb_pend_list); 4898 4899 phba->MBslimaddr = phba->slim_memmap_p; 4900 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 4901 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 4902 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 4903 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 4904 4905 return 0; 4906 4907out_free_slim: 4908 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 4909 phba->slim2p.virt, phba->slim2p.phys); 4910out_iounmap: 4911 
iounmap(phba->ctrl_regs_memmap_p);
4912out_iounmap_slim:
4913 iounmap(phba->slim_memmap_p);
4914out:
4915 return error;
4916}
4917
4918/**
4919 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4920 * @phba: pointer to lpfc hba data structure.
4921 *
4922 * This routine is invoked to unset the PCI device memory space for device
4923 * with SLI-3 interface spec.
4924 **/
4925static void
4926lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4927{
4928 struct pci_dev *pdev;
4929
4930 /* Obtain PCI device reference */
4931 if (!phba->pcidev)
4932 return;
4933 else
4934 pdev = phba->pcidev;
4935
4936 /* Free coherent DMA memory allocated */
4937 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4938 phba->hbqslimp.virt, phba->hbqslimp.phys);
4939 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4940 phba->slim2p.virt, phba->slim2p.phys);
4941
4942 /* I/O memory unmap */
4943 iounmap(phba->ctrl_regs_memmap_p);
4944 iounmap(phba->slim_memmap_p);
4945
4946 return;
4947}
4948
4949/**
4950 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4951 * @phba: pointer to lpfc hba data structure.
4952 *
4953 * This routine is invoked to wait for the SLI4 device Power On Self Test
4954 * (POST) to complete and to check its status.
4955 *
4956 * Return 0 if successful, otherwise -ENODEV.
4957 **/
4958int
4959lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4960{
4961 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
4962 int i, port_error = -ENODEV;
4963
4964 if (!phba->sli4_hba.STAregaddr)
4965 return -ENODEV;
4966
4967 /* Wait up to 30 seconds for the SLI port POST to complete and be ready */
4968 for (i = 0; i < 3000; i++) {
4969 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4970 /* Encountered fatal POST error, break out */
4971 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4972 port_error = -ENODEV;
4973 break;
4974 }
4975 if (LPFC_POST_STAGE_ARMFW_READY ==
4976 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4977 port_error = 0;
4978 break;
4979 }
4980 msleep(10);
4981 }
4982
4983 if (port_error)
4984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4985 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4986 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4987 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4988 bf_get(lpfc_hst_state_perr, &sta_reg),
4989 bf_get(lpfc_hst_state_sfi, &sta_reg),
4990 bf_get(lpfc_hst_state_nip, &sta_reg),
4991 bf_get(lpfc_hst_state_ipc, &sta_reg),
4992 bf_get(lpfc_hst_state_xrom, &sta_reg),
4993 bf_get(lpfc_hst_state_dl, &sta_reg),
4994 bf_get(lpfc_hst_state_port_status, &sta_reg));
4995
4996 /* Log device information */
4997 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
4998 if (bf_get(lpfc_sli_intf_valid,
4999 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5001 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5002 "FeatureL1=0x%x, FeatureL2=0x%x\n",
5003 bf_get(lpfc_sli_intf_sli_family,
5004 &phba->sli4_hba.sli_intf),
5005 bf_get(lpfc_sli_intf_slirev,
5006 &phba->sli4_hba.sli_intf),
5007 bf_get(lpfc_sli_intf_featurelevel1,
5008 &phba->sli4_hba.sli_intf),
5009 bf_get(lpfc_sli_intf_featurelevel2,
5010 &phba->sli4_hba.sli_intf));
5011 }
5012 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5013 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5014 /* With unrecoverable error, log the error message and return error */
5015 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5016 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5017 if ((~phba->sli4_hba.ue_mask_lo &
uerrlo_reg.word0) ||
5018 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5020 "1422 HBA Unrecoverable error: "
5021 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5022 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5023 uerrlo_reg.word0, uerrhi_reg.word0,
5024 phba->sli4_hba.ue_mask_lo,
5025 phba->sli4_hba.ue_mask_hi);
5026 return -ENODEV;
5027 }
5028
5029 return port_error;
5030}
5031
5032/**
5033 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5034 * @phba: pointer to lpfc hba data structure.
5035 *
5036 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5037 * memory map.
5038 **/
5039static void
5040lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5041{
5042 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5043 LPFC_UERR_STATUS_LO;
5044 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5045 LPFC_UERR_STATUS_HI;
5046 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5047 LPFC_UE_MASK_LO;
5048 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5049 LPFC_UE_MASK_HI;
5050 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5051 LPFC_SLI_INTF;
5052}
5053
5054/**
5055 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5056 * @phba: pointer to lpfc hba data structure.
5057 *
5058 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5059 * memory map.
5060 **/
5061static void
5062lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5063{
5064
5065 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5066 LPFC_HST_STATE;
5067 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5068 LPFC_HST_ISR0;
5069 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5070 LPFC_HST_IMR0;
5071 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5072 LPFC_HST_ISCR0;
5073 return;
5074}
5075
5076/**
5077 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5078 * @phba: pointer to lpfc hba data structure.
5079 * @vf: virtual function number
5080 *
5081 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5082 * based on the given virtual function number, @vf.
5083 *
5084 * Return 0 if successful, otherwise -ENODEV.
5085 **/
5086static int
5087lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5088{
5089 if (vf > LPFC_VIR_FUNC_MAX)
5090 return -ENODEV;
5091
5092 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5093 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5094 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5095 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5096 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5097 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5098 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5099 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5100 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5101 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5102 return 0;
5103}
5104
5105/**
5106 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5107 * @phba: pointer to lpfc hba data structure.
5108 *
5109 * This routine is invoked to create the bootstrap mailbox
5110 * region consistent with the SLI-4 interface spec. This
5111 * routine allocates all memory necessary to communicate
5112 * mailbox commands to the port and sets up all alignment
5113 * needs.
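 * The region is over-allocated by LPFC_ALIGN_16_BYTE - 1 bytes so that
 * a 16-byte-aligned mailbox address can always be carved out of it.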
 * No locks are expected to be held when calling
5114 * this routine.
5115 *
5116 * Return codes
5117 * 0 - successful
5118 * ENOMEM - could not allocate memory.
5119 **/
5120static int
5121lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5122{
5123 uint32_t bmbx_size;
5124 struct lpfc_dmabuf *dmabuf;
5125 struct dma_address *dma_address;
5126 uint32_t pa_addr;
5127 uint64_t phys_addr;
5128
5129 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5130 if (!dmabuf)
5131 return -ENOMEM;
5132
5133 /*
5134 * The bootstrap mailbox region is comprised of 2 parts
5135 * plus an alignment restriction of 16 bytes.
5136 */
5137 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5138 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5139 bmbx_size,
5140 &dmabuf->phys,
5141 GFP_KERNEL);
5142 if (!dmabuf->virt) {
5143 kfree(dmabuf);
5144 return -ENOMEM;
5145 }
5146 memset(dmabuf->virt, 0, bmbx_size);
5147
5148 /*
5149 * Initialize the bootstrap mailbox pointers now so that the register
5150 * operations are simple later. The mailbox dma address is required
5151 * to be 16-byte aligned. Also align the virtual memory as each
5152 * mailbox is copied into the bmbx mailbox region before issuing the
5153 * command to the port.
5154 */
5155 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5156 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5157
5158 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5159 LPFC_ALIGN_16_BYTE);
5160 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5161 LPFC_ALIGN_16_BYTE);
5162
5163 /*
5164 * Set the high and low physical addresses now. The SLI4 alignment
5165 * requirement is 16 bytes and the mailbox is posted to the port
5166 * as two 30-bit addresses. The other data is a bit marking whether
5167 * the 30-bit address is the high or low address.
5168 * Upcast bmbx aphys to 64bits so shift instruction compiles
5169 * clean on 32 bit machines.
5170 */
5171 dma_address = &phba->sli4_hba.bmbx.dma_address;
5172 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5173 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5174 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5175 LPFC_BMBX_BIT1_ADDR_HI);
5176
5177 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5178 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5179 LPFC_BMBX_BIT1_ADDR_LO);
5180 return 0;
5181}
5182
5183/**
5184 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5185 * @phba: pointer to lpfc hba data structure.
5186 *
5187 * This routine is invoked to teardown the bootstrap mailbox
5188 * region and release all host resources. This routine requires
5189 * the caller to ensure all mailbox commands have been recovered, no
5190 * additional mailbox commands are sent, and interrupts are disabled
5191 * before calling this routine.
5192 *
5193 **/
5194static void
5195lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5196{
5197 dma_free_coherent(&phba->pcidev->dev,
5198 phba->sli4_hba.bmbx.bmbx_size,
5199 phba->sli4_hba.bmbx.dmabuf->virt,
5200 phba->sli4_hba.bmbx.dmabuf->phys);
5201
5202 kfree(phba->sli4_hba.bmbx.dmabuf);
5203 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5204}
5205
5206/**
5207 * lpfc_sli4_read_config - Get the config parameters.
5208 * @phba: pointer to lpfc hba data structure.
5209 *
5210 * This routine is invoked to read the configuration parameters from the HBA.
5211 * The configuration parameters are used to set the base and maximum values
5212 * for RPIs, XRIs, VPIs, VFIs, and FCFIs.
These values also affect the resource
5213 * allocation for the port.
5214 *
5215 * Return codes
5216 * 0 - successful
5217 * ENOMEM - No available memory
5218 * EIO - The mailbox failed to complete successfully.
5219 **/
5220static int
5221lpfc_sli4_read_config(struct lpfc_hba *phba)
5222{
5223 LPFC_MBOXQ_t *pmb;
5224 struct lpfc_mbx_read_config *rd_config;
5225 uint32_t rc = 0;
5226
5227 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5228 if (!pmb) {
5229 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5230 "2011 Unable to allocate memory for issuing "
5231 "SLI_CONFIG_SPECIAL mailbox command\n");
5232 return -ENOMEM;
5233 }
5234
5235 lpfc_read_config(phba, pmb);
5236
5237 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5238 if (rc != MBX_SUCCESS) {
5239 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5240 "2012 Mailbox failed, mbxCmd x%x "
5241 "READ_CONFIG, mbxStatus x%x\n",
5242 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5243 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5244 rc = -EIO;
5245 } else {
5246 rd_config = &pmb->u.mqe.un.rd_config;
5247 phba->sli4_hba.max_cfg_param.max_xri =
5248 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5249 phba->sli4_hba.max_cfg_param.xri_base =
5250 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5251 phba->sli4_hba.max_cfg_param.max_vpi =
5252 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5253 phba->sli4_hba.max_cfg_param.vpi_base =
5254 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5255 phba->sli4_hba.max_cfg_param.max_rpi =
5256 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5257 phba->sli4_hba.max_cfg_param.rpi_base =
5258 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5259 phba->sli4_hba.max_cfg_param.max_vfi =
5260 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5261 phba->sli4_hba.max_cfg_param.vfi_base =
5262 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5263 phba->sli4_hba.max_cfg_param.max_fcfi =
5264 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5265 phba->sli4_hba.max_cfg_param.fcfi_base =
5266 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5267 phba->sli4_hba.max_cfg_param.max_eq =
5268 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5269 phba->sli4_hba.max_cfg_param.max_rq =
5270 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5271 phba->sli4_hba.max_cfg_param.max_wq =
5272 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5273 phba->sli4_hba.max_cfg_param.max_cq =
5274 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5275 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5276 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5277 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5278 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5279 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5280 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5281 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5282 phba->max_vports = phba->max_vpi;
5283 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5284 "2003 cfg params XRI(B:%d M:%d), "
5285 "VPI(B:%d M:%d) "
5286 "VFI(B:%d M:%d) "
5287 "RPI(B:%d M:%d) "
5288 "FCFI(B:%d M:%d)\n",
5289 phba->sli4_hba.max_cfg_param.xri_base,
5290 phba->sli4_hba.max_cfg_param.max_xri,
5291 phba->sli4_hba.max_cfg_param.vpi_base,
5292 phba->sli4_hba.max_cfg_param.max_vpi,
5293 phba->sli4_hba.max_cfg_param.vfi_base,
5294 phba->sli4_hba.max_cfg_param.max_vfi,
5295 phba->sli4_hba.max_cfg_param.rpi_base,
5296 phba->sli4_hba.max_cfg_param.max_rpi,
5297 phba->sli4_hba.max_cfg_param.fcfi_base,
5298 phba->sli4_hba.max_cfg_param.max_fcfi);
5299 }
5300 mempool_free(pmb, phba->mbox_mem_pool);
5301
5302 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5303 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
5304 phba->cfg_hba_queue_depth =
5305 phba->sli4_hba.max_cfg_param.max_xri;
5306 return rc;
5307}
5308
5309/**
5310 * lpfc_setup_endian_order - Notify the port of the host's endian order.
5311 * @phba: pointer to lpfc hba data structure.
5312 *
5313 * This routine is invoked to set up the host-side endian order with the
5314 * HBA, consistent with the SLI-4 interface spec.
5315 *
5316 * Return codes
5317 * 0 - successful
5318 * ENOMEM - No available memory
5319 * EIO - The mailbox failed to complete successfully.
5320 **/
5321static int
5322lpfc_setup_endian_order(struct lpfc_hba *phba)
5323{
5324 LPFC_MBOXQ_t *mboxq;
5325 uint32_t rc = 0;
5326 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5327 HOST_ENDIAN_HIGH_WORD1};
5328
5329 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5330 if (!mboxq) {
5331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5332 "0492 Unable to allocate memory for issuing "
5333 "SLI_CONFIG_SPECIAL mailbox command\n");
5334 return -ENOMEM;
5335 }
5336
5337 /*
5338 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5339 * words to contain special data values and no other data.
5340 */
5341 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5342 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5343 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5344 if (rc != MBX_SUCCESS) {
5345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5346 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
5347 "status x%x\n",
5348 rc);
5349 rc = -EIO;
5350 }
5351
5352 mempool_free(mboxq, phba->mbox_mem_pool);
5353 return rc;
5354}
5355
5356/**
5357 * lpfc_sli4_queue_create - Create all the SLI4 queues
5358 * @phba: pointer to lpfc hba data structure.
5359 *
5360 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5361 * operation. For each SLI4 queue type, the parameters such as queue entry
5362 * count (queue depth) shall be taken from the module parameter. For now,
5363 * we just use some constant number as place holder.
5364 *
5365 * Return codes
5366 * 0 - successful
5367 * ENOMEM - No available memory
5368 * EIO - The mailbox failed to complete successfully.
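 * The queue entry sizes and counts used below are currently fixed
 * constants; see the "fake the default for now" notes in the body.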
5369 **/
5370static int
5371lpfc_sli4_queue_create(struct lpfc_hba *phba)
5372{
5373 struct lpfc_queue *qdesc;
5374 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5375 int cfg_fcp_wq_count;
5376 int cfg_fcp_eq_count;
5377
5378 /*
5379 * Sanity check for configured queue parameters against the run-time
5380 * device parameters
5381 */
5382
5383 /* Sanity check on FCP fast-path WQ parameters */
5384 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5385 if (cfg_fcp_wq_count >
5386 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5387 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5388 LPFC_SP_WQN_DEF;
5389 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391 "2581 Not enough WQs (%d) from "
5392 "the pci function for supporting "
5393 "FCP WQs (%d)\n",
5394 phba->sli4_hba.max_cfg_param.max_wq,
5395 phba->cfg_fcp_wq_count);
5396 goto out_error;
5397 }
5398 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5399 "2582 Not enough WQs (%d) from the pci "
5400 "function for supporting the requested "
5401 "FCP WQs (%d), the actual FCP WQs can "
5402 "be supported: %d\n",
5403 phba->sli4_hba.max_cfg_param.max_wq,
5404 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5405 }
5406 /* The actual number of FCP work queues adopted */
5407 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5408
5409 /* Sanity check on FCP fast-path EQ parameters */
5410 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5411 if (cfg_fcp_eq_count >
5412 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5413 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5414 LPFC_SP_EQN_DEF;
5415 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5416 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5417 "2574 Not enough EQs (%d) from the "
5418 "pci function for supporting FCP "
5419 "EQs (%d)\n",
5420 phba->sli4_hba.max_cfg_param.max_eq,
5421 phba->cfg_fcp_eq_count);
5422 goto out_error;
5423 }
5424 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5425 "2575 Not enough EQs (%d) from the pci "
5426 "function for supporting the requested "
5427 "FCP EQs (%d), the actual FCP EQs can "
5428 "be supported: %d\n",
5429 phba->sli4_hba.max_cfg_param.max_eq,
5430 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5431 }
5432 /* It does not make sense to have more EQs than WQs */
5433 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5434 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5435 "2593 The FCP EQ count(%d) cannot be greater "
5436 "than the FCP WQ count(%d), limiting the "
5437 "FCP EQ count to %d\n", cfg_fcp_eq_count,
5438 phba->cfg_fcp_wq_count,
5439 phba->cfg_fcp_wq_count);
5440 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5441 }
5442 /* The actual number of FCP event queues adopted */
5443 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5444 /* The overall number of event queues used */
5445 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5446
5447 /*
5448 * Create Event Queues (EQs)
5449 */
5450
5451 /* Get EQ depth from module parameter, fake the default for now */
5452 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5453 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5454
5455 /* Create slow path event queue */
5456 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5457 phba->sli4_hba.eq_ecount);
5458 if (!qdesc) {
5459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5460 "0496 Failed allocate slow-path EQ\n");
5461 goto out_error;
5462 }
5463 phba->sli4_hba.sp_eq = qdesc;
5464
5465 /* Create fast-path FCP Event Queue(s) */
5466 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5467 
5467				phba->cfg_fcp_eq_count), GFP_KERNEL);
5468	if (!phba->sli4_hba.fp_eq) {
5469		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5470				"2576 Failed allocate memory for fast-path "
5471				"EQ record array\n");
5472		goto out_free_sp_eq;
5473	}
5474	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5475		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5476					      phba->sli4_hba.eq_ecount);
5477		if (!qdesc) {
5478			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5479					"0497 Failed allocate fast-path EQ\n");
5480			goto out_free_fp_eq;
5481		}
5482		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5483	}
5484
5485	/*
5486	 * Create Complete Queues (CQs)
5487	 */
5488
5489	/* Get CQ depth from module parameter, fake the default for now */
5490	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5491	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5492
5493	/* Create slow-path Mailbox Command Complete Queue */
5494	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5495				      phba->sli4_hba.cq_ecount);
5496	if (!qdesc) {
5497		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5498				"0500 Failed allocate slow-path mailbox CQ\n");
5499		goto out_free_fp_eq;
5500	}
5501	phba->sli4_hba.mbx_cq = qdesc;
5502
5503	/* Create slow-path ELS Complete Queue */
5504	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5505				      phba->sli4_hba.cq_ecount);
5506	if (!qdesc) {
5507		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5508				"0501 Failed allocate slow-path ELS CQ\n");
5509		goto out_free_mbx_cq;
5510	}
5511	phba->sli4_hba.els_cq = qdesc;
5512
5513
5514	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5515	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5516				phba->cfg_fcp_eq_count), GFP_KERNEL);
5517	if (!phba->sli4_hba.fcp_cq) {
5518		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5519				"2577 Failed allocate memory for fast-path "
5520				"CQ record array\n");
5521		goto out_free_els_cq;
5522	}
5523	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5524		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5525					      phba->sli4_hba.cq_ecount);
5526		if (!qdesc) {
5527			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5528					"0499 Failed allocate fast-path FCP "
5529					"CQ (%d)\n", fcp_cqidx);
5530			goto out_free_fcp_cq;
5531		}
5532		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5533	}
5534
5535	/* Create Mailbox Command Queue */
5536	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5537	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5538
5539	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5540				      phba->sli4_hba.mq_ecount);
5541	if (!qdesc) {
5542		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5543				"0505 Failed allocate slow-path MQ\n");
5544		goto out_free_fcp_cq;
5545	}
5546	phba->sli4_hba.mbx_wq = qdesc;
5547
5548	/*
5549	 * Create all the Work Queues (WQs)
5550	 */
5551	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5552	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5553
5554	/* Create slow-path ELS Work Queue */
5555	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5556				      phba->sli4_hba.wq_ecount);
5557	if (!qdesc) {
5558		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5559				"0504 Failed allocate slow-path ELS WQ\n");
5560		goto out_free_mbx_wq;
5561	}
5562	phba->sli4_hba.els_wq = qdesc;
5563
5564	/* Create fast-path FCP Work Queue(s) */
5565	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5566				phba->cfg_fcp_wq_count), GFP_KERNEL);
5567	if (!phba->sli4_hba.fcp_wq) {
5568		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5569				"2578 Failed allocate memory for fast-path "
5570				"WQ record array\n");
5571		goto out_free_els_wq;
5572	}
5573	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5574		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5575					      phba->sli4_hba.wq_ecount);
5576		if (!qdesc) {
5577			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5578					"0503 Failed allocate fast-path FCP "
5579					"WQ (%d)\n", fcp_wqidx);
5580			goto out_free_fcp_wq;
5581		}
5582		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5583	}
5584
5585	/*
5586	 * Create Receive Queue (RQ)
5587	 */
5588	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5589	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5590
5591	/* Create Receive Queue for header */
5592	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5593				      phba->sli4_hba.rq_ecount);
5594	if (!qdesc) {
5595		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5596				"0506 Failed allocate receive HRQ\n");
5597		goto out_free_fcp_wq;
5598	}
5599	phba->sli4_hba.hdr_rq = qdesc;
5600
5601	/* Create Receive Queue for data */
5602	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5603				      phba->sli4_hba.rq_ecount);
5604	if (!qdesc) {
5605		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5606				"0507 Failed allocate receive DRQ\n");
5607		goto out_free_hdr_rq;
5608	}
5609	phba->sli4_hba.dat_rq = qdesc;
5610
5611	return 0;
5612
5613out_free_hdr_rq:
5614	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5615	phba->sli4_hba.hdr_rq = NULL;
5616out_free_fcp_wq:
5617	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5618		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5619		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5620	}
5621	kfree(phba->sli4_hba.fcp_wq);
5622out_free_els_wq:
5623	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5624	phba->sli4_hba.els_wq = NULL;
5625out_free_mbx_wq:
5626	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5627	phba->sli4_hba.mbx_wq = NULL;
5628out_free_fcp_cq:
5629	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5630		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5631		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5632	}
5633	kfree(phba->sli4_hba.fcp_cq);
5634out_free_els_cq:
5635	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5636	phba->sli4_hba.els_cq = NULL;
5637out_free_mbx_cq:
5638	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5639	phba->sli4_hba.mbx_cq = NULL;
5640out_free_fp_eq:
5641	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5642		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5643		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5644	}
5645	kfree(phba->sli4_hba.fp_eq);
5646out_free_sp_eq:
5647	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5648	phba->sli4_hba.sp_eq = NULL;
5649out_error:
5650	return -ENOMEM;
5651}
5652
5653/**
5654 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5655 * @phba: pointer to lpfc hba data structure.
5656 *
5657 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5658 * operation.
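 *
 * This frees only the host-side queue structures allocated by
 * lpfc_sli4_queue_create(); queues that have been set up on the port are
 * torn down separately by lpfc_sli4_queue_unset().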
5664 **/
5665static void
5666lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5667{
5668	int fcp_qidx;
5669
5670	/* Release mailbox command work queue */
5671	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5672	phba->sli4_hba.mbx_wq = NULL;
5673
5674	/* Release ELS work queue */
5675	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5676	phba->sli4_hba.els_wq = NULL;
5677
5678	/* Release FCP work queue */
5679	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5680		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5681	kfree(phba->sli4_hba.fcp_wq);
5682	phba->sli4_hba.fcp_wq = NULL;
5683
5684	/* Release unsolicited receive queue */
5685	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5686	phba->sli4_hba.hdr_rq = NULL;
5687	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5688	phba->sli4_hba.dat_rq = NULL;
5689
5690	/* Release ELS complete queue */
5691	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5692	phba->sli4_hba.els_cq = NULL;
5693
5694	/* Release mailbox command complete queue */
5695	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5696	phba->sli4_hba.mbx_cq = NULL;
5697
5698	/* Release FCP response complete queue */
5699	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5700		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5701	kfree(phba->sli4_hba.fcp_cq);
5702	phba->sli4_hba.fcp_cq = NULL;
5703
5704	/* Release fast-path event queue */
5705	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5706		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5707	kfree(phba->sli4_hba.fp_eq);
5708	phba->sli4_hba.fp_eq = NULL;
5709
5710	/* Release slow-path event queue */
5711	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5712	phba->sli4_hba.sp_eq = NULL;
5713
5714	return;
5715}
5716
5717/**
5718 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5719 * @phba: pointer to lpfc hba data structure.
5720 *
5721 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5722 * operation.
5723 *
5724 * Return codes
5725 *	0 - successful
5726 *	ENOMEM - No available memory
5727 *	EIO - The mailbox failed to complete successfully.
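 *
 * The queues must be set up parent-first: the EQs before the CQs bound to
 * them, and the CQs before the MQ, WQs, and RQ pair bound to those CQs,
 * since each create operation names its parent queue.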
5728 **/
5729int
5730lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5731{
5732	int rc = -ENOMEM;
5733	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5734	int fcp_cq_index = 0;
5735
5736	/*
5737	 * Set up Event Queues (EQs)
5738	 */
5739
5740	/* Set up slow-path event queue */
5741	if (!phba->sli4_hba.sp_eq) {
5742		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5743				"0520 Slow-path EQ not allocated\n");
5744		goto out_error;
5745	}
5746	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5747			    LPFC_SP_DEF_IMAX);
5748	if (rc) {
5749		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5750				"0521 Failed setup of slow-path EQ: "
5751				"rc = 0x%x\n", rc);
5752		goto out_error;
5753	}
5754	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5755			"2583 Slow-path EQ setup: queue-id=%d\n",
5756			phba->sli4_hba.sp_eq->queue_id);
5757
5758	/* Set up fast-path event queue */
5759	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5760		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5761			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5762					"0522 Fast-path EQ (%d) not "
5763					"allocated\n", fcp_eqidx);
5764			goto out_destroy_fp_eq;
5765		}
5766		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5767				    phba->cfg_fcp_imax);
5768		if (rc) {
5769			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5770					"0523 Failed setup of fast-path EQ "
5771					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5772			goto out_destroy_fp_eq;
5773		}
5774		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5775				"2584 Fast-path EQ setup: "
5776				"queue[%d]-id=%d\n", fcp_eqidx,
5777				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5778	}
5779
5780	/*
5781	 * Set up Complete Queues (CQs)
5782	 */
5783
5784	/* Set up slow-path MBOX Complete Queue as the first CQ */
5785	if (!phba->sli4_hba.mbx_cq) {
5786		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5787				"0528 Mailbox CQ not allocated\n");
5788		goto out_destroy_fp_eq;
5789	}
5790	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5791			    LPFC_MCQ, LPFC_MBOX);
5792	if (rc) {
5793		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5794				"0529 Failed setup of slow-path mailbox CQ: "
5795				"rc = 0x%x\n", rc);
5796		goto out_destroy_fp_eq;
5797	}
5798	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5799			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5800			phba->sli4_hba.mbx_cq->queue_id,
5801			phba->sli4_hba.sp_eq->queue_id);
5802
5803	/* Set up slow-path ELS Complete Queue */
5804	if (!phba->sli4_hba.els_cq) {
5805		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806				"0530 ELS CQ not allocated\n");
5807		goto out_destroy_mbx_cq;
5808	}
5809	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5810			    LPFC_WCQ, LPFC_ELS);
5811	if (rc) {
5812		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5813				"0531 Failed setup of slow-path ELS CQ: "
5814				"rc = 0x%x\n", rc);
5815		goto out_destroy_mbx_cq;
5816	}
5817	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5818			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5819			phba->sli4_hba.els_cq->queue_id,
5820			phba->sli4_hba.sp_eq->queue_id);
5821
5822	/* Set up fast-path FCP Response Complete Queue */
5823	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5824		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5825			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5826					"0526 Fast-path FCP CQ (%d) not "
5827					"allocated\n", fcp_cqidx);
5828			goto out_destroy_fcp_cq;
5829		}
5830		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5831				    phba->sli4_hba.fp_eq[fcp_cqidx],
5832				    LPFC_WCQ, LPFC_FCP);
5833		if (rc) {
5834			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5835					"0527 Failed setup of fast-path FCP "
5836					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5837			goto out_destroy_fcp_cq;
5838		}
5839		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5840				"2588 FCP CQ setup: cq[%d]-id=%d, "
5841				"parent eq[%d]-id=%d\n",
5842				fcp_cqidx,
5843				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5844				fcp_cqidx,
5845				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5846	}
5847
5848	/*
5849	 * Set up all the Work Queues (WQs)
5850	 */
5851
5852	/* Set up Mailbox Command Queue */
5853	if (!phba->sli4_hba.mbx_wq) {
5854		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5855				"0538 Slow-path MQ not allocated\n");
5856		goto out_destroy_fcp_cq;
5857	}
5858	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5859			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
5860	if (rc) {
5861		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5862				"0539 Failed setup of slow-path MQ: "
5863				"rc = 0x%x\n", rc);
5864		goto out_destroy_fcp_cq;
5865	}
5866	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5867			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5868			phba->sli4_hba.mbx_wq->queue_id,
5869			phba->sli4_hba.mbx_cq->queue_id);
5870
5871	/* Set up slow-path ELS Work Queue */
5872	if (!phba->sli4_hba.els_wq) {
5873		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5874				"0536 Slow-path ELS WQ not allocated\n");
5875		goto out_destroy_mbx_wq;
5876	}
5877	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5878			    phba->sli4_hba.els_cq, LPFC_ELS);
5879	if (rc) {
5880		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5881				"0537 Failed setup of slow-path ELS WQ: "
5882				"rc = 0x%x\n", rc);
5883		goto out_destroy_mbx_wq;
5884	}
5885	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5886			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5887			phba->sli4_hba.els_wq->queue_id,
5888			phba->sli4_hba.els_cq->queue_id);
5889
5890	/* Set up fast-path FCP Work Queue */
5891	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5892		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5893			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5894					"0534 Fast-path FCP WQ (%d) not "
5895					"allocated\n", fcp_wqidx);
5896			goto out_destroy_fcp_wq;
5897		}
5898		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5899				    phba->sli4_hba.fcp_cq[fcp_cq_index],
5900				    LPFC_FCP);
5901		if (rc) {
5902			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5903					"0535 Failed setup of fast-path FCP "
5904					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5905			goto out_destroy_fcp_wq;
5906		}
5907		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5908				"2591 FCP WQ setup: wq[%d]-id=%d, "
5909				"parent cq[%d]-id=%d\n",
5910				fcp_wqidx,
5911				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5912				fcp_cq_index,
5913				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5914		/* Round robin FCP Work Queue's Completion Queue assignment */
5915		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5916	}
5917
5918	/*
5919	 * Set up Receive Queue (RQ)
5920	 */
5921	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5922		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5923				"0540 Receive Queue not allocated\n");
5924		goto out_destroy_fcp_wq;
5925	}
5926	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5927			    phba->sli4_hba.els_cq, LPFC_USOL);
5928	if (rc) {
5929		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5930				"0541 Failed setup of Receive Queue: "
5931				"rc = 0x%x\n", rc);
5932		goto out_destroy_fcp_wq;
5933	}
5934	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5935			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5936			"parent cq-id=%d\n",
5937			phba->sli4_hba.hdr_rq->queue_id,
5938			phba->sli4_hba.dat_rq->queue_id,
5939			phba->sli4_hba.els_cq->queue_id);
5940	return 0;
5941
5942out_destroy_fcp_wq:
5943	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5944		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5945	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5946out_destroy_mbx_wq:
5947	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5948out_destroy_fcp_cq:
5949	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5950		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5951	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5952out_destroy_mbx_cq:
5953	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5954out_destroy_fp_eq:
5955	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5956		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5957	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5958out_error:
5959	return rc;
5960}
5961
5962/**
5963 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5964 * @phba: pointer to lpfc hba data structure.
5965 *
5966 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5967 * operation.
5968 **/
5974void
5975lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5976{
5977	int fcp_qidx;
5978
5979	/* Unset mailbox command work queue */
5980	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5981	/* Unset ELS work queue */
5982	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5983	/* Unset unsolicited receive queue */
5984	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5985	/* Unset FCP work queue */
5986	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5987		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5988	/* Unset mailbox command complete queue */
5989	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5990	/* Unset ELS complete queue */
5991	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5992	/* Unset FCP response complete queue */
5993	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5994		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5995	/* Unset fast-path event queue */
5996	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5997		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5998	/* Unset slow-path event queue */
5999	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6000}
6001
6002/**
6003 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6004 * @phba: pointer to lpfc hba data structure.
6005 *
6006 * This routine is invoked to allocate and set up a pool of completion queue
6007 * events. The body of the completion queue event is a completion queue
6008 * entry (CQE). For now, this pool is used for the interrupt service routine
6009 * to queue the following HBA completion queue events for the worker thread
6010 * to process:
6011 *   - Mailbox asynchronous events
6012 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
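 * The pool is sized at four events per completion queue entry
 * (4 * cq_ecount), matching the allocation loop below.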
6013 *
6014 * Return codes
6015 *	0 - successful
6016 *	-ENOMEM - No available memory
6017 **/
6018static int
6019lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6020{
6021	struct lpfc_cq_event *cq_event;
6022	int i;
6023
6024	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6025		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6026		if (!cq_event)
6027			goto out_pool_create_fail;
6028		list_add_tail(&cq_event->list,
6029			      &phba->sli4_hba.sp_cqe_event_pool);
6030	}
6031	return 0;
6032
6033out_pool_create_fail:
6034	lpfc_sli4_cq_event_pool_destroy(phba);
6035	return -ENOMEM;
6036}
6037
6038/**
6039 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6040 * @phba: pointer to lpfc hba data structure.
6041 *
6042 * This routine is invoked to free the pool of completion queue events at
6043 * driver unload time. Note that it is the responsibility of the driver
6044 * cleanup routine to free all the outstanding completion-queue events
6045 * allocated from this pool back into the pool before invoking this routine
6046 * to destroy the pool.
6047 **/
6048static void
6049lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6050{
6051	struct lpfc_cq_event *cq_event, *next_cq_event;
6052
6053	list_for_each_entry_safe(cq_event, next_cq_event,
6054				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6055		list_del(&cq_event->list);
6056		kfree(cq_event);
6057	}
6058}
6059
6060/**
6061 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6062 * @phba: pointer to lpfc hba data structure.
6063 *
6064 * This routine is the lock-free version of the API invoked to allocate a
6065 * completion-queue event from the free pool.
6066 *
6067 * Return: Pointer to the newly allocated completion-queue event if successful
6068 *         NULL otherwise.
6069 **/
6070struct lpfc_cq_event *
6071__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6072{
6073	struct lpfc_cq_event *cq_event = NULL;
6074
6075	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6076			 struct lpfc_cq_event, list);
6077	return cq_event;
6078}
6079
6080/**
6081 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6082 * @phba: pointer to lpfc hba data structure.
6083 *
6084 * This routine is the locked version of the API invoked to allocate a
6085 * completion-queue event from the free pool.
6086 *
6087 * Return: Pointer to the newly allocated completion-queue event if successful
6088 *         NULL otherwise.
6089 **/
6090struct lpfc_cq_event *
6091lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6092{
6093	struct lpfc_cq_event *cq_event;
6094	unsigned long iflags;
6095
6096	spin_lock_irqsave(&phba->hbalock, iflags);
6097	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6098	spin_unlock_irqrestore(&phba->hbalock, iflags);
6099	return cq_event;
6100}
6101
6102/**
6103 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6104 * @phba: pointer to lpfc hba data structure.
6105 * @cq_event: pointer to the completion queue event to be freed.
6106 *
6107 * This routine is the lock-free version of the API invoked to release a
6108 * completion-queue event back into the free pool.
6109 **/
6110void
6111__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6112			     struct lpfc_cq_event *cq_event)
6113{
6114	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6115}
6116
6117/**
6118 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6119 * @phba: pointer to lpfc hba data structure.
6120 * @cq_event: pointer to the completion queue event to be freed.
6121 *
6122 * This routine is the locked version of the API invoked to release a
6123 * completion-queue event back into the free pool.
6124 **/
6125void
6126lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6127			   struct lpfc_cq_event *cq_event)
6128{
6129	unsigned long iflags;
6130	spin_lock_irqsave(&phba->hbalock, iflags);
6131	__lpfc_sli4_cq_event_release(phba, cq_event);
6132	spin_unlock_irqrestore(&phba->hbalock, iflags);
6133}
6134
6135/**
6136 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6137 * @phba: pointer to lpfc hba data structure.
6138 *
6139 * This routine is invoked to free all the pending completion-queue events
6140 * back into the free pool for device reset.
6141 **/
6142static void
6143lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6144{
6145	LIST_HEAD(cqelist);
6146	struct lpfc_cq_event *cqe;
6147	unsigned long iflags;
6148
6149	/* Retrieve all the pending WCQEs from pending WCQE lists */
6150	spin_lock_irqsave(&phba->hbalock, iflags);
6151	/* Pending FCP XRI abort events */
6152	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6153			 &cqelist);
6154	/* Pending ELS XRI abort events */
6155	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6156			 &cqelist);
6157	/* Pending async events */
6158	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6159			 &cqelist);
6160	spin_unlock_irqrestore(&phba->hbalock, iflags);
6161
6162	while (!list_empty(&cqelist)) {
6163		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6164		lpfc_sli4_cq_event_release(phba, cqe);
6165	}
6166}
6167
6168/**
6169 * lpfc_pci_function_reset - Reset pci function.
6170 * @phba: pointer to lpfc hba data structure.
6171 *
6172 * This routine is invoked to request a PCI function reset. It will destroy
6173 * all resources assigned to the PCI function which originates this request.
6174 *
6175 * Return codes
6176 *	0 - successful
6177 *	ENOMEM - No available memory
6178 *	EIO - The mailbox failed to complete successfully.
6179 **/
6180int
6181lpfc_pci_function_reset(struct lpfc_hba *phba)
6182{
6183	LPFC_MBOXQ_t *mboxq;
6184	uint32_t rc = 0;
6185	uint32_t shdr_status, shdr_add_status;
6186	union lpfc_sli4_cfg_shdr *shdr;
6187
6188	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6189	if (!mboxq) {
6190		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6191				"0494 Unable to allocate memory for issuing "
6192				"SLI_FUNCTION_RESET mailbox command\n");
6193		return -ENOMEM;
6194	}
6195
6196	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6197	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6198			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6199			 LPFC_SLI4_MBX_EMBED);
6200	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6201	shdr = (union lpfc_sli4_cfg_shdr *)
6202		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6203	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6204	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6205	if (rc != MBX_TIMEOUT)
6206		mempool_free(mboxq, phba->mbox_mem_pool);
6207	if (shdr_status || shdr_add_status || rc) {
6208		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6209				"0495 SLI_FUNCTION_RESET mailbox failed with "
6210				"status x%x add_status x%x, mbx status x%x\n",
6211				shdr_status, shdr_add_status, rc);
6212		rc = -ENXIO;
6213	}
6214	return rc;
6215}
6216
6217/**
6218 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6219 * @phba: pointer to lpfc hba data structure.
6220 * @cnt: number of nop mailbox commands to send.
6221 *
6222 * This routine is invoked to send a number @cnt of NOP mailbox commands and
6223 * wait for each command to complete.
6224 *
6225 * Return: the number of NOP mailbox commands completed.
6226 **/
6227static int
6228lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6229{
6230	LPFC_MBOXQ_t *mboxq;
6231	int length, cmdsent;
6232	uint32_t mbox_tmo;
6233	uint32_t rc = 0;
6234	uint32_t shdr_status, shdr_add_status;
6235	union lpfc_sli4_cfg_shdr *shdr;
6236
6237	if (cnt == 0) {
6238		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6239				"2518 Requested to send 0 NOP mailbox cmd\n");
6240		return cnt;
6241	}
6242
6243	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6244	if (!mboxq) {
6245		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6246				"2519 Unable to allocate memory for issuing "
6247				"NOP mailbox command\n");
6248		return 0;
6249	}
6250
6251	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6252	length = (sizeof(struct lpfc_mbx_nop) -
6253		  sizeof(struct lpfc_sli4_cfg_mhdr));
6254	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6255			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6256
6257	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6258	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6259		if (!phba->sli4_hba.intr_enable)
6260			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6261		else
6262			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6263		if (rc == MBX_TIMEOUT)
6264			break;
6265		/* Check return status */
6266		shdr = (union lpfc_sli4_cfg_shdr *)
6267			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6268		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6269		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6270					 &shdr->response);
6271		if (shdr_status || shdr_add_status || rc) {
6272			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6273					"2520 NOP mailbox command failed "
6274					"status x%x add_status x%x mbx "
6275					"status x%x\n", shdr_status,
6276					shdr_add_status, rc);
6277			break;
6278		}
6279	}
6280
6281	if (rc != MBX_TIMEOUT)
6282		mempool_free(mboxq, phba->mbox_mem_pool);
6283
6284	return cmdsent;
6285}
6286
6287/**
6288 * lpfc_sli4_fcfi_unreg - Unregister fcfi from the device
6289 * @phba: pointer to lpfc hba data structure.
6290 * @fcfi: fcf index.
6291 *
6292 * This routine is invoked to unregister a FCFI from the device.
6293 **/
6294void
6295lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6296{
6297	LPFC_MBOXQ_t *mbox;
6298	uint32_t mbox_tmo;
6299	int rc;
6300	unsigned long flags;
6301
6302	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6303
6304	if (!mbox)
6305		return;
6306
6307	lpfc_unreg_fcfi(mbox, fcfi);
6308
6309	if (!phba->sli4_hba.intr_enable)
6310		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6311	else {
6312		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6313		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6314	}
6315	if (rc != MBX_TIMEOUT)
6316		mempool_free(mbox, phba->mbox_mem_pool);
6317	if (rc != MBX_SUCCESS)
6318		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6319				"2517 Unregister FCFI command failed "
6320				"status %d, mbxStatus x%x\n", rc,
6321				bf_get(lpfc_mqe_status, &mbox->u.mqe));
6322	else {
6323		spin_lock_irqsave(&phba->hbalock, flags);
6324		/* Mark the FCFI as no longer registered */
6325		phba->fcf.fcf_flag &=
6326			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6327		spin_unlock_irqrestore(&phba->hbalock, flags);
6328	}
6329}
6330
6331/**
6332 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6333 * @phba: pointer to lpfc hba data structure.
6334 *
6335 * This routine is invoked to set up the PCI device memory space for device
6336 * with SLI-4 interface spec.
6337 *
6338 * Return codes
6339 *	0 - successful
6340 *	other values - error
6341 **/
6342static int
6343lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6344{
6345	struct pci_dev *pdev;
6346	unsigned long bar0map_len, bar1map_len, bar2map_len;
6347	int error = -ENODEV;
6348
6349	/* Obtain PCI device reference */
6350	if (!phba->pcidev)
6351		return error;
6352	else
6353		pdev = phba->pcidev;
6354
6355	/* Set the device DMA mask size */
6356	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6357	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6358		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6359		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6360			return error;
6361		}
6362	}
6363
6364	/* Get the bus addresses of the SLI4 device BAR0, BAR1, and BAR2 and
6365	 * the number of bytes required by each mapping. They map to the PCI
6366	 * BAR regions 0 (or 1), 2, and 4 of the SLI4 device.
6367	 */
6368	if (pci_resource_start(pdev, 0)) {
6369		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6370		bar0map_len = pci_resource_len(pdev, 0);
6371	} else {
6372		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6373		bar0map_len = pci_resource_len(pdev, 1);
6374	}
6375	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6376	bar1map_len = pci_resource_len(pdev, 2);
6377
6378	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6379	bar2map_len = pci_resource_len(pdev, 4);
6380
6381	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6382	phba->sli4_hba.conf_regs_memmap_p =
6383		ioremap(phba->pci_bar0_map, bar0map_len);
6384	if (!phba->sli4_hba.conf_regs_memmap_p) {
6385		dev_printk(KERN_ERR, &pdev->dev,
6386			   "ioremap failed for SLI4 PCI config registers.\n");
6387		goto out;
6388	}
6389
6390	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6391	phba->sli4_hba.ctrl_regs_memmap_p =
6392		ioremap(phba->pci_bar1_map, bar1map_len);
6393	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6394		dev_printk(KERN_ERR, &pdev->dev,
6395			   "ioremap failed for SLI4 HBA control registers.\n");
6396		goto out_iounmap_conf;
6397	}
6398
6399	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6400	phba->sli4_hba.drbl_regs_memmap_p =
6401		ioremap(phba->pci_bar2_map, bar2map_len);
6402	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6403		dev_printk(KERN_ERR, &pdev->dev,
6404			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6405		goto out_iounmap_ctrl;
6406	}
6407
6408	/* Set up BAR0 PCI config space register memory map */
6409	lpfc_sli4_bar0_register_memmap(phba);
6410
6411	/* Set up BAR1 register memory map */
6412	lpfc_sli4_bar1_register_memmap(phba);
6413
6414	/* Set up BAR2 register memory map */
6415	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6416	if (error)
6417		goto out_iounmap_all;
6418
6419	return 0;
6420
6421out_iounmap_all:
6422	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6423out_iounmap_ctrl:
6424	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6425out_iounmap_conf:
6426	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6427out:
6428	return error;
6429}
6430
6431/**
6432 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6433 * @phba: pointer to lpfc hba data structure.
6434 *
6435 * This routine is invoked to unset the PCI device memory space for device
6436 * with SLI-4 interface spec.
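 * It iounmap()s the doorbell, control, and config register regions mapped
 * by lpfc_sli4_pci_mem_setup().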
6437 **/
6438static void
6439lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6440{
6441	struct pci_dev *pdev;
6442
6443	/* Obtain PCI device reference */
6444	if (!phba->pcidev)
6445		return;
6446	else
6447		pdev = phba->pcidev;
6448
6449	/* Free coherent DMA memory allocated */
6450
6451	/* Unmap I/O memory space */
6452	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6453	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6454	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6455
6456	return;
6457}
6458
6459/**
6460 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6461 * @phba: pointer to lpfc hba data structure.
6462 *
6463 * This routine is invoked to enable the MSI-X interrupt vectors to device
6464 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6465 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6466 * invoked, enables either all or nothing, depending on the current
6467 * availability of PCI vector resources. The device driver is responsible
6468 * for calling the individual request_irq() to register each MSI-X vector
6469 * with an interrupt handler, which is done in this function. Note that
6470 * later when the device is unloading, the driver should always call
6471 * free_irq() on all MSI-X vectors it has done request_irq() on before
6472 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
6473 * the device will be left with MSI-X enabled, leaking its vectors.
6474 *
6475 * Return codes
6476 *	0 - successful
6477 *	other values - error
6478 **/
6479static int
6480lpfc_sli_enable_msix(struct lpfc_hba *phba)
6481{
6482	int rc, i;
6483	LPFC_MBOXQ_t *pmb;
6484
6485	/* Set up MSI-X multi-message vectors */
6486	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6487		phba->msix_entries[i].entry = i;
6488
6489	/* Configure MSI-X capability structure */
6490	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6491			     ARRAY_SIZE(phba->msix_entries));
6492	if (rc) {
6493		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6494				"0420 PCI enable MSI-X failed (%d)\n", rc);
6495		goto msi_fail_out;
6496	}
6497	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6498		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6499				"0477 MSI-X entry[%d]: vector=x%x "
6500				"message=%d\n", i,
6501				phba->msix_entries[i].vector,
6502				phba->msix_entries[i].entry);
6503	/*
6504	 * Assign MSI-X vectors to interrupt handlers
6505	 */
6506
6507	/* vector-0 is associated to slow-path handler */
6508	rc = request_irq(phba->msix_entries[0].vector,
6509			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6510			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6511	if (rc) {
6512		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6513				"0421 MSI-X slow-path request_irq failed "
6514				"(%d)\n", rc);
6515		goto msi_fail_out;
6516	}
6517
6518	/* vector-1 is associated to fast-path handler */
6519	rc = request_irq(phba->msix_entries[1].vector,
6520			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6521			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6522
6523	if (rc) {
6524		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6525				"0429 MSI-X fast-path request_irq failed "
6526				"(%d)\n", rc);
6527		goto irq_fail_out;
6528	}
6529
6530	/*
6531	 * Configure HBA MSI-X attention conditions to messages
6532	 */
6533	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6534
6535	if (!pmb) {
6536		rc = -ENOMEM;
6537		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6538				"0474 Unable to allocate memory for issuing "
6539				"MBOX_CONFIG_MSI command\n");
6540		goto mem_fail_out;
6541	}
6542	rc = lpfc_config_msi(phba, pmb);
6543	if (rc)
6544		goto mbx_fail_out;
6545	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6546	if (rc != MBX_SUCCESS) {
6547		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6548				"0351 Config MSI mailbox command failed, "
6549				"mbxCmd x%x, mbxStatus x%x\n",
6550				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6551		goto mbx_fail_out;
6552	}
6553
6554	/* Free memory allocated for mailbox command */
6555	mempool_free(pmb, phba->mbox_mem_pool);
6556	return rc;
6557
6558mbx_fail_out:
6559	/* Free memory allocated for mailbox command */
6560	mempool_free(pmb, phba->mbox_mem_pool);
6561
6562mem_fail_out:
6563	/* free the irq already requested */
6564	free_irq(phba->msix_entries[1].vector, phba);
6565
6566irq_fail_out:
6567	/* free the irq already requested */
6568	free_irq(phba->msix_entries[0].vector, phba);
6569
6570msi_fail_out:
6571	/* Unconfigure MSI-X capability structure */
6572	pci_disable_msix(phba->pcidev);
6573	return rc;
6574}
6575
6576/**
6577 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6578 * @phba: pointer to lpfc hba data structure.
6579 *
6580 * This routine is invoked to release the MSI-X vectors and then disable the
6581 * MSI-X interrupt mode to device with SLI-3 interface spec.
6582 **/
6583static void
6584lpfc_sli_disable_msix(struct lpfc_hba *phba)
6585{
6586	int i;
6587
6588	/* Free up MSI-X multi-message vectors */
6589	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6590		free_irq(phba->msix_entries[i].vector, phba);
6591	/* Disable MSI-X */
6592	pci_disable_msix(phba->pcidev);
6593
6594	return;
6595}
6596
6597/**
6598 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6599 * @phba: pointer to lpfc hba data structure.
6600 *
6601 * This routine is invoked to enable the MSI interrupt mode to device with
6602 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6603 * enable the MSI vector. The device driver is responsible for calling the
6604 * request_irq() to register the MSI vector with an interrupt handler,
6605 * which is done in this function.
6606 *
6607 * Return codes
6608 *	0 - successful
6609 *	other values - error
6610 */
6611static int
6612lpfc_sli_enable_msi(struct lpfc_hba *phba)
6613{
6614	int rc;
6615
6616	rc = pci_enable_msi(phba->pcidev);
6617	if (!rc)
6618		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6619				"0462 PCI enable MSI mode success.\n");
6620	else {
6621		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6622				"0471 PCI enable MSI mode failed (%d)\n", rc);
6623		return rc;
6624	}
6625
6626	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6627			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6628	if (rc) {
6629		pci_disable_msi(phba->pcidev);
6630		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6631				"0478 MSI request_irq failed (%d)\n", rc);
6632	}
6633	return rc;
6634}
6635
6636/**
6637 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6638 * @phba: pointer to lpfc hba data structure.
6639 *
6640 * This routine is invoked to disable the MSI interrupt mode to device with
6641 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
6642 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6643 * results in a BUG_ON() and the device will be left with MSI enabled,
6644 * leaking its vector.
6645 */
6646static void
6647lpfc_sli_disable_msi(struct lpfc_hba *phba)
6648{
6649	free_irq(phba->pcidev->irq, phba);
6650	pci_disable_msi(phba->pcidev);
6651	return;
6652}
6653
6654/**
6655 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6656 * @phba: pointer to lpfc hba data structure.
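 * @cfg_mode: interrupt mode requested via the cfg_use_msi module parameter
 *            (2 - MSI-X, 1 - MSI, 0 - INTx).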
6657 *
6658 * This routine is invoked to enable device interrupt and associate the
6659 * driver's interrupt handler(s) to interrupt vector(s) for a device with
6660 * SLI-3 interface spec. Depending on the interrupt mode configured for the
6661 * driver, the driver will try to fall back from the configured interrupt
6662 * mode to an interrupt mode which is supported by the platform, kernel,
6663 * and device, in the order of:
6664 * MSI-X -> MSI -> IRQ.
6665 *
6666 * Return codes
6667 *	0 - successful
6668 *	other values - error
6669 **/
6670static uint32_t
6671lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6672{
6673	uint32_t intr_mode = LPFC_INTR_ERROR;
6674	int retval;
6675
6676	if (cfg_mode == 2) {
6677		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6678		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6679		if (!retval) {
6680			/* Now, try to enable MSI-X interrupt mode */
6681			retval = lpfc_sli_enable_msix(phba);
6682			if (!retval) {
6683				/* Indicate initialization to MSI-X mode */
6684				phba->intr_type = MSIX;
6685				intr_mode = 2;
6686			}
6687		}
6688	}

6689
6690	/* Fallback to MSI if MSI-X initialization failed */
6691	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6692		retval = lpfc_sli_enable_msi(phba);
6693		if (!retval) {
6694			/* Indicate initialization to MSI mode */
6695			phba->intr_type = MSI;
6696			intr_mode = 1;
6697		}
6698	}
6699
6700	/* Fallback to INTx if both MSI-X/MSI initialization failed */
6701	if (phba->intr_type == NONE) {
6702		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6703				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6704		if (!retval) {
6705			/* Indicate initialization to INTx mode */
6706			phba->intr_type = INTx;
6707			intr_mode = 0;
6708		}
6709	}
6710	return intr_mode;
6711}
6712
6713/**
6714 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6715 * @phba: pointer to lpfc hba data structure.
6716 *
6717 * This routine is invoked to disable device interrupt and disassociate the
6718 * driver's interrupt handler(s) from interrupt vector(s) to device with
6719 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6720 * release the interrupt vector(s) for the message signaled interrupt.
6721 **/
6722static void
6723lpfc_sli_disable_intr(struct lpfc_hba *phba)
6724{
6725	/* Disable the currently initialized interrupt mode */
6726	if (phba->intr_type == MSIX)
6727		lpfc_sli_disable_msix(phba);
6728	else if (phba->intr_type == MSI)
6729		lpfc_sli_disable_msi(phba);
6730	else if (phba->intr_type == INTx)
6731		free_irq(phba->pcidev->irq, phba);
6732
6733	/* Reset interrupt management states */
6734	phba->intr_type = NONE;
6735	phba->sli.slistat.sli_intr = 0;
6736
6737	return;
6738}
6739
6740/**
6741 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6742 * @phba: pointer to lpfc hba data structure.
6743 *
6744 * This routine is invoked to enable the MSI-X interrupt vectors to device
6745 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6746 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6747 * enables either all or nothing, depending on the current availability of
6748 * PCI vector resources. The device driver is responsible for calling the
6749 * individual request_irq() to register each MSI-X vector with an interrupt
6750 * handler, which is done in this function.
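 * Vector 0 is bound to the slow-path (MQ/ELS) handler; vectors 1 through
 * cfg_eqn - 1 are bound to the fast-path FCP handler through the per-EQ
 * fcp_eq_hdl entries, as set up below.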
6751 * Note that later when the device is unloading, the driver should always
6752 * call free_irq() on all MSI-X vectors it has done request_irq() on before
6753 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
6754 * the device will be left with MSI-X enabled, leaking its vectors.
6755 *
6756 * Return codes
6757 *	0 - successful
6758 *	other values - error
6759 **/
6760static int
6761lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6762{
6763	int rc, index;
6764
6765	/* Set up MSI-X multi-message vectors */
6766	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6767		phba->sli4_hba.msix_entries[index].entry = index;
6768
6769	/* Configure MSI-X capability structure */
6770	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6771			     phba->sli4_hba.cfg_eqn);
6772	if (rc) {
6773		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6774				"0484 PCI enable MSI-X failed (%d)\n", rc);
6775		goto msi_fail_out;
6776	}
6777	/* Log MSI-X vector assignment */
6778	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6779		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6780				"0489 MSI-X entry[%d]: vector=x%x "
6781				"message=%d\n", index,
6782				phba->sli4_hba.msix_entries[index].vector,
6783				phba->sli4_hba.msix_entries[index].entry);
6784	/*
6785	 * Assign MSI-X vectors to interrupt handlers
6786	 */
6787
6788	/* The first vector must be associated with the slow-path handler for MQ */
6789	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6790			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6791			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6792	if (rc) {
6793		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6794				"0485 MSI-X slow-path request_irq failed "
6795				"(%d)\n", rc);
6796		goto msi_fail_out;
6797	}
6798
6799	/* The rest of the vector(s) are associated with fast-path handler(s) */
6800	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6801		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6802		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6803		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6804				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6805				 LPFC_FP_DRIVER_HANDLER_NAME,
6806				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6807		if (rc) {
6808			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6809					"0486 MSI-X fast-path (%d) "
6810					"request_irq failed (%d)\n", index, rc);
6811			goto cfg_fail_out;
6812		}
6813	}
6814
6815	return rc;
6816
6817cfg_fail_out:
6818	/* free the irq already requested */
6819	for (--index; index >= 1; index--)
6820		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6821			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6822
6823	/* free the irq already requested */
6824	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6825
6826msi_fail_out:
6827	/* Unconfigure MSI-X capability structure */
6828	pci_disable_msix(phba->pcidev);
6829	return rc;
6830}
6831
6832/**
6833 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6834 * @phba: pointer to lpfc hba data structure.
6835 *
6836 * This routine is invoked to release the MSI-X vectors and then disable the
6837 * MSI-X interrupt mode to device with SLI-4 interface spec.
6838 **/
6839static void
6840lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6841{
6842	int index;
6843
6844	/* Free up MSI-X multi-message vectors */
6845	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6846
6847	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6848		free_irq(phba->sli4_hba.msix_entries[index].vector,
6849			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6850	/* Disable MSI-X */
6851	pci_disable_msix(phba->pcidev);
6852
6853	return;
6854}
6855
6856/**
6857 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6858 * @phba: pointer to lpfc hba data structure.
6859 *
6860 * This routine is invoked to enable the MSI interrupt mode to device with
6861 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6862 * to enable the MSI vector. The device driver is responsible for calling
6863 * the request_irq() to register the MSI vector with an interrupt handler,
6864 * which is done in this function.
6865 *
6866 * Return codes
6867 *	0 - successful
6868 *	other values - error
6869 **/
6870static int
6871lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6872{
6873	int rc, index;
6874
6875	rc = pci_enable_msi(phba->pcidev);
6876	if (!rc)
6877		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6878				"0487 PCI enable MSI mode success.\n");
6879	else {
6880		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6881				"0488 PCI enable MSI mode failed (%d)\n", rc);
6882		return rc;
6883	}
6884
6885	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6886			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6887	if (rc) {
6888		pci_disable_msi(phba->pcidev);
6889		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6890				"0490 MSI request_irq failed (%d)\n", rc);
6891	}
6892
6893	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6894		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6895		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6896	}
6897
6898	return rc;
6899}
6900
6901/**
6902 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6903 * @phba: pointer to lpfc hba data structure.
6904 *
6905 * This routine is invoked to disable the MSI interrupt mode to device with
6906 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
6907 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6908 * results in a BUG_ON() and the device will be left with MSI enabled,
6909 * leaking its vector.
6910 **/
6911static void
6912lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6913{
6914	free_irq(phba->pcidev->irq, phba);
6915	pci_disable_msi(phba->pcidev);
6916	return;
6917}
6918
6919/**
6920 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6921 * @phba: pointer to lpfc hba data structure.
6922 *
6923 * This routine is invoked to enable device interrupt and associate the
6924 * driver's interrupt handler(s) to interrupt vector(s) for a device with
6925 * SLI-4 interface spec. Depending on the interrupt mode configured for the
6926 * driver, the driver will try to fall back from the configured interrupt
6927 * mode to an interrupt mode which is supported by the platform, kernel,
6928 * and device, in the order of:
6929 * MSI-X -> MSI -> IRQ.
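 * With MSI or INTx, the single vector services every event queue, so the
 * per-EQ fcp_eq_hdl entries are still initialized for the fast-path
 * handler.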
6930 *
6931 * Return codes
6932 *	0 - successful
6933 *	other values - error
6934 **/
6935static uint32_t
6936lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6937{
6938	uint32_t intr_mode = LPFC_INTR_ERROR;
6939	int retval, index;
6940
6941	if (cfg_mode == 2) {
6942		/* Preparation before conf_msi mbox cmd */
6943		retval = 0;
6944		if (!retval) {
6945			/* Now, try to enable MSI-X interrupt mode */
6946			retval = lpfc_sli4_enable_msix(phba);
6947			if (!retval) {
6948				/* Indicate initialization to MSI-X mode */
6949				phba->intr_type = MSIX;
6950				intr_mode = 2;
6951			}
6952		}
6953	}
6954
6955	/* Fallback to MSI if MSI-X initialization failed */
6956	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6957		retval = lpfc_sli4_enable_msi(phba);
6958		if (!retval) {
6959			/* Indicate initialization to MSI mode */
6960			phba->intr_type = MSI;
6961			intr_mode = 1;
6962		}
6963	}
6964
6965	/* Fallback to INTx if both MSI-X/MSI initialization failed */
6966	if (phba->intr_type == NONE) {
6967		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6968				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6969		if (!retval) {
6970			/* Indicate initialization to INTx mode */
6971			phba->intr_type = INTx;
6972			intr_mode = 0;
6973			for (index = 0; index < phba->cfg_fcp_eq_count;
6974			     index++) {
6975				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6976				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6977			}
6978		}
6979	}
6980	return intr_mode;
6981}
6982
6983/**
6984 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6985 * @phba: pointer to lpfc hba data structure.
6986 *
6987 * This routine is invoked to disable device interrupt and disassociate
6988 * the driver's interrupt handler(s) from interrupt vector(s) to device
6989 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6990 * will release the interrupt vector(s) for the message signaled interrupt.
6991 **/
6992static void
6993lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6994{
6995	/* Disable the currently initialized interrupt mode */
6996	if (phba->intr_type == MSIX)
6997		lpfc_sli4_disable_msix(phba);
6998	else if (phba->intr_type == MSI)
6999		lpfc_sli4_disable_msi(phba);
7000	else if (phba->intr_type == INTx)
7001		free_irq(phba->pcidev->irq, phba);
7002
7003	/* Reset interrupt management states */
7004	phba->intr_type = NONE;
7005	phba->sli.slistat.sli_intr = 0;
7006
7007	return;
7008}
7009
7010/**
7011 * lpfc_unset_hba - Unset SLI3 hba device initialization
7012 * @phba: pointer to lpfc hba data structure.
7013 *
7014 * This routine is invoked to unset the HBA device initialization steps for
7015 * a device with SLI-3 interface spec.
7016 **/
7017static void
7018lpfc_unset_hba(struct lpfc_hba *phba)
7019{
7020	struct lpfc_vport *vport = phba->pport;
7021	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7022
7023	spin_lock_irq(shost->host_lock);
7024	vport->load_flag |= FC_UNLOADING;
7025	spin_unlock_irq(shost->host_lock);
7026
7027	lpfc_stop_hba_timers(phba);
7028
7029	phba->pport->work_port_events = 0;
7030
7031	lpfc_sli_hba_down(phba);
7032
7033	lpfc_sli_brdrestart(phba);
7034
7035	lpfc_sli_disable_intr(phba);
7036
7037	return;
7038}
7039
7040/**
7041 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7042 * @phba: pointer to lpfc hba data structure.
7043 *
7044 * This routine is invoked to unset the HBA device initialization steps for
7045 * a device with SLI-4 interface spec.
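 *
 * Unlike the SLI-3 lpfc_unset_hba(), this variant does not stop the HBA
 * timers or restart the board here; it marks the port as unloading, brings
 * the SLI-4 port down, and releases the device interrupts.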
7046 **/
7047static void
7048lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7049{
7050	struct lpfc_vport *vport = phba->pport;
7051	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7052
7053	spin_lock_irq(shost->host_lock);
7054	vport->load_flag |= FC_UNLOADING;
7055	spin_unlock_irq(shost->host_lock);
7056
7057	phba->pport->work_port_events = 0;
7058
7059	lpfc_sli4_hba_down(phba);
7060
7061	lpfc_sli4_disable_intr(phba);
7062
7063	return;
7064}
7065
7066/**
7067 * lpfc_sli4_hba_unset - Unset the fcoe hba
7068 * @phba: Pointer to HBA context object.
7069 *
7070 * This function is called in the SLI4 code path to reset the HBA's FCoE
7071 * function. The caller is not required to hold any lock. This routine
7072 * issues the PCI function reset mailbox command to reset the FCoE function.
7073 * At the end of the function, it calls the lpfc_hba_down_post function to
7074 * free any pending commands.
7075 **/
7076static void
7077lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7078{
7079	int wait_cnt = 0;
7080	LPFC_MBOXQ_t *mboxq;
7081
7082	lpfc_stop_hba_timers(phba);
7083	phba->sli4_hba.intr_enable = 0;
7084
7085	/*
7086	 * Gracefully wait out the potential current outstanding asynchronous
7087	 * mailbox command.
7088	 */
7089
7090	/* First, block any pending async mailbox command from being posted */
7091	spin_lock_irq(&phba->hbalock);
7092	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7093	spin_unlock_irq(&phba->hbalock);
7094	/* Now, try to wait it out if we can */
7095	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7096		msleep(10);
7097		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7098			break;
7099	}
7100	/* Forcefully release the outstanding mailbox command if timed out */
7101	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7102		spin_lock_irq(&phba->hbalock);
7103		mboxq = phba->sli.mbox_active;
7104		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7105		__lpfc_mbox_cmpl_put(phba, mboxq);
7106		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7107		phba->sli.mbox_active = NULL;
7108		spin_unlock_irq(&phba->hbalock);
7109	}
7110
7111	/* Tear down the queues in the HBA */
7112	lpfc_sli4_queue_unset(phba);
7113
7114	/* Disable PCI subsystem interrupt */
7115	lpfc_sli4_disable_intr(phba);
7116
7117	/* Stop the kthread; the stop signal triggers work_done one more time */
7118	kthread_stop(phba->worker_thread);
7119
7120	/* Stop the SLI4 device port */
7121	phba->pport->work_port_events = 0;
7122}
7123
7124/**
7125 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7126 * @phba: Pointer to HBA context object.
7127 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7128 *
7129 * This function is called in the SLI4 code path to read the port's
7130 * sli4 capabilities.
7131 *
7132 * This function may be called from any context that can block-wait
7133 * for the completion. The expectation is that this routine is called
7134 * typically from probe_one or from the online routine.
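 *
 * Return codes
 *	0 - successful
 *	1 - failed to retrieve the SLI4 port parameters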
7135 **/
7136int
7137lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7138{
7139	int rc;
7140	struct lpfc_mqe *mqe;
7141	struct lpfc_pc_sli4_params *sli4_params;
7142	uint32_t mbox_tmo;
7143
7144	rc = 0;
7145	mqe = &mboxq->u.mqe;
7146
7147	/* Read the port's SLI4 Parameters port capabilities */
7148	lpfc_sli4_params(mboxq);
7149	if (!phba->sli4_hba.intr_enable)
7150		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7151	else {
7152		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7153		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7154	}
7155
7156	if (unlikely(rc))
7157		return 1;
7158
7159	sli4_params = &phba->sli4_hba.pc_sli4_params;
7160	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7161	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7162	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7163	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7164					     &mqe->un.sli4_params);
7165	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7166					     &mqe->un.sli4_params);
7167	sli4_params->proto_types = mqe->un.sli4_params.word3;
7168	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7169	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7170	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7171	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7172	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7173	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7174	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7175	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7176	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7177	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7178	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7179	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7180	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7181	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7182	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7183	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7184	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7185	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7186	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7187	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7188	return rc;
7189}
7190
7191/**
7192 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7193 * @pdev: pointer to PCI device
7194 * @pid: pointer to PCI device identifier
7195 *
7196 * This routine is to be called to attach a device with SLI-3 interface spec
7197 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7198 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
7199 * information of the device and driver to see if the driver states that it can
7200 * support this kind of device. If the match is successful, the driver core
7201 * invokes this routine. If this routine determines it can claim the HBA, it
7202 * does all the initialization that it needs to do to handle the HBA properly.
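 * On any failure, the out_* labels at the end of this routine unwind the
 * already-completed initialization steps in reverse order.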
7203 * 7204 * Return code 7205 * 0 - driver can claim the device 7206 * negative value - driver cannot claim the device 7207 **/ 7208static int __devinit 7209lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 7210{ 7211 struct lpfc_hba *phba; 7212 struct lpfc_vport *vport = NULL; 7213 struct Scsi_Host *shost = NULL; 7214 int error; 7215 uint32_t cfg_mode, intr_mode; 7216 7217 /* Allocate memory for HBA structure */ 7218 phba = lpfc_hba_alloc(pdev); 7219 if (!phba) 7220 return -ENOMEM; 7221 7222 /* Perform generic PCI device enabling operation */ 7223 error = lpfc_enable_pci_dev(phba); 7224 if (error) { 7225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7226 "1401 Failed to enable pci device.\n"); 7227 goto out_free_phba; 7228 } 7229 7230 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 7231 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 7232 if (error) 7233 goto out_disable_pci_dev; 7234 7235 /* Set up SLI-3 specific device PCI memory space */ 7236 error = lpfc_sli_pci_mem_setup(phba); 7237 if (error) { 7238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7239 "1402 Failed to set up pci memory space.\n"); 7240 goto out_disable_pci_dev; 7241 } 7242 7243 /* Set up phase-1 common device driver resources */ 7244 error = lpfc_setup_driver_resource_phase1(phba); 7245 if (error) { 7246 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7247 "1403 Failed to set up driver resource.\n"); 7248 goto out_unset_pci_mem_s3; 7249 } 7250 7251 /* Set up SLI-3 specific device driver resources */ 7252 error = lpfc_sli_driver_resource_setup(phba); 7253 if (error) { 7254 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7255 "1404 Failed to set up driver resource.\n"); 7256 goto out_unset_pci_mem_s3; 7257 } 7258 7259 /* Initialize and populate the iocb list per host */ 7260 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 7261 if (error) { 7262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7263 "1405 Failed to initialize iocb list.\n"); 7264 goto out_unset_driver_resource_s3; 7265 } 7266 7267 /* Set up common device driver resources */ 7268 error = lpfc_setup_driver_resource_phase2(phba); 7269 if (error) { 7270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7271 "1406 Failed to set up driver resource.\n"); 7272 goto out_free_iocb_list; 7273 } 7274 7275 /* Create SCSI host to the physical port */ 7276 error = lpfc_create_shost(phba); 7277 if (error) { 7278 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7279 "1407 Failed to create scsi host.\n"); 7280 goto out_unset_driver_resource; 7281 } 7282 7283 /* Configure sysfs attributes */ 7284 vport = phba->pport; 7285 error = lpfc_alloc_sysfs_attr(vport); 7286 if (error) { 7287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7288 "1476 Failed to allocate sysfs attr\n"); 7289 goto out_destroy_shost; 7290 } 7291 7292 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 7293 /* Now, try to enable interrupt and bring up the device */ 7294 cfg_mode = phba->cfg_use_msi; 7295 while (true) { 7296 /* Put device to a known state before enabling interrupt */ 7297 lpfc_stop_port(phba); 7298 /* Configure and enable interrupt */ 7299 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 7300 if (intr_mode == LPFC_INTR_ERROR) { 7301 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7302 "0431 Failed to enable interrupt.\n"); 7303 error = -ENODEV; 7304 goto out_free_sysfs_attr; 7305 } 7306 /* SLI-3 HBA setup */ 7307 if (lpfc_sli_hba_setup(phba)) { 7308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7309 "1477 Failed to set up hba\n"); 7310 error = -ENODEV; 7311 goto
out_remove_device; 7312 } 7313 7314 /* Wait 50ms for the interrupts of previous mailbox commands */ 7315 msleep(50); 7316 /* Check active interrupts on message signaled interrupts */ 7317 if (intr_mode == 0 || 7318 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 7319 /* Log the current active interrupt mode */ 7320 phba->intr_mode = intr_mode; 7321 lpfc_log_intr_mode(phba, intr_mode); 7322 break; 7323 } else { 7324 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7325 "0447 Configure interrupt mode (%d) " 7326 "failed active interrupt test.\n", 7327 intr_mode); 7328 /* Disable the current interrupt mode */ 7329 lpfc_sli_disable_intr(phba); 7330 /* Try next level of interrupt mode */ 7331 cfg_mode = --intr_mode; 7332 } 7333 } 7334 7335 /* Perform post initialization setup */ 7336 lpfc_post_init_setup(phba); 7337 7338 /* Check if there are static vports to be created. */ 7339 lpfc_create_static_vport(phba); 7340 7341 return 0; 7342 7343out_remove_device: 7344 lpfc_unset_hba(phba); 7345out_free_sysfs_attr: 7346 lpfc_free_sysfs_attr(vport); 7347out_destroy_shost: 7348 lpfc_destroy_shost(phba); 7349out_unset_driver_resource: 7350 lpfc_unset_driver_resource_phase2(phba); 7351out_free_iocb_list: 7352 lpfc_free_iocb_list(phba); 7353out_unset_driver_resource_s3: 7354 lpfc_sli_driver_resource_unset(phba); 7355out_unset_pci_mem_s3: 7356 lpfc_sli_pci_mem_unset(phba); 7357out_disable_pci_dev: 7358 lpfc_disable_pci_dev(phba); 7359 if (shost) 7360 scsi_host_put(shost); 7361out_free_phba: 7362 lpfc_hba_free(phba); 7363 return error; 7364} 7365 7366/** 7367 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 7368 * @pdev: pointer to PCI device 7369 * 7370 * This routine is to be called to detach a device with SLI-3 interface 7371 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7372 * removed from PCI bus, it performs all the necessary cleanup for the HBA 7373 * device to be removed from the PCI subsystem properly. 7374 **/ 7375static void __devexit 7376lpfc_pci_remove_one_s3(struct pci_dev *pdev) 7377{ 7378 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7379 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 7380 struct lpfc_vport **vports; 7381 struct lpfc_hba *phba = vport->phba; 7382 int i; 7383 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 7384 7385 spin_lock_irq(&phba->hbalock); 7386 vport->load_flag |= FC_UNLOADING; 7387 spin_unlock_irq(&phba->hbalock); 7388 7389 lpfc_free_sysfs_attr(vport); 7390 7391 /* Release all the vports against this physical port */ 7392 vports = lpfc_create_vport_work_array(phba); 7393 if (vports != NULL) 7394 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 7395 fc_vport_terminate(vports[i]->fc_vport); 7396 lpfc_destroy_vport_work_array(phba, vports); 7397 7398 /* Remove FC host and then SCSI host with the physical port */ 7399 fc_remove_host(shost); 7400 scsi_remove_host(shost); 7401 lpfc_cleanup(vport); 7402 7403 /* 7404 * Bring down the SLI Layer. This step disables all interrupts, 7405 * clears the rings, discards all mailbox commands, and resets 7406 * the HBA.
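 *
 * (Aside: the probe routine above walks the interrupt mode down one level
 * each time the active-interrupt test fails. Schematically, assuming the
 * usual cfg_use_msi encoding of 2 = MSI-X, 1 = MSI, 0 = INTx, which is an
 * assumption here and may differ across driver versions:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	... run the active-interrupt test ...
 *	lpfc_sli_disable_intr(phba);
 *	cfg_mode = --intr_mode;
 *
 * Mode 0, INTx, passes the test unconditionally, so the loop terminates.)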
7407 */ 7408 7409 /* HBA interrupt will be disabled after this call */ 7410 lpfc_sli_hba_down(phba); 7411 /* Stopping the kthread will trigger work_done one more time */ 7412 kthread_stop(phba->worker_thread); 7413 /* Final cleanup of txcmplq and reset the HBA */ 7414 lpfc_sli_brdrestart(phba); 7415 7416 lpfc_stop_hba_timers(phba); 7417 spin_lock_irq(&phba->hbalock); 7418 list_del_init(&vport->listentry); 7419 spin_unlock_irq(&phba->hbalock); 7420 7421 lpfc_debugfs_terminate(vport); 7422 7423 /* Disable interrupt */ 7424 lpfc_sli_disable_intr(phba); 7425 7426 pci_set_drvdata(pdev, NULL); 7427 scsi_host_put(shost); 7428 7429 /* 7430 * Call scsi_free before mem_free since scsi bufs are released to their 7431 * corresponding pools here. 7432 */ 7433 lpfc_scsi_free(phba); 7434 lpfc_mem_free_all(phba); 7435 7436 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7437 phba->hbqslimp.virt, phba->hbqslimp.phys); 7438 7439 /* Free resources associated with SLI2 interface */ 7440 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7441 phba->slim2p.virt, phba->slim2p.phys); 7442 7443 /* unmap adapter SLIM and Control Registers */ 7444 iounmap(phba->ctrl_regs_memmap_p); 7445 iounmap(phba->slim_memmap_p); 7446 7447 lpfc_hba_free(phba); 7448 7449 pci_release_selected_regions(pdev, bars); 7450 pci_disable_device(pdev); 7451} 7452 7453/** 7454 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 7455 * @pdev: pointer to PCI device 7456 * @msg: power management message 7457 * 7458 * This routine is to be called from the kernel's PCI subsystem to support 7459 * system Power Management (PM) to device with SLI-3 interface spec. When 7460 * PM invokes this method, it quiesces the device by stopping the driver's 7461 * worker thread for the device, turning off the device's interrupt and DMA, 7462 * and bringing the device offline. Note that the driver implements only the 7463 * minimum PM requirements of a power-aware driver's PM support for 7464 * suspend/resume: all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 7465 * passed to the suspend() method call are treated as SUSPEND, and the driver 7466 * will fully reinitialize its device during the resume() method call. The 7467 * driver therefore sets the device to PCI_D3hot state in PCI config space 7468 * instead of setting it according to the @msg provided by the PM. 7469 * 7470 * Return code 7471 * 0 - driver suspended the device 7472 * Error otherwise 7473 **/ 7474static int 7475lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 7476{ 7477 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7478 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7479 7480 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7481 "0473 PCI device Power Management suspend.\n"); 7482 7483 /* Bring down the device */ 7484 lpfc_offline_prep(phba); 7485 lpfc_offline(phba); 7486 kthread_stop(phba->worker_thread); 7487 7488 /* Disable interrupt from device */ 7489 lpfc_sli_disable_intr(phba); 7490 7491 /* Save device state to PCI config space */ 7492 pci_save_state(pdev); 7493 pci_set_power_state(pdev, PCI_D3hot); 7494 7495 return 0; 7496} 7497 7498/** 7499 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 7500 * @pdev: pointer to PCI device 7501 * 7502 * This routine is to be called from the kernel's PCI subsystem to support 7503 * system Power Management (PM) to device with SLI-3 interface spec.
When PM 7504 * invokes this method, it restores the device's PCI config space state and 7505 * fully reinitializes the device and brings it online. Note that the 7506 * driver implements only the minimum PM requirements of a power-aware driver's 7507 * PM for suspend/resume: all the possible PM messages (SUSPEND, HIBERNATE, 7508 * FREEZE) passed to the suspend() method call are treated as SUSPEND, and the 7509 * driver will fully reinitialize its device during the resume() method call. 7510 * The device is therefore set to PCI_D0 directly in PCI config space before 7511 * restoring the state. 7512 * 7513 * Return code 7514 * 0 - driver resumed the device 7515 * Error otherwise 7516 **/ 7517static int 7518lpfc_pci_resume_one_s3(struct pci_dev *pdev) 7519{ 7520 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7521 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7522 uint32_t intr_mode; 7523 int error; 7524 7525 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7526 "0452 PCI device Power Management resume.\n"); 7527 7528 /* Restore device state from PCI config space */ 7529 pci_set_power_state(pdev, PCI_D0); 7530 pci_restore_state(pdev); 7531 7532 /* 7533 * The new kernel behavior of the pci_restore_state() API call clears 7534 * the device's saved_state flag, so the restored state must be saved again. 7535 */ 7536 pci_save_state(pdev); 7537 7538 if (pdev->is_busmaster) 7539 pci_set_master(pdev); 7540 7541 /* Startup the kernel thread for this host adapter. */ 7542 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7543 "lpfc_worker_%d", phba->brd_no); 7544 if (IS_ERR(phba->worker_thread)) { 7545 error = PTR_ERR(phba->worker_thread); 7546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7547 "0434 PM resume failed to start worker " 7548 "thread: error=x%x.\n", error); 7549 return error; 7550 } 7551 7552 /* Configure and enable interrupt */ 7553 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7554 if (intr_mode == LPFC_INTR_ERROR) { 7555 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7556 "0430 PM resume Failed to enable interrupt\n"); 7557 return -EIO; 7558 } else 7559 phba->intr_mode = intr_mode; 7560 7561 /* Restart HBA and bring it online */ 7562 lpfc_sli_brdrestart(phba); 7563 lpfc_online(phba); 7564 7565 /* Log the current active interrupt mode */ 7566 lpfc_log_intr_mode(phba, phba->intr_mode); 7567 7568 return 0; 7569} 7570 7571/** 7572 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 7573 * @phba: pointer to lpfc hba data structure. 7574 * 7575 * This routine is called to prepare the SLI3 device for PCI slot recover. It 7576 * aborts and stops all the on-going I/Os on the pci device. 7577 **/ 7578static void 7579lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 7580{ 7581 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7582 "2723 PCI channel I/O abort preparing for recovery\n"); 7583 /* Prepare for bringing HBA offline */ 7584 lpfc_offline_prep(phba); 7585 /* Clear sli active flag to prevent sysfs access to HBA */ 7586 spin_lock_irq(&phba->hbalock); 7587 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 7588 spin_unlock_irq(&phba->hbalock); 7589 /* Stop and flush all I/Os and bring HBA offline */ 7590 lpfc_offline(phba); 7591} 7592 7593/** 7594 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 7595 * @phba: pointer to lpfc hba data structure. 7596 * 7597 * This routine is called to prepare the SLI3 device for PCI slot reset. It 7598 * disables the device interrupt and PCI device, and aborts the internal FCP 7599 * pending I/Os.
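 *
 * (Aside on the PM pair above: resume must save state again after restoring
 * it, because newer kernels clear the saved_state flag inside
 * pci_restore_state(). The resulting sequence, condensed from the code
 * above, is:
 *
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 *	pci_save_state(pdev);
 *
 * mirroring the suspend side's pci_save_state() followed by
 * pci_set_power_state(pdev, PCI_D3hot).)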
7600 **/ 7601static void 7602lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 7603{ 7604 struct lpfc_sli *psli = &phba->sli; 7605 struct lpfc_sli_ring *pring; 7606 7607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7608 "2710 PCI channel disable preparing for reset\n"); 7609 /* Disable interrupt and pci device */ 7610 lpfc_sli_disable_intr(phba); 7611 pci_disable_device(phba->pcidev); 7612 /* 7613 * There may be I/Os dropped by the firmware. 7614 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer 7615 * retry them after re-establishing the link. 7616 */ 7617 pring = &psli->ring[psli->fcp_ring]; 7618 lpfc_sli_abort_iocb_ring(phba, pring); 7619} 7620 7621/** 7622 * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 7623 * @phba: pointer to lpfc hba data structure. 7624 * 7625 * This routine is called to prepare the SLI3 device for PCI slot permanently 7626 * disabling. It blocks the SCSI transport layer traffic and flushes the 7627 * pending FCP I/Os. 7628 **/ 7629static void 7630lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) 7631{ 7632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7633 "2711 PCI channel permanent disable for failure\n"); 7634 /* Clean up all driver's outstanding SCSI I/Os */ 7635 lpfc_sli_flush_fcp_rings(phba); 7636} 7637 7638/** 7639 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7640 * @pdev: pointer to PCI device. 7641 * @state: the current PCI connection state. 7642 * 7643 * This routine is called from the PCI subsystem for I/O error handling to 7644 * device with SLI-3 interface spec. This function is called by the PCI 7645 * subsystem after a PCI bus error affecting this device has been detected. 7646 * When this function is invoked, it will need to stop all the I/Os and 7647 * interrupt(s) to the device. Once that is done, it will return 7648 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 7649 * as desired. 7650 * 7651 * Return codes 7652 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 7653 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7654 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7655 **/ 7656static pci_ers_result_t 7657lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 7658{ 7659 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7660 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7661 7662 /* Block all SCSI devices' I/Os on the host */ 7663 lpfc_scsi_dev_block(phba); 7664 7665 switch (state) { 7666 case pci_channel_io_normal: 7667 /* Non-fatal error, prepare for recovery */ 7668 lpfc_sli_prep_dev_for_recover(phba); 7669 return PCI_ERS_RESULT_CAN_RECOVER; 7670 case pci_channel_io_frozen: 7671 /* Fatal error, prepare for slot reset */ 7672 lpfc_sli_prep_dev_for_reset(phba); 7673 return PCI_ERS_RESULT_NEED_RESET; 7674 case pci_channel_io_perm_failure: 7675 /* Permanent failure, prepare for device down */ 7676 lpfc_prep_dev_for_perm_failure(phba); 7677 return PCI_ERS_RESULT_DISCONNECT; 7678 default: 7679 /* Unknown state, prepare and request slot reset */ 7680 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7681 "0472 Unknown PCI error state: x%x\n", state); 7682 lpfc_sli_prep_dev_for_reset(phba); 7683 return PCI_ERS_RESULT_NEED_RESET; 7684 } 7685} 7686 7687/** 7688 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 7689 * @pdev: pointer to PCI device. 7690 * 7691 * This routine is called from the PCI subsystem for error handling to 7692 * device with SLI-3 interface spec.
This is called after PCI bus has been 7693 * reset to restart the PCI card from scratch, as if from a cold-boot. 7694 * During the PCI subsystem error recovery, after the driver returns 7695 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 7696 * recovery and then call this routine before calling the .resume method 7697 * to recover the device. This function will initialize the HBA device and 7698 * enable its interrupt, but it will just put the HBA in the offline state 7699 * without passing any I/O traffic. 7700 * 7701 * Return codes 7702 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7703 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7704 */ 7705static pci_ers_result_t 7706lpfc_io_slot_reset_s3(struct pci_dev *pdev) 7707{ 7708 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7709 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7710 struct lpfc_sli *psli = &phba->sli; 7711 uint32_t intr_mode; 7712 7713 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 7714 if (pci_enable_device_mem(pdev)) { 7715 printk(KERN_ERR "lpfc: Cannot re-enable " 7716 "PCI device after reset.\n"); 7717 return PCI_ERS_RESULT_DISCONNECT; 7718 } 7719 7720 pci_restore_state(pdev); 7721 7722 /* 7723 * The new kernel behavior of the pci_restore_state() API call clears 7724 * the device's saved_state flag, so the restored state must be saved again. 7725 */ 7726 pci_save_state(pdev); 7727 7728 if (pdev->is_busmaster) 7729 pci_set_master(pdev); 7730 7731 spin_lock_irq(&phba->hbalock); 7732 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7733 spin_unlock_irq(&phba->hbalock); 7734 7735 /* Configure and enable interrupt */ 7736 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7737 if (intr_mode == LPFC_INTR_ERROR) { 7738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7739 "0427 Cannot re-enable interrupt after " 7740 "slot reset.\n"); 7741 return PCI_ERS_RESULT_DISCONNECT; 7742 } else 7743 phba->intr_mode = intr_mode; 7744 7745 /* Take device offline; this will perform cleanup */ 7746 lpfc_offline(phba); 7747 lpfc_sli_brdrestart(phba); 7748 7749 /* Log the current active interrupt mode */ 7750 lpfc_log_intr_mode(phba, phba->intr_mode); 7751 7752 return PCI_ERS_RESULT_RECOVERED; 7753} 7754 7755/** 7756 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 7757 * @pdev: pointer to PCI device 7758 * 7759 * This routine is called from the PCI subsystem for error handling to device 7760 * with SLI-3 interface spec. It is called when kernel error recovery tells 7761 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 7762 * error recovery. After this call, traffic can start to flow from this device 7763 * again. 7764 */ 7765static void 7766lpfc_io_resume_s3(struct pci_dev *pdev) 7767{ 7768 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7769 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7770 7771 /* Bring the device online */ 7772 lpfc_online(phba); 7773 7774 /* Clean up Advanced Error Reporting (AER) if needed */ 7775 if (phba->hba_flag & HBA_AER_ENABLED) 7776 pci_cleanup_aer_uncorrect_error_status(pdev); 7777} 7778 7779/** 7780 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 7781 * @phba: pointer to lpfc hba data structure.
7782 * 7783 * Returns the number of ELS/CT IOCBs to reserve, based on the configured max_xri (for example, 50 IOCBs are reserved when max_xri is 512). 7784 **/ 7785int 7786lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 7787{ 7788 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 7789 7790 if (phba->sli_rev == LPFC_SLI_REV4) { 7791 if (max_xri <= 100) 7792 return 10; 7793 else if (max_xri <= 256) 7794 return 25; 7795 else if (max_xri <= 512) 7796 return 50; 7797 else if (max_xri <= 1024) 7798 return 100; 7799 else 7800 return 150; 7801 } else 7802 return 0; 7803} 7804 7805/** 7806 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 7807 * @pdev: pointer to PCI device 7808 * @pid: pointer to PCI device identifier 7809 * 7810 * This routine is called from the kernel's PCI subsystem to device with 7811 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 7812 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 7813 * information of the device and driver to see if the driver states that it 7814 * can support this kind of device. If the match is successful, the driver 7815 * core invokes this routine. If this routine determines it can claim the HBA, 7816 * it does all the initialization that it needs to do to handle the HBA 7817 * properly. 7818 * 7819 * Return code 7820 * 0 - driver can claim the device 7821 * negative value - driver cannot claim the device 7822 **/ 7823static int __devinit 7824lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 7825{ 7826 struct lpfc_hba *phba; 7827 struct lpfc_vport *vport = NULL; 7828 struct Scsi_Host *shost = NULL; 7829 int error; 7830 uint32_t cfg_mode, intr_mode; 7831 int mcnt; 7832 7833 /* Allocate memory for HBA structure */ 7834 phba = lpfc_hba_alloc(pdev); 7835 if (!phba) 7836 return -ENOMEM; 7837 7838 /* Perform generic PCI device enabling operation */ 7839 error = lpfc_enable_pci_dev(phba); 7840 if (error) { 7841 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7842 "1409 Failed to enable pci device.\n"); 7843 goto out_free_phba; 7844 } 7845 7846 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 7847 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 7848 if (error) 7849 goto out_disable_pci_dev; 7850 7851 /* Set up SLI-4 specific device PCI memory space */ 7852 error = lpfc_sli4_pci_mem_setup(phba); 7853 if (error) { 7854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7855 "1410 Failed to set up pci memory space.\n"); 7856 goto out_disable_pci_dev; 7857 } 7858 7859 /* Set up phase-1 common device driver resources */ 7860 error = lpfc_setup_driver_resource_phase1(phba); 7861 if (error) { 7862 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7863 "1411 Failed to set up driver resource.\n"); 7864 goto out_unset_pci_mem_s4; 7865 } 7866 7867 /* Set up SLI-4 Specific device driver resources */ 7868 error = lpfc_sli4_driver_resource_setup(phba); 7869 if (error) { 7870 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7871 "1412 Failed to set up driver resource.\n"); 7872 goto out_unset_pci_mem_s4; 7873 } 7874 7875 /* Initialize and populate the iocb list per host */ 7876 error = lpfc_init_iocb_list(phba, 7877 phba->sli4_hba.max_cfg_param.max_xri); 7878 if (error) { 7879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7880 "1413 Failed to initialize iocb list.\n"); 7881 goto out_unset_driver_resource_s4; 7882 } 7883 7884 /* Set up common device driver resources */ 7885 error = lpfc_setup_driver_resource_phase2(phba); 7886 if (error) { 7887 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7888 "1414 Failed to set up driver resource.\n"); 7889 goto out_free_iocb_list;
7890 } 7891 7892 /* Create SCSI host to the physical port */ 7893 error = lpfc_create_shost(phba); 7894 if (error) { 7895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7896 "1415 Failed to create scsi host.\n"); 7897 goto out_unset_driver_resource; 7898 } 7899 7900 /* Configure sysfs attributes */ 7901 vport = phba->pport; 7902 error = lpfc_alloc_sysfs_attr(vport); 7903 if (error) { 7904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7905 "1416 Failed to allocate sysfs attr\n"); 7906 goto out_destroy_shost; 7907 } 7908 7909 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 7910 /* Now, trying to enable interrupt and bring up the device */ 7911 cfg_mode = phba->cfg_use_msi; 7912 while (true) { 7913 /* Put device to a known state before enabling interrupt */ 7914 lpfc_stop_port(phba); 7915 /* Configure and enable interrupt */ 7916 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 7917 if (intr_mode == LPFC_INTR_ERROR) { 7918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7919 "0426 Failed to enable interrupt.\n"); 7920 error = -ENODEV; 7921 goto out_free_sysfs_attr; 7922 } 7923 /* Default to single FCP EQ for non-MSI-X */ 7924 if (phba->intr_type != MSIX) 7925 phba->cfg_fcp_eq_count = 1; 7926 /* Set up SLI-4 HBA */ 7927 if (lpfc_sli4_hba_setup(phba)) { 7928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7929 "1421 Failed to set up hba\n"); 7930 error = -ENODEV; 7931 goto out_disable_intr; 7932 } 7933 7934 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 7935 if (intr_mode != 0) 7936 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 7937 LPFC_ACT_INTR_CNT); 7938 7939 /* Check active interrupts received only for MSI/MSI-X */ 7940 if (intr_mode == 0 || 7941 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 7942 /* Log the current active interrupt mode */ 7943 phba->intr_mode = intr_mode; 7944 lpfc_log_intr_mode(phba, intr_mode); 7945 break; 7946 } 7947 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7948 "0451 Configure interrupt mode (%d) " 7949 "failed active interrupt test.\n", 7950 intr_mode); 7951 /* Unset the preivous SLI-4 HBA setup */ 7952 lpfc_sli4_unset_hba(phba); 7953 /* Try next level of interrupt mode */ 7954 cfg_mode = --intr_mode; 7955 } 7956 7957 /* Perform post initialization setup */ 7958 lpfc_post_init_setup(phba); 7959 7960 /* Check if there are static vports to be created. */ 7961 lpfc_create_static_vport(phba); 7962 7963 return 0; 7964 7965out_disable_intr: 7966 lpfc_sli4_disable_intr(phba); 7967out_free_sysfs_attr: 7968 lpfc_free_sysfs_attr(vport); 7969out_destroy_shost: 7970 lpfc_destroy_shost(phba); 7971out_unset_driver_resource: 7972 lpfc_unset_driver_resource_phase2(phba); 7973out_free_iocb_list: 7974 lpfc_free_iocb_list(phba); 7975out_unset_driver_resource_s4: 7976 lpfc_sli4_driver_resource_unset(phba); 7977out_unset_pci_mem_s4: 7978 lpfc_sli4_pci_mem_unset(phba); 7979out_disable_pci_dev: 7980 lpfc_disable_pci_dev(phba); 7981 if (shost) 7982 scsi_host_put(shost); 7983out_free_phba: 7984 lpfc_hba_free(phba); 7985 return error; 7986} 7987 7988/** 7989 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 7990 * @pdev: pointer to PCI device 7991 * 7992 * This routine is called from the kernel's PCI subsystem to device with 7993 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 7994 * removed from PCI bus, it performs all the necessary cleanup for the HBA 7995 * device to be removed from the PCI subsystem properly. 
7996 **/ 7997static void __devexit 7998lpfc_pci_remove_one_s4(struct pci_dev *pdev) 7999{ 8000 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8001 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8002 struct lpfc_vport **vports; 8003 struct lpfc_hba *phba = vport->phba; 8004 int i; 8005 8006 /* Mark the device unloading flag */ 8007 spin_lock_irq(&phba->hbalock); 8008 vport->load_flag |= FC_UNLOADING; 8009 spin_unlock_irq(&phba->hbalock); 8010 8011 /* Free the HBA sysfs attributes */ 8012 lpfc_free_sysfs_attr(vport); 8013 8014 /* Release all the vports against this physical port */ 8015 vports = lpfc_create_vport_work_array(phba); 8016 if (vports != NULL) 8017 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8018 fc_vport_terminate(vports[i]->fc_vport); 8019 lpfc_destroy_vport_work_array(phba, vports); 8020 8021 /* Remove FC host and then SCSI host with the physical port */ 8022 fc_remove_host(shost); 8023 scsi_remove_host(shost); 8024 8025 /* Perform cleanup on the physical port */ 8026 lpfc_cleanup(vport); 8027 8028 /* 8029 * Bring down the SLI Layer. This step disables all interrupts, 8030 * clears the rings, discards all mailbox commands, and resets 8031 * the HBA FCoE function. 8032 */ 8033 lpfc_debugfs_terminate(vport); 8034 lpfc_sli4_hba_unset(phba); 8035 8036 spin_lock_irq(&phba->hbalock); 8037 list_del_init(&vport->listentry); 8038 spin_unlock_irq(&phba->hbalock); 8039 8040 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi 8041 * buffers are released to their corresponding pools here. 8042 */ 8043 lpfc_scsi_free(phba); 8044 lpfc_sli4_driver_resource_unset(phba); 8045 8046 /* Unmap adapter Control and Doorbell registers */ 8047 lpfc_sli4_pci_mem_unset(phba); 8048 8049 /* Release PCI resources and disable device's PCI function */ 8050 scsi_host_put(shost); 8051 lpfc_disable_pci_dev(phba); 8052 8053 /* Finally, free the driver's device data structure */ 8054 lpfc_hba_free(phba); 8055 8056 return; 8057} 8058 8059/** 8060 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 8061 * @pdev: pointer to PCI device 8062 * @msg: power management message 8063 * 8064 * This routine is called from the kernel's PCI subsystem to support system 8065 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 8066 * this method, it quiesces the device by stopping the driver's worker 8067 * thread for the device, turning off the device's interrupt and DMA, and 8068 * bringing the device offline. Note that the driver implements only the minimum 8069 * PM requirements of a power-aware driver's PM support for suspend/resume: all 8070 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() 8071 * method call are treated as SUSPEND, and the driver will fully 8072 * reinitialize its device during the resume() method call. The driver will set 8073 * the device to PCI_D3hot state in PCI config space instead of setting it 8074 * according to the @msg provided by the PM.
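 *
 * (Aside: the remove path above is ordering-sensitive; SCSI buffers must be
 * returned to their corresponding DMA pools before those pools are torn
 * down. Condensed from the code above, in this order:
 *
 *	lpfc_scsi_free(phba);
 *	lpfc_sli4_driver_resource_unset(phba);
 * )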
8075 * 8076 * Return code 8077 * 0 - driver suspended the device 8078 * Error otherwise 8079 **/ 8080static int 8081lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 8082{ 8083 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8084 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8085 8086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8087 "0298 PCI device Power Management suspend.\n"); 8088 8089 /* Bring down the device */ 8090 lpfc_offline_prep(phba); 8091 lpfc_offline(phba); 8092 kthread_stop(phba->worker_thread); 8093 8094 /* Disable interrupt from device */ 8095 lpfc_sli4_disable_intr(phba); 8096 8097 /* Save device state to PCI config space */ 8098 pci_save_state(pdev); 8099 pci_set_power_state(pdev, PCI_D3hot); 8100 8101 return 0; 8102} 8103 8104/** 8105 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 8106 * @pdev: pointer to PCI device 8107 * 8108 * This routine is called from the kernel's PCI subsystem to support system 8109 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 8110 * this method, it restores the device's PCI config space state and fully 8111 * reinitializes the device and brings it online. Note that the driver 8112 * implements only the minimum PM requirements of a power-aware driver's PM for 8113 * suspend/resume: all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8114 * passed to the suspend() method call are treated as SUSPEND and the driver 8115 * will fully reinitialize its device during the resume() method call. The device 8116 * will be set to PCI_D0 directly in PCI config space before restoring the 8117 * state. 8118 * 8119 * Return code 8120 * 0 - driver resumed the device 8121 * Error otherwise 8122 **/ 8123static int 8124lpfc_pci_resume_one_s4(struct pci_dev *pdev) 8125{ 8126 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8127 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8128 uint32_t intr_mode; 8129 int error; 8130 8131 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8132 "0292 PCI device Power Management resume.\n"); 8133 8134 /* Restore device state from PCI config space */ 8135 pci_set_power_state(pdev, PCI_D0); 8136 pci_restore_state(pdev); 8137 8138 /* 8139 * The new kernel behavior of the pci_restore_state() API call clears 8140 * the device's saved_state flag, so the restored state must be saved again. 8141 */ 8142 pci_save_state(pdev); 8143 8144 if (pdev->is_busmaster) 8145 pci_set_master(pdev); 8146 8147 /* Startup the kernel thread for this host adapter.
*/ 8148 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8149 "lpfc_worker_%d", phba->brd_no); 8150 if (IS_ERR(phba->worker_thread)) { 8151 error = PTR_ERR(phba->worker_thread); 8152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8153 "0293 PM resume failed to start worker " 8154 "thread: error=x%x.\n", error); 8155 return error; 8156 } 8157 8158 /* Configure and enable interrupt */ 8159 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8160 if (intr_mode == LPFC_INTR_ERROR) { 8161 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8162 "0294 PM resume Failed to enable interrupt\n"); 8163 return -EIO; 8164 } else 8165 phba->intr_mode = intr_mode; 8166 8167 /* Restart HBA and bring it online */ 8168 lpfc_sli_brdrestart(phba); 8169 lpfc_online(phba); 8170 8171 /* Log the current active interrupt mode */ 8172 lpfc_log_intr_mode(phba, phba->intr_mode); 8173 8174 return 0; 8175} 8176 8177/** 8178 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 8179 * @pdev: pointer to PCI device. 8180 * @state: the current PCI connection state. 8181 * 8182 * This routine is called from the PCI subsystem for error handling to device 8183 * with SLI-4 interface spec. This function is called by the PCI subsystem 8184 * after a PCI bus error affecting this device has been detected. When this 8185 * function is invoked, it will need to stop all the I/Os and interrupt(s) 8186 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 8187 * for the PCI subsystem to perform proper recovery as desired. 8188 * 8189 * Return codes 8190 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8191 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8192 **/ 8193static pci_ers_result_t 8194lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 8195{ 8196 return PCI_ERS_RESULT_NEED_RESET; 8197} 8198 8199/** 8200 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch 8201 * @pdev: pointer to PCI device. 8202 * 8203 * This routine is called from the PCI subsystem for error handling to device 8204 * with SLI-4 interface spec. It is called after PCI bus has been reset to 8205 * restart the PCI card from scratch, as if from a cold-boot. During the 8206 * PCI subsystem error recovery, after the driver returns 8207 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8208 * recovery and then call this routine before calling the .resume method to 8209 * recover the device. This function will initialize the HBA device and enable 8210 * its interrupt, but it will just put the HBA in the offline state without 8211 * passing any I/O traffic. 8212 * 8213 * Return codes 8214 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8215 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8216 */ 8217static pci_ers_result_t 8218lpfc_io_slot_reset_s4(struct pci_dev *pdev) 8219{ 8220 return PCI_ERS_RESULT_RECOVERED; 8221} 8222 8223/** 8224 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 8225 * @pdev: pointer to PCI device 8226 * 8227 * This routine is called from the PCI subsystem for error handling to device 8228 * with SLI-4 interface spec. It is called when kernel error recovery tells 8229 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8230 * error recovery. After this call, traffic can start to flow from this device 8231 * again.
8232 **/ 8233static void 8234lpfc_io_resume_s4(struct pci_dev *pdev) 8235{ 8236 return; 8237} 8238 8239/** 8240 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 8241 * @pdev: pointer to PCI device 8242 * @pid: pointer to PCI device identifier 8243 * 8244 * This routine is to be registered to the kernel's PCI subsystem. When an 8245 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 8246 * at PCI device-specific information of the device and driver to see if the 8247 * driver states that it can support this kind of device. If the match is 8248 * successful, the driver core invokes this routine. This routine dispatches 8249 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 8250 * do all the initialization that it needs to do to handle the HBA device 8251 * properly. 8252 * 8253 * Return code 8254 * 0 - driver can claim the device 8255 * negative value - driver cannot claim the device 8256 **/ 8257static int __devinit 8258lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 8259{ 8260 int rc; 8261 struct lpfc_sli_intf intf; 8262 8263 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 8264 return -ENODEV; 8265 8266 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8267 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 8268 rc = lpfc_pci_probe_one_s4(pdev, pid); 8269 else 8270 rc = lpfc_pci_probe_one_s3(pdev, pid); 8271 8272 return rc; 8273} 8274 8275/** 8276 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 8277 * @pdev: pointer to PCI device 8278 * 8279 * This routine is to be registered to the kernel's PCI subsystem. When an 8280 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 8281 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 8282 * remove routine, which will perform all the necessary cleanup for the 8283 * device to be removed from the PCI subsystem properly. 8284 **/ 8285static void __devexit 8286lpfc_pci_remove_one(struct pci_dev *pdev) 8287{ 8288 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8289 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8290 8291 switch (phba->pci_dev_grp) { 8292 case LPFC_PCI_DEV_LP: 8293 lpfc_pci_remove_one_s3(pdev); 8294 break; 8295 case LPFC_PCI_DEV_OC: 8296 lpfc_pci_remove_one_s4(pdev); 8297 break; 8298 default: 8299 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8300 "1424 Invalid PCI device group: 0x%x\n", 8301 phba->pci_dev_grp); 8302 break; 8303 } 8304 return; 8305} 8306 8307/** 8308 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 8309 * @pdev: pointer to PCI device 8310 * @msg: power management message 8311 * 8312 * This routine is to be registered to the kernel's PCI subsystem to support 8313 * system Power Management (PM). When PM invokes this method, it dispatches 8314 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 8315 * suspend the device.
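 *
 * (Aside: this suspend entry point and its siblings dispatch on
 * phba->pci_dev_grp, but lpfc_pci_probe_one() above cannot: no phba exists
 * yet at probe time. It instead reads the SLI_INTF register from PCI config
 * space to choose the code path; roughly, with sli4 as an illustrative
 * local name:
 *
 *	pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0);
 *	sli4 = (bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
 *	       (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4);
 *
 * with sli4 selecting lpfc_pci_probe_one_s4() over the s3 variant.)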
8316 * 8317 * Return code 8318 * 0 - driver suspended the device 8319 * Error otherwise 8320 **/ 8321static int 8322lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 8323{ 8324 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8325 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8326 int rc = -ENODEV; 8327 8328 switch (phba->pci_dev_grp) { 8329 case LPFC_PCI_DEV_LP: 8330 rc = lpfc_pci_suspend_one_s3(pdev, msg); 8331 break; 8332 case LPFC_PCI_DEV_OC: 8333 rc = lpfc_pci_suspend_one_s4(pdev, msg); 8334 break; 8335 default: 8336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8337 "1425 Invalid PCI device group: 0x%x\n", 8338 phba->pci_dev_grp); 8339 break; 8340 } 8341 return rc; 8342} 8343 8344/** 8345 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 8346 * @pdev: pointer to PCI device 8347 * 8348 * This routine is to be registered to the kernel's PCI subsystem to support 8349 * system Power Management (PM). When PM invokes this method, it dispatches 8350 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 8351 * resume the device. 8352 * 8353 * Return code 8354 * 0 - driver resumed the device 8355 * Error otherwise 8356 **/ 8357static int 8358lpfc_pci_resume_one(struct pci_dev *pdev) 8359{ 8360 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8361 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8362 int rc = -ENODEV; 8363 8364 switch (phba->pci_dev_grp) { 8365 case LPFC_PCI_DEV_LP: 8366 rc = lpfc_pci_resume_one_s3(pdev); 8367 break; 8368 case LPFC_PCI_DEV_OC: 8369 rc = lpfc_pci_resume_one_s4(pdev); 8370 break; 8371 default: 8372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8373 "1426 Invalid PCI device group: 0x%x\n", 8374 phba->pci_dev_grp); 8375 break; 8376 } 8377 return rc; 8378} 8379 8380/** 8381 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 8382 * @pdev: pointer to PCI device. 8383 * @state: the current PCI connection state. 8384 * 8385 * This routine is registered to the PCI subsystem for error handling. This 8386 * function is called by the PCI subsystem after a PCI bus error affecting 8387 * this device has been detected. When this routine is invoked, it dispatches 8388 * the action to the proper SLI-3 or SLI-4 device error detected handling 8389 * routine, which will perform the proper error detected operation. 8390 * 8391 * Return codes 8392 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8393 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8394 **/ 8395static pci_ers_result_t 8396lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 8397{ 8398 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8399 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8400 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8401 8402 switch (phba->pci_dev_grp) { 8403 case LPFC_PCI_DEV_LP: 8404 rc = lpfc_io_error_detected_s3(pdev, state); 8405 break; 8406 case LPFC_PCI_DEV_OC: 8407 rc = lpfc_io_error_detected_s4(pdev, state); 8408 break; 8409 default: 8410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8411 "1427 Invalid PCI device group: 0x%x\n", 8412 phba->pci_dev_grp); 8413 break; 8414 } 8415 return rc; 8416} 8417 8418/** 8419 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch 8420 * @pdev: pointer to PCI device. 8421 * 8422 * This routine is registered to the PCI subsystem for error handling.
This 8423 * function is called after PCI bus has been reset to restart the PCI card 8424 * from scratch, as if from a cold-boot. When this routine is invoked, it 8425 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 8426 * routine, which will perform the proper device reset. 8427 * 8428 * Return codes 8429 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8430 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8431 **/ 8432static pci_ers_result_t 8433lpfc_io_slot_reset(struct pci_dev *pdev) 8434{ 8435 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8436 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8437 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8438 8439 switch (phba->pci_dev_grp) { 8440 case LPFC_PCI_DEV_LP: 8441 rc = lpfc_io_slot_reset_s3(pdev); 8442 break; 8443 case LPFC_PCI_DEV_OC: 8444 rc = lpfc_io_slot_reset_s4(pdev); 8445 break; 8446 default: 8447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8448 "1428 Invalid PCI device group: 0x%x\n", 8449 phba->pci_dev_grp); 8450 break; 8451 } 8452 return rc; 8453} 8454 8455/** 8456 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 8457 * @pdev: pointer to PCI device 8458 * 8459 * This routine is registered to the PCI subsystem for error handling. It 8460 * is called when kernel error recovery tells the lpfc driver that it is 8461 * OK to resume normal PCI operation after PCI bus error recovery. When 8462 * this routine is invoked, it dispatches the action to the proper SLI-3 8463 * or SLI-4 device io_resume routine, which will resume the device operation. 8464 **/ 8465static void 8466lpfc_io_resume(struct pci_dev *pdev) 8467{ 8468 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8469 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8470 8471 switch (phba->pci_dev_grp) { 8472 case LPFC_PCI_DEV_LP: 8473 lpfc_io_resume_s3(pdev); 8474 break; 8475 case LPFC_PCI_DEV_OC: 8476 lpfc_io_resume_s4(pdev); 8477 break; 8478 default: 8479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8480 "1429 Invalid PCI device group: 0x%x\n", 8481 phba->pci_dev_grp); 8482 break; 8483 } 8484 return; 8485} 8486 8487static struct pci_device_id lpfc_id_table[] = { 8488 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 8489 PCI_ANY_ID, PCI_ANY_ID, }, 8490 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 8491 PCI_ANY_ID, PCI_ANY_ID, }, 8492 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 8493 PCI_ANY_ID, PCI_ANY_ID, }, 8494 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 8495 PCI_ANY_ID, PCI_ANY_ID, }, 8496 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 8497 PCI_ANY_ID, PCI_ANY_ID, }, 8498 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 8499 PCI_ANY_ID, PCI_ANY_ID, }, 8500 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 8501 PCI_ANY_ID, PCI_ANY_ID, }, 8502 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 8503 PCI_ANY_ID, PCI_ANY_ID, }, 8504 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 8505 PCI_ANY_ID, PCI_ANY_ID, }, 8506 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 8507 PCI_ANY_ID, PCI_ANY_ID, }, 8508 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 8509 PCI_ANY_ID, PCI_ANY_ID, }, 8510 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 8511 PCI_ANY_ID, PCI_ANY_ID, }, 8512 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 8513 PCI_ANY_ID, PCI_ANY_ID, }, 8514 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 8515 PCI_ANY_ID, PCI_ANY_ID, }, 8516 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 8517 PCI_ANY_ID, PCI_ANY_ID, }, 8518 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 8519 PCI_ANY_ID, PCI_ANY_ID, }, 8520 
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 8521 PCI_ANY_ID, PCI_ANY_ID, }, 8522 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 8523 PCI_ANY_ID, PCI_ANY_ID, }, 8524 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 8525 PCI_ANY_ID, PCI_ANY_ID, }, 8526 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 8527 PCI_ANY_ID, PCI_ANY_ID, }, 8528 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 8529 PCI_ANY_ID, PCI_ANY_ID, }, 8530 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 8531 PCI_ANY_ID, PCI_ANY_ID, }, 8532 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 8533 PCI_ANY_ID, PCI_ANY_ID, }, 8534 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 8535 PCI_ANY_ID, PCI_ANY_ID, }, 8536 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 8537 PCI_ANY_ID, PCI_ANY_ID, }, 8538 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 8539 PCI_ANY_ID, PCI_ANY_ID, }, 8540 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 8541 PCI_ANY_ID, PCI_ANY_ID, }, 8542 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 8543 PCI_ANY_ID, PCI_ANY_ID, }, 8544 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 8545 PCI_ANY_ID, PCI_ANY_ID, }, 8546 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 8547 PCI_ANY_ID, PCI_ANY_ID, }, 8548 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 8549 PCI_ANY_ID, PCI_ANY_ID, }, 8550 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 8551 PCI_ANY_ID, PCI_ANY_ID, }, 8552 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 8553 PCI_ANY_ID, PCI_ANY_ID, }, 8554 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 8555 PCI_ANY_ID, PCI_ANY_ID, }, 8556 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 8557 PCI_ANY_ID, PCI_ANY_ID, }, 8558 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 8559 PCI_ANY_ID, PCI_ANY_ID, }, 8560 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 8561 PCI_ANY_ID, PCI_ANY_ID, }, 8562 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 8563 PCI_ANY_ID, PCI_ANY_ID, }, 8564 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 8565 PCI_ANY_ID, PCI_ANY_ID, }, 8566 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 8567 PCI_ANY_ID, PCI_ANY_ID, }, 8568 { 0 } 8569}; 8570 8571MODULE_DEVICE_TABLE(pci, lpfc_id_table); 8572 8573static struct pci_error_handlers lpfc_err_handler = { 8574 .error_detected = lpfc_io_error_detected, 8575 .slot_reset = lpfc_io_slot_reset, 8576 .resume = lpfc_io_resume, 8577}; 8578 8579static struct pci_driver lpfc_driver = { 8580 .name = LPFC_DRIVER_NAME, 8581 .id_table = lpfc_id_table, 8582 .probe = lpfc_pci_probe_one, 8583 .remove = __devexit_p(lpfc_pci_remove_one), 8584 .suspend = lpfc_pci_suspend_one, 8585 .resume = lpfc_pci_resume_one, 8586 .err_handler = &lpfc_err_handler, 8587}; 8588 8589/** 8590 * lpfc_init - lpfc module initialization routine 8591 * 8592 * This routine is to be invoked when the lpfc module is loaded into the 8593 * kernel. The special kernel macro module_init() is used to indicate the 8594 * role of this routine to the kernel as lpfc module entry point. 
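 *
 * The body below acquires its resources in dependency order and unwinds in
 * reverse on failure; a condensed sketch of the code that follows:
 *
 *	lpfc_transport_template =
 *		fc_attach_transport(&lpfc_transport_functions);
 *	error = pci_register_driver(&lpfc_driver);
 *	if (error)
 *		fc_release_transport(lpfc_transport_template);
 *
 * (When NPIV is enabled, a second vport transport template is attached and
 * released with the same pattern.)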
8595 * 8596 * Return codes 8597 * 0 - successful 8598 * -ENOMEM - FC attach transport failed 8599 * all others - failed 8600 */ 8601static int __init 8602lpfc_init(void) 8603{ 8604 int error = 0; 8605 8606 printk(LPFC_MODULE_DESC "\n"); 8607 printk(LPFC_COPYRIGHT "\n"); 8608 8609 if (lpfc_enable_npiv) { 8610 lpfc_transport_functions.vport_create = lpfc_vport_create; 8611 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 8612 } 8613 lpfc_transport_template = 8614 fc_attach_transport(&lpfc_transport_functions); 8615 if (lpfc_transport_template == NULL) 8616 return -ENOMEM; 8617 if (lpfc_enable_npiv) { 8618 lpfc_vport_transport_template = 8619 fc_attach_transport(&lpfc_vport_transport_functions); 8620 if (lpfc_vport_transport_template == NULL) { 8621 fc_release_transport(lpfc_transport_template); 8622 return -ENOMEM; 8623 } 8624 } 8625 error = pci_register_driver(&lpfc_driver); 8626 if (error) { 8627 fc_release_transport(lpfc_transport_template); 8628 if (lpfc_enable_npiv) 8629 fc_release_transport(lpfc_vport_transport_template); 8630 } 8631 8632 return error; 8633} 8634 8635/** 8636 * lpfc_exit - lpfc module removal routine 8637 * 8638 * This routine is invoked when the lpfc module is removed from the kernel. 8639 * The special kernel macro module_exit() is used to indicate the role of 8640 * this routine to the kernel as lpfc module exit point. 8641 */ 8642static void __exit 8643lpfc_exit(void) 8644{ 8645 pci_unregister_driver(&lpfc_driver); 8646 fc_release_transport(lpfc_transport_template); 8647 if (lpfc_enable_npiv) 8648 fc_release_transport(lpfc_vport_transport_template); 8649 if (_dump_buf_data) { 8650 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 8651 "_dump_buf_data at 0x%p\n", 8652 (1L << _dump_buf_data_order), _dump_buf_data); 8653 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 8654 } 8655 8656 if (_dump_buf_dif) { 8657 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 8658 "_dump_buf_dif at 0x%p\n", 8659 (1L << _dump_buf_dif_order), _dump_buf_dif); 8660 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 8661 } 8662} 8663 8664module_init(lpfc_init); 8665module_exit(lpfc_exit); 8666MODULE_LICENSE("GPL"); 8667MODULE_DESCRIPTION(LPFC_MODULE_DESC); 8668MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 8669MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 8670
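/*
 * Note on autoloading: MODULE_DEVICE_TABLE(pci, lpfc_id_table) above
 * publishes the ID table as module aliases, so udev/modprobe can load lpfc
 * automatically when a matching adapter appears. An alias takes the form
 * (the device ID shown is a made-up placeholder; vendor 10DF is Emulex):
 *
 *	pci:v000010DFd0000F700sv*sd*bc*sc*i*
 */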