lpfc_init.c revision e40a02c12581f710877da372b5d7e15b68a1c5c3
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

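	/*
	 * Editorial note: for LightPulse (LC) HBAs the static "licensed"
	 * key above is byte-swapped to big-endian once and then copied
	 * into the READ_NVPARM reserved words before the mailbox command
	 * is issued below.
	 */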
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

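	/*
	 * VPD is fetched in chunks: each DUMP_MEMORY mailbox returns some
	 * number of words at the current offset, and the loop below keeps
	 * advancing until the firmware reports zero words or the
	 * DMP_VPD_SIZE staging buffer is full.
	 */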
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

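	/*
	 * For example (illustrative values): ver=5, rev=0, lev=2 with
	 * dist=3/num=0 is rendered "5.02", while dist=1/num=2 would be
	 * rendered "5.02a2".
	 */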
	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * overheated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
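	/*
	 * The low six bytes of the WWNN are expanded nibble by nibble into
	 * a twelve-character string: values 0-9 become '0'-'9' (0x30 + j)
	 * and values 10-15 become 'a'-'f' (0x61 + j - 10).
	 */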
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
				"1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

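	/*
	 * Three possibilities for the initial link state: if the link was
	 * administratively disabled, explicitly issue DOWN_LINK; if link
	 * bring-up is not suppressed, issue INIT_LINK now; otherwise leave
	 * the link down and let lpfc_hba_init_link() bring it up later.
	 */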
	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
					KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology,
		phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

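	/*
	 * With HBQs enabled the HBA manages receive buffers through host
	 * buffer queues, so they are all freed in one pass; otherwise the
	 * ELS buffers preposted by the driver must be reclaimed by hand.
	 */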
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expired
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA shall
 * be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

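	/*
	 * If some completion arrived within the last heart-beat interval,
	 * the adapter is demonstrably alive, so skip issuing a heart-beat
	 * and just rearm the timer (using the longer LPFC_HB_MBOX_TIMEOUT
	 * when a heart-beat is still outstanding).
	 */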
	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggers erratt. That could cause the I/Os
	 * dropped by the firmware. Error out the iocbs (I/O) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

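/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event through the
 * fc_host of the physical port so that a management application can be
 * notified of the board-level error.
 **/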
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

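	/*
	 * Three remaining cases: HS_FFER6 means the firmware asked for a
	 * restart, so take the port offline and bring it back; HS_CRIT_TEMP
	 * means the adapter is critically overheated and is taken offline;
	 * anything else is an unrecoverable adapter hardware error, which
	 * is reported and then also taken offline.
	 */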
	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error out the iocbs (I/O) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

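	/*
	 * rc identifies which setup step fails below (1 = mailbox alloc,
	 * 2 = dmabuf alloc, 3 = mbuf alloc, 4 = READ_LA issue) and is
	 * reported in log message 0300 on the error path.
	 */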
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
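	/*
	 * The buffer is scanned as standard PCI VPD resources: large
	 * resources 0x82 (Identifier String) and 0x91 (read/write VPD) are
	 * skipped over, 0x90 (read-only VPD) is parsed for the SN and V1-V4
	 * keywords, and the 0x78 small-resource end tag terminates the scan.
	 */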
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				}
				else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of 256
 * chars. It shall be returned with the model name, maximum speed, and the
 * host bus type. The @mdp passed into this function points to an array of
 * 80 chars. When the function returns, the @mdp will be filled with the
 * model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

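	/* If both strings are already populated (e.g. from the VPD V1/V2
	 * keywords parsed above), leave them untouched.
	 */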
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* oneConnect hbas require special processing; they are all
	 * initiators and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator, Port %s",
				m.name, m.function,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

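	/*
	 * missbufcnt carries over the buffers that could not be posted on a
	 * previous call; any shortfall from this pass (IOCB or DMA buffer
	 * allocation failure, or an issue error) is parked there again so a
	 * later call can retry. Each QUE_RING_BUF64_CN IOCB below carries up
	 * to two receive-buffer BDEs.
	 */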
Adapter"}; 1745 break; 1746 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1747 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1748 GE = 1; 1749 break; 1750 case PCI_DEVICE_ID_ZMID: 1751 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1752 break; 1753 case PCI_DEVICE_ID_ZSMB: 1754 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1755 break; 1756 case PCI_DEVICE_ID_LP101: 1757 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1758 break; 1759 case PCI_DEVICE_ID_LP10000S: 1760 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1761 break; 1762 case PCI_DEVICE_ID_LP11000S: 1763 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1764 break; 1765 case PCI_DEVICE_ID_LPE11000S: 1766 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1767 break; 1768 case PCI_DEVICE_ID_SAT: 1769 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1770 break; 1771 case PCI_DEVICE_ID_SAT_MID: 1772 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1773 break; 1774 case PCI_DEVICE_ID_SAT_SMB: 1775 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1776 break; 1777 case PCI_DEVICE_ID_SAT_DCSP: 1778 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1779 break; 1780 case PCI_DEVICE_ID_SAT_SCSP: 1781 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1782 break; 1783 case PCI_DEVICE_ID_SAT_S: 1784 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1785 break; 1786 case PCI_DEVICE_ID_HORNET: 1787 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1788 GE = 1; 1789 break; 1790 case PCI_DEVICE_ID_PROTEUS_VF: 1791 m = (typeof(m)){"LPev12000", "PCIe IOV", 1792 "Fibre Channel Adapter"}; 1793 break; 1794 case PCI_DEVICE_ID_PROTEUS_PF: 1795 m = (typeof(m)){"LPev12000", "PCIe IOV", 1796 "Fibre Channel Adapter"}; 1797 break; 1798 case PCI_DEVICE_ID_PROTEUS_S: 1799 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1800 "Fibre Channel Adapter"}; 1801 break; 1802 case PCI_DEVICE_ID_TIGERSHARK: 1803 oneConnect = 1; 1804 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1805 break; 1806 case PCI_DEVICE_ID_TOMCAT: 1807 oneConnect = 1; 1808 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1809 break; 1810 case PCI_DEVICE_ID_FALCON: 1811 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1812 "EmulexSecure Fibre"}; 1813 break; 1814 default: 1815 m = (typeof(m)){"Unknown", "", ""}; 1816 break; 1817 } 1818 1819 if (mdp && mdp[0] == '\0') 1820 snprintf(mdp, 79,"%s", m.name); 1821 /* oneConnect hba requires special processing, they are all initiators 1822 * and we put the port number on the end 1823 */ 1824 if (descp && descp[0] == '\0') { 1825 if (oneConnect) 1826 snprintf(descp, 255, 1827 "Emulex OneConnect %s, %s Initiator, Port %s", 1828 m.name, m.function, 1829 phba->Port); 1830 else 1831 snprintf(descp, 255, 1832 "Emulex %s %d%s %s %s", 1833 m.name, max_speed, (GE) ? "GE" : "Gb", 1834 m.bus, m.function); 1835 } 1836} 1837 1838/** 1839 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1840 * @phba: pointer to lpfc hba data structure. 1841 * @pring: pointer to a IOCB ring. 1842 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1843 * 1844 * This routine posts a given number of IOCBs with the associated DMA buffer 1845 * descriptors specified by the cnt argument to the given IOCB ring. 1846 * 1847 * Return codes 1848 * The number of IOCBs NOT able to be posted to the IOCB ring. 
1849 **/ 1850int 1851lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1852{ 1853 IOCB_t *icmd; 1854 struct lpfc_iocbq *iocb; 1855 struct lpfc_dmabuf *mp1, *mp2; 1856 1857 cnt += pring->missbufcnt; 1858 1859 /* While there are buffers to post */ 1860 while (cnt > 0) { 1861 /* Allocate buffer for command iocb */ 1862 iocb = lpfc_sli_get_iocbq(phba); 1863 if (iocb == NULL) { 1864 pring->missbufcnt = cnt; 1865 return cnt; 1866 } 1867 icmd = &iocb->iocb; 1868 1869 /* 2 buffers can be posted per command */ 1870 /* Allocate buffer to post */ 1871 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1872 if (mp1) 1873 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1874 if (!mp1 || !mp1->virt) { 1875 kfree(mp1); 1876 lpfc_sli_release_iocbq(phba, iocb); 1877 pring->missbufcnt = cnt; 1878 return cnt; 1879 } 1880 1881 INIT_LIST_HEAD(&mp1->list); 1882 /* Allocate buffer to post */ 1883 if (cnt > 1) { 1884 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1885 if (mp2) 1886 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1887 &mp2->phys); 1888 if (!mp2 || !mp2->virt) { 1889 kfree(mp2); 1890 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1891 kfree(mp1); 1892 lpfc_sli_release_iocbq(phba, iocb); 1893 pring->missbufcnt = cnt; 1894 return cnt; 1895 } 1896 1897 INIT_LIST_HEAD(&mp2->list); 1898 } else { 1899 mp2 = NULL; 1900 } 1901 1902 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1903 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1904 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1905 icmd->ulpBdeCount = 1; 1906 cnt--; 1907 if (mp2) { 1908 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1909 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1910 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1911 cnt--; 1912 icmd->ulpBdeCount = 2; 1913 } 1914 1915 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1916 icmd->ulpLe = 1; 1917 1918 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 1919 IOCB_ERROR) { 1920 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1921 kfree(mp1); 1922 cnt++; 1923 if (mp2) { 1924 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1925 kfree(mp2); 1926 cnt++; 1927 } 1928 lpfc_sli_release_iocbq(phba, iocb); 1929 pring->missbufcnt = cnt; 1930 return cnt; 1931 } 1932 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1933 if (mp2) 1934 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1935 } 1936 pring->missbufcnt = 0; 1937 return 0; 1938} 1939 1940/** 1941 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 1942 * @phba: pointer to lpfc hba data structure. 1943 * 1944 * This routine posts initial receive IOCB buffers to the ELS ring. The 1945 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 1946 * set to 64 IOCBs. 1947 * 1948 * Return codes 1949 * 0 - success (currently always success) 1950 **/ 1951static int 1952lpfc_post_rcv_buf(struct lpfc_hba *phba) 1953{ 1954 struct lpfc_sli *psli = &phba->sli; 1955 1956 /* Ring 0, ELS / CT buffers */ 1957 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 1958 /* Ring 2 - FCP no buffers needed */ 1959 1960 return 0; 1961} 1962 1963#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1964 1965/** 1966 * lpfc_sha_init - Set up initial array of hash table entries 1967 * @HashResultPointer: pointer to an array as hash table. 1968 * 1969 * This routine sets up the initial values to the array of hash table entries 1970 * for the LC HBAs. 
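 *
 * Note: the five constants loaded below are the standard SHA-1 initial hash
 * values (H0..H4 from FIPS 180-1); together with lpfc_sha_iterate() this
 * implements a plain SHA-1 transform for the LC HBA challenge/response key.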
1971 **/
1972static void
1973lpfc_sha_init(uint32_t * HashResultPointer)
1974{
1975	HashResultPointer[0] = 0x67452301;
1976	HashResultPointer[1] = 0xEFCDAB89;
1977	HashResultPointer[2] = 0x98BADCFE;
1978	HashResultPointer[3] = 0x10325476;
1979	HashResultPointer[4] = 0xC3D2E1F0;
1980}
1981
1982/**
1983 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1984 * @HashResultPointer: pointer to an initial/result hash table.
1985 * @HashWorkingPointer: pointer to a working hash table.
1986 *
1987 * This routine iterates an initial hash table pointed to by @HashResultPointer
1988 * with the values from the working hash table pointed to by @HashWorkingPointer.
1989 * The results are put back into the initial hash table and returned through
1990 * @HashResultPointer as the result hash table.
1991 **/
1992static void
1993lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1994{
1995	int t;
1996	uint32_t TEMP;
1997	uint32_t A, B, C, D, E;
1998	t = 16;
1999	do {
2000		HashWorkingPointer[t] =
2001		    S(1,
2002		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2003								     8] ^
2004		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2005	} while (++t <= 79);
2006	t = 0;
2007	A = HashResultPointer[0];
2008	B = HashResultPointer[1];
2009	C = HashResultPointer[2];
2010	D = HashResultPointer[3];
2011	E = HashResultPointer[4];
2012
2013	do {
2014		if (t < 20) {
2015			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2016		} else if (t < 40) {
2017			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2018		} else if (t < 60) {
2019			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2020		} else {
2021			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2022		}
2023		TEMP += S(5, A) + E + HashWorkingPointer[t];
2024		E = D;
2025		D = C;
2026		C = S(30, B);
2027		B = A;
2028		A = TEMP;
2029	} while (++t <= 79);
2030
2031	HashResultPointer[0] += A;
2032	HashResultPointer[1] += B;
2033	HashResultPointer[2] += C;
2034	HashResultPointer[3] += D;
2035	HashResultPointer[4] += E;
2036
2037}
2038
2039/**
2040 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2041 * @RandomChallenge: pointer to the entry of host challenge random number array.
2042 * @HashWorking: pointer to the entry of the working hash array.
2043 *
2044 * This routine calculates the working hash array referred to by @HashWorking
2045 * from the challenge random numbers associated with the host, referred to by
2046 * @RandomChallenge. The result is put into the entry of the working hash
2047 * array and returned by reference through @HashWorking.
2048 **/
2049static void
2050lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2051{
2052	*HashWorking = (*RandomChallenge ^ *HashWorking);
2053}
2054
2055/**
2056 * lpfc_hba_init - Perform special handling for LC HBA initialization
2057 * @phba: pointer to lpfc hba data structure.
2058 * @hbainit: pointer to an array of unsigned 32-bit integers.
2059 *
2060 * This routine performs the special handling for LC HBA initialization.
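 *
 * A rough sketch of the data flow implemented below (pseudo-code only):
 *
 *	HashWorking[0] = HashWorking[78] = wwnn[0];
 *	HashWorking[1] = HashWorking[79] = wwnn[1];
 *	HashWorking[0..6] ^= phba->RandomData[0..6];
 *	hbainit = SHA1_rounds(SHA1_init_values, HashWorking[0..79]);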
2061 **/ 2062void 2063lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2064{ 2065 int t; 2066 uint32_t *HashWorking; 2067 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2068 2069 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2070 if (!HashWorking) 2071 return; 2072 2073 HashWorking[0] = HashWorking[78] = *pwwnn++; 2074 HashWorking[1] = HashWorking[79] = *pwwnn; 2075 2076 for (t = 0; t < 7; t++) 2077 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2078 2079 lpfc_sha_init(hbainit); 2080 lpfc_sha_iterate(hbainit, HashWorking); 2081 kfree(HashWorking); 2082} 2083 2084/** 2085 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2086 * @vport: pointer to a virtual N_Port data structure. 2087 * 2088 * This routine performs the necessary cleanups before deleting the @vport. 2089 * It invokes the discovery state machine to perform necessary state 2090 * transitions and to release the ndlps associated with the @vport. Note, 2091 * the physical port is treated as @vport 0. 2092 **/ 2093void 2094lpfc_cleanup(struct lpfc_vport *vport) 2095{ 2096 struct lpfc_hba *phba = vport->phba; 2097 struct lpfc_nodelist *ndlp, *next_ndlp; 2098 int i = 0; 2099 2100 if (phba->link_state > LPFC_LINK_DOWN) 2101 lpfc_port_link_failure(vport); 2102 2103 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2104 if (!NLP_CHK_NODE_ACT(ndlp)) { 2105 ndlp = lpfc_enable_node(vport, ndlp, 2106 NLP_STE_UNUSED_NODE); 2107 if (!ndlp) 2108 continue; 2109 spin_lock_irq(&phba->ndlp_lock); 2110 NLP_SET_FREE_REQ(ndlp); 2111 spin_unlock_irq(&phba->ndlp_lock); 2112 /* Trigger the release of the ndlp memory */ 2113 lpfc_nlp_put(ndlp); 2114 continue; 2115 } 2116 spin_lock_irq(&phba->ndlp_lock); 2117 if (NLP_CHK_FREE_REQ(ndlp)) { 2118 /* The ndlp should not be in memory free mode already */ 2119 spin_unlock_irq(&phba->ndlp_lock); 2120 continue; 2121 } else 2122 /* Indicate request for freeing ndlp memory */ 2123 NLP_SET_FREE_REQ(ndlp); 2124 spin_unlock_irq(&phba->ndlp_lock); 2125 2126 if (vport->port_type != LPFC_PHYSICAL_PORT && 2127 ndlp->nlp_DID == Fabric_DID) { 2128 /* Just free up ndlp with Fabric_DID for vports */ 2129 lpfc_nlp_put(ndlp); 2130 continue; 2131 } 2132 2133 if (ndlp->nlp_type & NLP_FABRIC) 2134 lpfc_disc_state_machine(vport, ndlp, NULL, 2135 NLP_EVT_DEVICE_RECOVERY); 2136 2137 lpfc_disc_state_machine(vport, ndlp, NULL, 2138 NLP_EVT_DEVICE_RM); 2139 2140 } 2141 2142 /* At this point, ALL ndlp's should be gone 2143 * because of the previous NLP_EVT_DEVICE_RM. 2144 * Lets wait for this to happen, if needed. 2145 */ 2146 while (!list_empty(&vport->fc_nodes)) { 2147 if (i++ > 3000) { 2148 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2149 "0233 Nodelist not empty\n"); 2150 list_for_each_entry_safe(ndlp, next_ndlp, 2151 &vport->fc_nodes, nlp_listp) { 2152 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2153 LOG_NODE, 2154 "0282 did:x%x ndlp:x%p " 2155 "usgmap:x%x refcnt:%d\n", 2156 ndlp->nlp_DID, (void *)ndlp, 2157 ndlp->nlp_usg_map, 2158 atomic_read( 2159 &ndlp->kref.refcount)); 2160 } 2161 break; 2162 } 2163 2164 /* Wait for any activity on ndlps to settle */ 2165 msleep(10); 2166 } 2167} 2168 2169/** 2170 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2171 * @vport: pointer to a virtual N_Port data structure. 2172 * 2173 * This routine stops all the timers associated with a @vport. This function 2174 * is invoked before disabling or deleting a @vport. Note that the physical 2175 * port is treated as @vport 0. 
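 *
 * Note: del_timer_sync() is used here, so this must not be called from the
 * timer callbacks themselves or while holding a lock that those callbacks
 * take, or it can deadlock waiting for a running handler to finish.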
2176 **/
2177void
2178lpfc_stop_vport_timers(struct lpfc_vport *vport)
2179{
2180	del_timer_sync(&vport->els_tmofunc);
2181	del_timer_sync(&vport->fc_fdmitmo);
2182	lpfc_can_disctmo(vport);
2183	return;
2184}
2185
2186/**
2187 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2188 * @phba: pointer to lpfc hba data structure.
2189 *
2190 * This routine stops the SLI4 FCF rediscovery wait timer if it is on. The
2191 * caller of this routine should already hold the host lock.
2192 **/
2193void
2194__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2195{
2196	/* Clear pending FCF rediscovery wait timer */
2197	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2198	/* Now, try to stop the timer */
2199	del_timer(&phba->fcf.redisc_wait);
2200}
2201
2202/**
2203 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2204 * @phba: pointer to lpfc hba data structure.
2205 *
2206 * This routine stops the SLI4 FCF rediscovery wait timer if it is on. It
2207 * checks whether the FCF rediscovery wait timer is pending with the host
2208 * lock held before proceeding with disabling the timer and clearing the
2209 * wait timer pending flag.
2210 **/
2211void
2212lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2213{
2214	spin_lock_irq(&phba->hbalock);
2215	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2216		/* FCF rediscovery timer already fired or stopped */
2217		spin_unlock_irq(&phba->hbalock);
2218		return;
2219	}
2220	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2221	spin_unlock_irq(&phba->hbalock);
2222}
2223
2224/**
2225 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2226 * @phba: pointer to lpfc hba data structure.
2227 *
2228 * This routine stops all the timers associated with an HBA. This function is
2229 * invoked before either putting an HBA offline or unloading the driver.
2230 **/
2231void
2232lpfc_stop_hba_timers(struct lpfc_hba *phba)
2233{
2234	lpfc_stop_vport_timers(phba->pport);
2235	del_timer_sync(&phba->sli.mbox_tmo);
2236	del_timer_sync(&phba->fabric_block_timer);
2237	del_timer_sync(&phba->eratt_poll);
2238	del_timer_sync(&phba->hb_tmofunc);
2239	phba->hb_outstanding = 0;
2240
2241	switch (phba->pci_dev_grp) {
2242	case LPFC_PCI_DEV_LP:
2243		/* Stop any LightPulse device specific driver timers */
2244		del_timer_sync(&phba->fcp_poll_timer);
2245		break;
2246	case LPFC_PCI_DEV_OC:
2247		/* Stop any OneConnect device specific driver timers */
2248		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2249		break;
2250	default:
2251		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2252				"0297 Invalid device group (x%x)\n",
2253				phba->pci_dev_grp);
2254		break;
2255	}
2256	return;
2257}
2258
2259/**
2260 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2261 * @phba: pointer to lpfc hba data structure.
2262 *
2263 * This routine marks an HBA's management interface as blocked. Once the HBA's
2264 * management interface is marked as blocked, all user space access to the
2265 * HBA, whether from the sysfs interface or the libdfc interface, will be
2266 * blocked. The HBA is set to block the management interface when the
2267 * driver prepares the HBA interface for online or offline.
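 *
 * A minimal caller-side sketch (illustrative only; the real checks live in
 * the sysfs and libdfc entry points):
 *
 *	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
 *		return -EAGAIN;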
2268 **/ 2269static void 2270lpfc_block_mgmt_io(struct lpfc_hba * phba) 2271{ 2272 unsigned long iflag; 2273 2274 spin_lock_irqsave(&phba->hbalock, iflag); 2275 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2276 spin_unlock_irqrestore(&phba->hbalock, iflag); 2277} 2278 2279/** 2280 * lpfc_online - Initialize and bring a HBA online 2281 * @phba: pointer to lpfc hba data structure. 2282 * 2283 * This routine initializes the HBA and brings a HBA online. During this 2284 * process, the management interface is blocked to prevent user space access 2285 * to the HBA interfering with the driver initialization. 2286 * 2287 * Return codes 2288 * 0 - successful 2289 * 1 - failed 2290 **/ 2291int 2292lpfc_online(struct lpfc_hba *phba) 2293{ 2294 struct lpfc_vport *vport; 2295 struct lpfc_vport **vports; 2296 int i; 2297 2298 if (!phba) 2299 return 0; 2300 vport = phba->pport; 2301 2302 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2303 return 0; 2304 2305 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2306 "0458 Bring Adapter online\n"); 2307 2308 lpfc_block_mgmt_io(phba); 2309 2310 if (!lpfc_sli_queue_setup(phba)) { 2311 lpfc_unblock_mgmt_io(phba); 2312 return 1; 2313 } 2314 2315 if (phba->sli_rev == LPFC_SLI_REV4) { 2316 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2317 lpfc_unblock_mgmt_io(phba); 2318 return 1; 2319 } 2320 } else { 2321 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2322 lpfc_unblock_mgmt_io(phba); 2323 return 1; 2324 } 2325 } 2326 2327 vports = lpfc_create_vport_work_array(phba); 2328 if (vports != NULL) 2329 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2330 struct Scsi_Host *shost; 2331 shost = lpfc_shost_from_vport(vports[i]); 2332 spin_lock_irq(shost->host_lock); 2333 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2334 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2335 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2336 if (phba->sli_rev == LPFC_SLI_REV4) 2337 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2338 spin_unlock_irq(shost->host_lock); 2339 } 2340 lpfc_destroy_vport_work_array(phba, vports); 2341 2342 lpfc_unblock_mgmt_io(phba); 2343 return 0; 2344} 2345 2346/** 2347 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2348 * @phba: pointer to lpfc hba data structure. 2349 * 2350 * This routine marks a HBA's management interface as not blocked. Once the 2351 * HBA's management interface is marked as not blocked, all the user space 2352 * access to the HBA, whether they are from sysfs interface or libdfc 2353 * interface will be allowed. The HBA is set to block the management interface 2354 * when the driver prepares the HBA interface for online or offline and then 2355 * set to unblock the management interface afterwards. 2356 **/ 2357void 2358lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2359{ 2360 unsigned long iflag; 2361 2362 spin_lock_irqsave(&phba->hbalock, iflag); 2363 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2364 spin_unlock_irqrestore(&phba->hbalock, iflag); 2365} 2366 2367/** 2368 * lpfc_offline_prep - Prepare a HBA to be brought offline 2369 * @phba: pointer to lpfc hba data structure. 2370 * 2371 * This routine is invoked to prepare a HBA to be brought offline. It performs 2372 * unregistration login to all the nodes on all vports and flushes the mailbox 2373 * queue to make it ready to be brought offline. 
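 *
 * The ordering below is deliberate: management I/O is blocked first, the
 * link is then brought down, each vport's nodes are torn down and their
 * RPIs unregistered, and only then is the mailbox subsystem shut down.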
2374 **/ 2375void 2376lpfc_offline_prep(struct lpfc_hba * phba) 2377{ 2378 struct lpfc_vport *vport = phba->pport; 2379 struct lpfc_nodelist *ndlp, *next_ndlp; 2380 struct lpfc_vport **vports; 2381 struct Scsi_Host *shost; 2382 int i; 2383 2384 if (vport->fc_flag & FC_OFFLINE_MODE) 2385 return; 2386 2387 lpfc_block_mgmt_io(phba); 2388 2389 lpfc_linkdown(phba); 2390 2391 /* Issue an unreg_login to all nodes on all vports */ 2392 vports = lpfc_create_vport_work_array(phba); 2393 if (vports != NULL) { 2394 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2395 if (vports[i]->load_flag & FC_UNLOADING) 2396 continue; 2397 shost = lpfc_shost_from_vport(vports[i]); 2398 spin_lock_irq(shost->host_lock); 2399 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2400 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2401 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2402 spin_unlock_irq(shost->host_lock); 2403 2404 shost = lpfc_shost_from_vport(vports[i]); 2405 list_for_each_entry_safe(ndlp, next_ndlp, 2406 &vports[i]->fc_nodes, 2407 nlp_listp) { 2408 if (!NLP_CHK_NODE_ACT(ndlp)) 2409 continue; 2410 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2411 continue; 2412 if (ndlp->nlp_type & NLP_FABRIC) { 2413 lpfc_disc_state_machine(vports[i], ndlp, 2414 NULL, NLP_EVT_DEVICE_RECOVERY); 2415 lpfc_disc_state_machine(vports[i], ndlp, 2416 NULL, NLP_EVT_DEVICE_RM); 2417 } 2418 spin_lock_irq(shost->host_lock); 2419 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2420 spin_unlock_irq(shost->host_lock); 2421 lpfc_unreg_rpi(vports[i], ndlp); 2422 } 2423 } 2424 } 2425 lpfc_destroy_vport_work_array(phba, vports); 2426 2427 lpfc_sli_mbox_sys_shutdown(phba); 2428} 2429 2430/** 2431 * lpfc_offline - Bring a HBA offline 2432 * @phba: pointer to lpfc hba data structure. 2433 * 2434 * This routine actually brings a HBA offline. It stops all the timers 2435 * associated with the HBA, brings down the SLI layer, and eventually 2436 * marks the HBA as in offline state for the upper layer protocol. 2437 **/ 2438void 2439lpfc_offline(struct lpfc_hba *phba) 2440{ 2441 struct Scsi_Host *shost; 2442 struct lpfc_vport **vports; 2443 int i; 2444 2445 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2446 return; 2447 2448 /* stop port and all timers associated with this hba */ 2449 lpfc_stop_port(phba); 2450 vports = lpfc_create_vport_work_array(phba); 2451 if (vports != NULL) 2452 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2453 lpfc_stop_vport_timers(vports[i]); 2454 lpfc_destroy_vport_work_array(phba, vports); 2455 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2456 "0460 Bring Adapter offline\n"); 2457 /* Bring down the SLI Layer and cleanup. The HBA is offline 2458 now. */ 2459 lpfc_sli_hba_down(phba); 2460 spin_lock_irq(&phba->hbalock); 2461 phba->work_ha = 0; 2462 spin_unlock_irq(&phba->hbalock); 2463 vports = lpfc_create_vport_work_array(phba); 2464 if (vports != NULL) 2465 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2466 shost = lpfc_shost_from_vport(vports[i]); 2467 spin_lock_irq(shost->host_lock); 2468 vports[i]->work_port_events = 0; 2469 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2470 spin_unlock_irq(shost->host_lock); 2471 } 2472 lpfc_destroy_vport_work_array(phba, vports); 2473} 2474 2475/** 2476 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2477 * @phba: pointer to lpfc hba data structure. 2478 * 2479 * This routine is to free all the SCSI buffers and IOCBs from the driver 2480 * list back to kernel. 
It is called from lpfc_pci_remove_one to free 2481 * the internal resources before the device is removed from the system. 2482 * 2483 * Return codes 2484 * 0 - successful (for now, it always returns 0) 2485 **/ 2486static int 2487lpfc_scsi_free(struct lpfc_hba *phba) 2488{ 2489 struct lpfc_scsi_buf *sb, *sb_next; 2490 struct lpfc_iocbq *io, *io_next; 2491 2492 spin_lock_irq(&phba->hbalock); 2493 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2494 spin_lock(&phba->scsi_buf_list_lock); 2495 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2496 list_del(&sb->list); 2497 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2498 sb->dma_handle); 2499 kfree(sb); 2500 phba->total_scsi_bufs--; 2501 } 2502 spin_unlock(&phba->scsi_buf_list_lock); 2503 2504 /* Release all the lpfc_iocbq entries maintained by this host. */ 2505 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2506 list_del(&io->list); 2507 kfree(io); 2508 phba->total_iocbq_bufs--; 2509 } 2510 spin_unlock_irq(&phba->hbalock); 2511 return 0; 2512} 2513 2514/** 2515 * lpfc_create_port - Create an FC port 2516 * @phba: pointer to lpfc hba data structure. 2517 * @instance: a unique integer ID to this FC port. 2518 * @dev: pointer to the device data structure. 2519 * 2520 * This routine creates a FC port for the upper layer protocol. The FC port 2521 * can be created on top of either a physical port or a virtual port provided 2522 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2523 * and associates the FC port created before adding the shost into the SCSI 2524 * layer. 2525 * 2526 * Return codes 2527 * @vport - pointer to the virtual N_Port data structure. 2528 * NULL - port create failed. 2529 **/ 2530struct lpfc_vport * 2531lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2532{ 2533 struct lpfc_vport *vport; 2534 struct Scsi_Host *shost; 2535 int error = 0; 2536 2537 if (dev != &phba->pcidev->dev) 2538 shost = scsi_host_alloc(&lpfc_vport_template, 2539 sizeof(struct lpfc_vport)); 2540 else 2541 shost = scsi_host_alloc(&lpfc_template, 2542 sizeof(struct lpfc_vport)); 2543 if (!shost) 2544 goto out; 2545 2546 vport = (struct lpfc_vport *) shost->hostdata; 2547 vport->phba = phba; 2548 vport->load_flag |= FC_LOADING; 2549 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2550 vport->fc_rscn_flush = 0; 2551 2552 lpfc_get_vport_cfgparam(vport); 2553 shost->unique_id = instance; 2554 shost->max_id = LPFC_MAX_TARGET; 2555 shost->max_lun = vport->cfg_max_luns; 2556 shost->this_id = -1; 2557 shost->max_cmd_len = 16; 2558 if (phba->sli_rev == LPFC_SLI_REV4) { 2559 shost->dma_boundary = 2560 phba->sli4_hba.pc_sli4_params.sge_supp_len; 2561 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2562 } 2563 2564 /* 2565 * Set initial can_queue value since 0 is no longer supported and 2566 * scsi_add_host will fail. This will be adjusted later based on the 2567 * max xri value determined in hba setup. 2568 */ 2569 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2570 if (dev != &phba->pcidev->dev) { 2571 shost->transportt = lpfc_vport_transport_template; 2572 vport->port_type = LPFC_NPIV_PORT; 2573 } else { 2574 shost->transportt = lpfc_transport_template; 2575 vport->port_type = LPFC_PHYSICAL_PORT; 2576 } 2577 2578 /* Initialize all internally managed lists. 
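	 * The init_timer()/.function/.data triples used for the vport timers
	 * below are the classic timer-setup idiom of this kernel era: each
	 * callback receives the vport back through the unsigned long data
	 * argument.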
*/ 2579 INIT_LIST_HEAD(&vport->fc_nodes); 2580 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2581 spin_lock_init(&vport->work_port_lock); 2582 2583 init_timer(&vport->fc_disctmo); 2584 vport->fc_disctmo.function = lpfc_disc_timeout; 2585 vport->fc_disctmo.data = (unsigned long)vport; 2586 2587 init_timer(&vport->fc_fdmitmo); 2588 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2589 vport->fc_fdmitmo.data = (unsigned long)vport; 2590 2591 init_timer(&vport->els_tmofunc); 2592 vport->els_tmofunc.function = lpfc_els_timeout; 2593 vport->els_tmofunc.data = (unsigned long)vport; 2594 2595 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2596 if (error) 2597 goto out_put_shost; 2598 2599 spin_lock_irq(&phba->hbalock); 2600 list_add_tail(&vport->listentry, &phba->port_list); 2601 spin_unlock_irq(&phba->hbalock); 2602 return vport; 2603 2604out_put_shost: 2605 scsi_host_put(shost); 2606out: 2607 return NULL; 2608} 2609 2610/** 2611 * destroy_port - destroy an FC port 2612 * @vport: pointer to an lpfc virtual N_Port data structure. 2613 * 2614 * This routine destroys a FC port from the upper layer protocol. All the 2615 * resources associated with the port are released. 2616 **/ 2617void 2618destroy_port(struct lpfc_vport *vport) 2619{ 2620 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2621 struct lpfc_hba *phba = vport->phba; 2622 2623 lpfc_debugfs_terminate(vport); 2624 fc_remove_host(shost); 2625 scsi_remove_host(shost); 2626 2627 spin_lock_irq(&phba->hbalock); 2628 list_del_init(&vport->listentry); 2629 spin_unlock_irq(&phba->hbalock); 2630 2631 lpfc_cleanup(vport); 2632 return; 2633} 2634 2635/** 2636 * lpfc_get_instance - Get a unique integer ID 2637 * 2638 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2639 * uses the kernel idr facility to perform the task. 2640 * 2641 * Return codes: 2642 * instance - a unique integer ID allocated as the new instance. 2643 * -1 - lpfc get instance failed. 2644 **/ 2645int 2646lpfc_get_instance(void) 2647{ 2648 int instance = 0; 2649 2650 /* Assign an unused number */ 2651 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2652 return -1; 2653 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2654 return -1; 2655 return instance; 2656} 2657 2658/** 2659 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2660 * @shost: pointer to SCSI host data structure. 2661 * @time: elapsed time of the scan in jiffies. 2662 * 2663 * This routine is called by the SCSI layer with a SCSI host to determine 2664 * whether the scan host is finished. 2665 * 2666 * Note: there is no scan_start function as adapter initialization will have 2667 * asynchronously kicked off the link initialization. 2668 * 2669 * Return codes 2670 * 0 - SCSI host scan is not over yet. 2671 * 1 - SCSI host scan is over. 2672 **/ 2673int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2674{ 2675 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2676 struct lpfc_hba *phba = vport->phba; 2677 int stat = 0; 2678 2679 spin_lock_irq(shost->host_lock); 2680 2681 if (vport->load_flag & FC_UNLOADING) { 2682 stat = 1; 2683 goto finished; 2684 } 2685 if (time >= 30 * HZ) { 2686 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2687 "0461 Scanning longer than 30 " 2688 "seconds. Continuing initialization\n"); 2689 stat = 1; 2690 goto finished; 2691 } 2692 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2694 "0465 Link down longer than 15 " 2695 "seconds. 
Continuing initialization\n");
2696		stat = 1;
2697		goto finished;
2698	}
2699
2700	if (vport->port_state != LPFC_VPORT_READY)
2701		goto finished;
2702	if (vport->num_disc_nodes || vport->fc_prli_sent)
2703		goto finished;
2704	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2705		goto finished;
2706	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2707		goto finished;
2708
2709	stat = 1;
2710
2711finished:
2712	spin_unlock_irq(shost->host_lock);
2713	return stat;
2714}
2715
2716/**
2717 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
2718 * @shost: pointer to SCSI host data structure.
2719 *
2720 * This routine initializes the SCSI host attributes on a given FC port. The
2721 * SCSI host can be either on top of a physical port or a virtual port.
2722 **/
2723void lpfc_host_attrib_init(struct Scsi_Host *shost)
2724{
2725	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2726	struct lpfc_hba   *phba = vport->phba;
2727	/*
2728	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2729	 */
2730
2731	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2732	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2733	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2734
2735	memset(fc_host_supported_fc4s(shost), 0,
2736	       sizeof(fc_host_supported_fc4s(shost)));
2737	fc_host_supported_fc4s(shost)[2] = 1;
2738	fc_host_supported_fc4s(shost)[7] = 1;
2739
2740	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2741				 sizeof fc_host_symbolic_name(shost));
2742
2743	fc_host_supported_speeds(shost) = 0;
2744	if (phba->lmt & LMT_10Gb)
2745		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2746	if (phba->lmt & LMT_8Gb)
2747		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2748	if (phba->lmt & LMT_4Gb)
2749		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2750	if (phba->lmt & LMT_2Gb)
2751		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2752	if (phba->lmt & LMT_1Gb)
2753		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2754
2755	fc_host_maxframe_size(shost) =
2756		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2757		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2758
2759	/* This value is also unchanging */
2760	memset(fc_host_active_fc4s(shost), 0,
2761	       sizeof(fc_host_active_fc4s(shost)));
2762	fc_host_active_fc4s(shost)[2] = 1;
2763	fc_host_active_fc4s(shost)[7] = 1;
2764
2765	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2766	spin_lock_irq(shost->host_lock);
2767	vport->load_flag &= ~FC_LOADING;
2768	spin_unlock_irq(shost->host_lock);
2769}
2770
2771/**
2772 * lpfc_stop_port_s3 - Stop SLI3 device port
2773 * @phba: pointer to lpfc hba data structure.
2774 *
2775 * This routine is invoked to stop an SLI3 device port, it stops the device
2776 * from generating interrupts and stops the device driver's timers for the
2777 * device.
2778 **/
2779static void
2780lpfc_stop_port_s3(struct lpfc_hba *phba)
2781{
2782	/* Clear all interrupt enable conditions */
2783	writel(0, phba->HCregaddr);
2784	readl(phba->HCregaddr); /* flush */
2785	/* Clear all pending interrupts */
2786	writel(0xffffffff, phba->HAregaddr);
2787	readl(phba->HAregaddr); /* flush */
2788
2789	/* Reset some HBA SLI setup states */
2790	lpfc_stop_hba_timers(phba);
2791	phba->pport->work_port_events = 0;
2792}
2793
2794/**
2795 * lpfc_stop_port_s4 - Stop SLI4 device port
2796 * @phba: pointer to lpfc hba data structure.
2797 *
2798 * This routine is invoked to stop an SLI4 device port, it stops the device
2799 * from generating interrupts and stops the device driver's timers for the
2800 * device.
2801 **/
2802static void
2803lpfc_stop_port_s4(struct lpfc_hba *phba)
2804{
2805	/* Reset some HBA SLI4 setup states */
2806	lpfc_stop_hba_timers(phba);
2807	phba->pport->work_port_events = 0;
2808	phba->sli4_hba.intr_enable = 0;
2809}
2810
2811/**
2812 * lpfc_stop_port - Wrapper function for stopping hba port
2813 * @phba: Pointer to HBA context object.
2814 *
2815 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2816 * the API jump table function pointer from the lpfc_hba struct.
2817 **/
2818void
2819lpfc_stop_port(struct lpfc_hba *phba)
2820{
2821	phba->lpfc_stop_port(phba);
2822}
2823
2824/**
2825 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2826 * @phba: pointer to lpfc hba data structure.
2827 *
2828 * This routine is invoked to remove the driver default fcf record from
2829 * the port. This routine currently acts on FCF Index 0.
2830 *
2831 **/
2832void
2833lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2834{
2835	int rc = 0;
2836	LPFC_MBOXQ_t *mboxq;
2837	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2838	uint32_t mbox_tmo, req_len;
2839	uint32_t shdr_status, shdr_add_status;
2840
2841	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2842	if (!mboxq) {
2843		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2844			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2845		return;
2846	}
2847
2848	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2849		  sizeof(struct lpfc_sli4_cfg_mhdr);
2850	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2851			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2852			      req_len, LPFC_SLI4_MBX_EMBED);
2853	/*
2854	 * In phase 1, there is a single FCF index, 0. In phase 2, the driver
2855	 * supports multiple FCF indices.
2856	 */
2857	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2858	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2859	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2860	       phba->fcf.current_rec.fcf_indx);
2861
2862	if (!phba->sli4_hba.intr_enable)
2863		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2864	else {
2865		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2866		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2867	}
2868	/* The IOCTL status is embedded in the mailbox subheader. */
2869	shdr_status = bf_get(lpfc_mbox_hdr_status,
2870			     &del_fcf_record->header.cfg_shdr.response);
2871	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2872				 &del_fcf_record->header.cfg_shdr.response);
2873	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2874		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2875				"2516 DEL FCF of default FCF Index failed "
2876				"mbx status x%x, status x%x add_status x%x\n",
2877				rc, shdr_status, shdr_add_status);
2878	}
2879	if (rc != MBX_TIMEOUT)
2880		mempool_free(mboxq, phba->mbox_mem_pool);
2881}
2882
2883/**
2884 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscovery wait timer
2885 * @phba: Pointer to hba for which this call is being executed.
2886 *
2887 * This routine starts the timer waiting for the FCF rediscovery to complete.
2888 **/
2889void
2890lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2891{
2892	unsigned long fcf_redisc_wait_tmo =
2893		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2894	/* Start fcf rediscovery wait period timer */
2895	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2896	spin_lock_irq(&phba->hbalock);
2897	/* Allow action to new fcf asynchronous event */
2898	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2899	/* Mark the FCF rediscovery pending state */
2900	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2901	spin_unlock_irq(&phba->hbalock);
2902}
2903
2904/**
2905 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscovery wait timeout
2906 * @ptr: Map to lpfc_hba data structure pointer.
2907 *
2908 * This routine is invoked when the wait for FCF table rediscovery has
2909 * timed out. If new FCF record(s) have been discovered during the
2910 * wait period, a new FCF event shall be added to the FCOE async event
2911 * list, and then the worker thread shall be woken up for processing from
2912 * the worker thread context.
2913 **/
2914void
2915lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2916{
2917	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2918
2919	/* Don't send FCF rediscovery event if timer cancelled */
2920	spin_lock_irq(&phba->hbalock);
2921	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2922		spin_unlock_irq(&phba->hbalock);
2923		return;
2924	}
2925	/* Clear FCF rediscovery timer pending flag */
2926	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2927	/* FCF rediscovery event to worker thread */
2928	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2929	spin_unlock_irq(&phba->hbalock);
2930	/* wake up worker thread */
2931	lpfc_worker_wake_up(phba);
2932}
2933
2934/**
2935 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2936 * @phba: pointer to lpfc hba data structure.
2937 *
2938 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2939 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2940 * was successful and the firmware supports FCoE. Any other return indicates
2941 * an error. It is assumed that this function will be called before interrupts
2942 * are enabled.
2943 **/
2944static int
2945lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2946{
2947	int rc = 0;
2948	LPFC_MBOXQ_t *mboxq;
2949	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2950	uint32_t length;
2951	uint32_t shdr_status, shdr_add_status;
2952
2953	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2954	if (!mboxq) {
2955		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2956				"2621 Failed to allocate mbox for "
2957				"query firmware config cmd\n");
2958		return -ENOMEM;
2959	}
2960	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2961	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2962		  sizeof(struct lpfc_sli4_cfg_mhdr));
2963	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2964			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2965			 length, LPFC_SLI4_MBX_EMBED);
2966	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2967	/* The IOCTL status is embedded in the mailbox subheader. */
2968	shdr_status = bf_get(lpfc_mbox_hdr_status,
2969			     &query_fw_cfg->header.cfg_shdr.response);
2970	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2971				 &query_fw_cfg->header.cfg_shdr.response);
2972	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2973		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2974				"2622 Query Firmware Config failed "
2975				"mbx status x%x, status x%x add_status x%x\n",
2976				rc, shdr_status, shdr_add_status);
		/* Do not leak the mailbox on the error path */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
2977		return -EINVAL;
2978	}
2979	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
2980		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2981				"2623 FCoE Function not supported by firmware. "
2982				"Function mode = %08x\n",
2983				query_fw_cfg->function_mode);
		/* Do not leak the mailbox on the error path */
		mempool_free(mboxq, phba->mbox_mem_pool);
2984		return -EINVAL;
2985	}
2986	if (rc != MBX_TIMEOUT)
2987		mempool_free(mboxq, phba->mbox_mem_pool);
2988	return 0;
2989}
2990
2991/**
2992 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2993 * @phba: pointer to lpfc hba data structure.
2994 * @acqe_link: pointer to the async link completion queue entry.
2995 *
2996 * This routine is to parse the SLI4 link-attention link fault code and
2997 * translate it into the base driver's read link attention mailbox command
2998 * status.
2999 *
3000 * Return: Link-attention status in terms of base driver's coding.
3001 **/
3002static uint16_t
3003lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3004			   struct lpfc_acqe_link *acqe_link)
3005{
3006	uint16_t latt_fault;
3007
3008	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3009	case LPFC_ASYNC_LINK_FAULT_NONE:
3010	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3011	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3012		latt_fault = 0;
3013		break;
3014	default:
3015		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3016				"0398 Invalid link fault code: x%x\n",
3017				bf_get(lpfc_acqe_link_fault, acqe_link));
3018		latt_fault = MBXERR_ERROR;
3019		break;
3020	}
3021	return latt_fault;
3022}
3023
3024/**
3025 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3026 * @phba: pointer to lpfc hba data structure.
3027 * @acqe_link: pointer to the async link completion queue entry.
3028 *
3029 * This routine is to parse the SLI4 link attention type and translate it
3030 * into the base driver's link attention type coding.
3031 *
3032 * Return: Link attention type in terms of base driver's coding.
3033 **/
3034static uint8_t
3035lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3036			  struct lpfc_acqe_link *acqe_link)
3037{
3038	uint8_t att_type;
3039
3040	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3041	case LPFC_ASYNC_LINK_STATUS_DOWN:
3042	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3043		att_type = AT_LINK_DOWN;
3044		break;
3045	case LPFC_ASYNC_LINK_STATUS_UP:
3046		/* Ignore physical link up events - wait for logical link up */
3047		att_type = AT_RESERVED;
3048		break;
3049	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3050		att_type = AT_LINK_UP;
3051		break;
3052	default:
3053		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3054				"0399 Invalid link attention type: x%x\n",
3055				bf_get(lpfc_acqe_link_status, acqe_link));
3056		att_type = AT_RESERVED;
3057		break;
3058	}
3059	return att_type;
3060}
3061
3062/**
3063 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3064 * @phba: pointer to lpfc hba data structure.
3065 * @acqe_link: pointer to the async link completion queue entry.
3066 *
3067 * This routine is to parse the SLI4 link-attention link speed and translate
3068 * it into the base driver's link-attention link speed coding.
3069 *
3070 * Return: Link-attention link speed in terms of base driver's coding.
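 *
 * Roughly, as implemented below: 1 Gb/s maps to LA_1GHZ_LINK, 10 Gb/s to
 * LA_10GHZ_LINK, and everything else (zero, 10 Mb/s, 100 Mb/s, or an
 * unrecognized code) falls back to LA_UNKNW_LINK.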
3071 **/
3072static uint8_t
3073lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3074				struct lpfc_acqe_link *acqe_link)
3075{
3076	uint8_t link_speed;
3077
3078	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3079	case LPFC_ASYNC_LINK_SPEED_ZERO:
3080		link_speed = LA_UNKNW_LINK;
3081		break;
3082	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3083		link_speed = LA_UNKNW_LINK;
3084		break;
3085	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3086		link_speed = LA_UNKNW_LINK;
3087		break;
3088	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3089		link_speed = LA_1GHZ_LINK;
3090		break;
3091	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3092		link_speed = LA_10GHZ_LINK;
3093		break;
3094	default:
3095		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3096				"0483 Invalid link-attention link speed: x%x\n",
3097				bf_get(lpfc_acqe_link_speed, acqe_link));
3098		link_speed = LA_UNKNW_LINK;
3099		break;
3100	}
3101	return link_speed;
3102}
3103
3104/**
3105 * lpfc_sli4_async_link_evt - Process the asynchronous link event
3106 * @phba: pointer to lpfc hba data structure.
3107 * @acqe_link: pointer to the async link completion queue entry.
3108 *
3109 * This routine is to handle the SLI4 asynchronous link event.
3110 **/
3111static void
3112lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3113			 struct lpfc_acqe_link *acqe_link)
3114{
3115	struct lpfc_dmabuf *mp;
3116	LPFC_MBOXQ_t *pmb;
3117	MAILBOX_t *mb;
3118	READ_LA_VAR *la;
3119	uint8_t att_type;
3120
3121	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3122	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3123		return;
3124	phba->fcoe_eventtag = acqe_link->event_tag;
3125	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3126	if (!pmb) {
3127		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3128				"0395 The mboxq allocation failed\n");
3129		return;
3130	}
3131	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3132	if (!mp) {
3133		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3134				"0396 The lpfc_dmabuf allocation failed\n");
3135		goto out_free_pmb;
3136	}
3137	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3138	if (!mp->virt) {
3139		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3140				"0397 The mbuf allocation failed\n");
3141		goto out_free_dmabuf;
3142	}
3143
3144	/* Cleanup any outstanding ELS commands */
3145	lpfc_els_flush_all_cmd(phba);
3146
3147	/* Block ELS IOCBs until we have finished processing the link event */
3148	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3149
3150	/* Update link event statistics */
3151	phba->sli.slistat.link_event++;
3152
3153	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3154	lpfc_read_la(phba, pmb, mp);
3155	pmb->vport = phba->pport;
3156
3157	/* Parse and translate status field */
3158	mb = &pmb->u.mb;
3159	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3160
3161	/* Parse and translate link attention fields */
3162	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3163	la->eventTag = acqe_link->event_tag;
3164	la->attType = att_type;
3165	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3166
3167	/* Fake the following irrelevant fields */
3168	la->topology = TOPOLOGY_PT_PT;
3169	la->granted_AL_PA = 0;
3170	la->il = 0;
3171	la->pb = 0;
3172	la->fa = 0;
3173	la->mm = 0;
3174
3175	/* Keep the link status for extra SLI4 state machine reference */
3176	phba->sli4_hba.link_state.speed =
3177				bf_get(lpfc_acqe_link_speed, acqe_link);
3178	phba->sli4_hba.link_state.duplex =
3179				bf_get(lpfc_acqe_link_duplex, acqe_link);
3180	phba->sli4_hba.link_state.status =
3181				bf_get(lpfc_acqe_link_status, acqe_link);
3182	phba->sli4_hba.link_state.physical =
3183				bf_get(lpfc_acqe_link_physical, acqe_link);
3184	phba->sli4_hba.link_state.fault =
3185				bf_get(lpfc_acqe_link_fault, acqe_link);
3186	phba->sli4_hba.link_state.logical_speed =
3187				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3188
3189	/* Invoke the lpfc_handle_latt mailbox command callback function */
3190	lpfc_mbx_cmpl_read_la(phba, pmb);
3191
3192	return;
3193
3194out_free_dmabuf:
3195	kfree(mp);
3196out_free_pmb:
3197	mempool_free(pmb, phba->mbox_mem_pool);
3198}
3199
3200/**
3201 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3202 * @phba: pointer to lpfc hba data structure.
3203 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
3204 *
3205 * This routine is to handle the SLI4 asynchronous fcoe event.
3206 **/
3207static void
3208lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3209			 struct lpfc_acqe_fcoe *acqe_fcoe)
3210{
3211	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3212	int rc;
3213	struct lpfc_vport *vport;
3214	struct lpfc_nodelist *ndlp;
3215	struct Scsi_Host *shost;
3216	uint32_t link_state;
3217	int active_vlink_present;
3218	struct lpfc_vport **vports;
3219	int i;
3220
3221	phba->fc_eventTag = acqe_fcoe->event_tag;
3222	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3223	switch (event_type) {
3224	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3225	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3226		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3227			"2546 New FCF found index 0x%x tag 0x%x\n",
3228			acqe_fcoe->index,
3229			acqe_fcoe->event_tag);
3230		spin_lock_irq(&phba->hbalock);
3231		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3232		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3233			/*
3234			 * If the current FCF is in discovered state or
3235			 * FCF discovery is in progress, do nothing.
3236			 */
3237			spin_unlock_irq(&phba->hbalock);
3238			break;
3239		}
3240		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3241			/*
3242			 * If fast FCF failover rescan event is pending,
3243			 * do nothing.
3244			 */
3245			spin_unlock_irq(&phba->hbalock);
3246			break;
3247		}
3248		spin_unlock_irq(&phba->hbalock);
3249
3250		/* Read the FCF table and re-discover SAN. */
3251		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
3252		if (rc)
3253			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3254					"2547 Read FCF record failed 0x%x\n",
3255					rc);
3256		break;
3257
3258	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3259		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3260			"2548 FCF Table full count 0x%x tag 0x%x\n",
3261			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3262			acqe_fcoe->event_tag);
3263		break;
3264
3265	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3266		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3267			"2549 FCF disconnected from network index 0x%x"
3268			" tag 0x%x\n", acqe_fcoe->index,
3269			acqe_fcoe->event_tag);
3270		/* If the event is not for the currently used FCF, do nothing */
3271		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3272			break;
3273		/*
3274		 * Currently, the driver supports only one FCF - so treat this
3275		 * as a link down, but save the link state because we don't want
3276		 * it to be changed to Link Down unless it is already down.
3277		 */
3278		link_state = phba->link_state;
3279		lpfc_linkdown(phba);
3280		phba->link_state = link_state;
3281		/* Unregister FCF if no devices connected to it */
3282		lpfc_unregister_unused_fcf(phba);
3283		break;
3284	case LPFC_FCOE_EVENT_TYPE_CVL:
3285		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3286			"2718 Clear Virtual Link Received for VPI 0x%x"
3287			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3288		vport = lpfc_find_vport_by_vpid(phba,
3289				acqe_fcoe->index - phba->vpi_base);
3290		if (!vport)
3291			break;
3292		ndlp = lpfc_findnode_did(vport, Fabric_DID);
3293		if (!ndlp)
3294			break;
3295		shost = lpfc_shost_from_vport(vport);
3296		if (phba->pport->port_state <= LPFC_FLOGI)
3297			break;
3298		/* If virtual link is not yet instantiated, ignore CVL */
3299		if (vport->port_state <= LPFC_FDISC)
3300			break;
3301
3302		lpfc_linkdown_port(vport);
3303		lpfc_cleanup_pending_mbox(vport);
3304		spin_lock_irq(shost->host_lock);
3305		vport->fc_flag |= FC_VPORT_CVL_RCVD;
3306		spin_unlock_irq(shost->host_lock);
3307		active_vlink_present = 0;
3308
3309		vports = lpfc_create_vport_work_array(phba);
3310		if (vports) {
3311			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3312					i++) {
3313				if ((!(vports[i]->fc_flag &
3314					FC_VPORT_CVL_RCVD)) &&
3315					(vports[i]->port_state > LPFC_FDISC)) {
3316					active_vlink_present = 1;
3317					break;
3318				}
3319			}
3320			lpfc_destroy_vport_work_array(phba, vports);
3321		}
3322
3323		if (active_vlink_present) {
3324			/*
3325			 * If there are other active VLinks present,
3326			 * re-instantiate the Vlink using FDISC.
3327			 */
3328			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3329			spin_lock_irq(shost->host_lock);
3330			ndlp->nlp_flag |= NLP_DELAY_TMO;
3331			spin_unlock_irq(shost->host_lock);
3332			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3333			vport->port_state = LPFC_FDISC;
3334		} else {
3335			/*
3336			 * Otherwise, we request the port to rediscover
3337			 * the entire FCF table for a fast recovery from
3338			 * the possible case that the current FCF
3339			 * is no longer valid.
3340			 */
3341			rc = lpfc_sli4_redisc_fcf_table(phba);
3342			if (rc)
3343				/*
3344				 * Last resort will be to retry on the
3345				 * current registered FCF entry.
3346				 */
3347				lpfc_retry_pport_discovery(phba);
3348		}
3349		break;
3350	default:
3351		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3352			"0288 Unknown FCoE event type 0x%x event tag "
3353			"0x%x\n", event_type, acqe_fcoe->event_tag);
3354		break;
3355	}
3356}
3357
3358/**
3359 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3360 * @phba: pointer to lpfc hba data structure.
3361 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3362 *
3363 * This routine is to handle the SLI4 asynchronous dcbx event.
3364 **/
3365static void
3366lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3367			 struct lpfc_acqe_dcbx *acqe_dcbx)
3368{
3369	phba->fc_eventTag = acqe_dcbx->event_tag;
3370	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3371			"0290 The SLI4 DCBX asynchronous event is not "
3372			"handled yet\n");
3373}
3374
3375/**
3376 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3377 * @phba: pointer to lpfc hba data structure.
3378 *
3379 * This routine is invoked by the worker thread to process all the pending
3380 * SLI4 asynchronous events.
3381 **/
3382void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3383{
3384	struct lpfc_cq_event *cq_event;
3385
3386	/* First, declare the async event has been handled */
3387	spin_lock_irq(&phba->hbalock);
3388	phba->hba_flag &= ~ASYNC_EVENT;
3389	spin_unlock_irq(&phba->hbalock);
3390	/* Now, handle all the async events */
3391	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3392		/* Get the first event from the head of the event queue */
3393		spin_lock_irq(&phba->hbalock);
3394		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3395				 cq_event, struct lpfc_cq_event, list);
3396		spin_unlock_irq(&phba->hbalock);
3397		/* Process the asynchronous event */
3398		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3399		case LPFC_TRAILER_CODE_LINK:
3400			lpfc_sli4_async_link_evt(phba,
3401						 &cq_event->cqe.acqe_link);
3402			break;
3403		case LPFC_TRAILER_CODE_FCOE:
3404			lpfc_sli4_async_fcoe_evt(phba,
3405						 &cq_event->cqe.acqe_fcoe);
3406			break;
3407		case LPFC_TRAILER_CODE_DCBX:
3408			lpfc_sli4_async_dcbx_evt(phba,
3409						 &cq_event->cqe.acqe_dcbx);
3410			break;
3411		default:
3412			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3413					"1804 Invalid asynchronous event code: "
3414					"x%x\n", bf_get(lpfc_trailer_code,
3415					&cq_event->cqe.mcqe_cmpl));
3416			break;
3417		}
3418		/* Free the completion event processed to the free pool */
3419		lpfc_sli4_cq_event_release(phba, cq_event);
3420	}
3421}
3422
3423/**
3424 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3425 * @phba: pointer to lpfc hba data structure.
3426 *
3427 * This routine is invoked by the worker thread to process the FCF table
3428 * rediscovery pending completion event.
3429 **/
3430void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3431{
3432	int rc;
3433
3434	spin_lock_irq(&phba->hbalock);
3435	/* Clear FCF rediscovery timeout event */
3436	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3437	/* Clear driver fast failover FCF record flag */
3438	phba->fcf.failover_rec.flag = 0;
3439	/* Set state for FCF fast failover */
3440	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3441	spin_unlock_irq(&phba->hbalock);
3442
3443	/* Scan FCF table from the first entry to re-discover SAN */
3444	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
3445	if (rc)
3446		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3447				"2747 Post FCF rediscovery read FCF record "
3448				"failed 0x%x\n", rc);
3449}
3450
3451/**
3452 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3453 * @phba: pointer to lpfc hba data structure.
3454 * @dev_grp: The HBA PCI-Device group number.
3455 *
3456 * This routine is invoked to set up the per HBA PCI-Device group function
3457 * API jump table entries.
3458 *
3459 * Return: 0 if success, otherwise -ENODEV
3460 **/
3461int
3462lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3463{
3464	int rc;
3465
3466	/* Set up lpfc PCI-device group */
3467	phba->pci_dev_grp = dev_grp;
3468
3469	/* The LPFC_PCI_DEV_OC uses SLI4 */
3470	if (dev_grp == LPFC_PCI_DEV_OC)
3471		phba->sli_rev = LPFC_SLI_REV4;
3472
3473	/* Set up device INIT API function jump table */
3474	rc = lpfc_init_api_table_setup(phba, dev_grp);
3475	if (rc)
3476		return -ENODEV;
3477	/* Set up SCSI API function jump table */
3478	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3479	if (rc)
3480		return -ENODEV;
3481	/* Set up SLI API function jump table */
3482	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3483	if (rc)
3484		return -ENODEV;
3485	/* Set up MBOX API function jump table */
3486	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3487	if (rc)
3488		return -ENODEV;
3489
3490	return 0;
3491}
3492
3493/**
3494 * lpfc_log_intr_mode - Log the active interrupt mode
3495 * @phba: pointer to lpfc hba data structure.
3496 * @intr_mode: active interrupt mode adopted.
3497 *
3498 * This routine is invoked to log the currently used active interrupt mode
3499 * to the device.
3500 **/
3501static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3502{
3503	switch (intr_mode) {
3504	case 0:
3505		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3506				"0470 Enabled INTx interrupt mode.\n");
3507		break;
3508	case 1:
3509		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3510				"0481 Enabled MSI interrupt mode.\n");
3511		break;
3512	case 2:
3513		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3514				"0480 Enabled MSI-X interrupt mode.\n");
3515		break;
3516	default:
3517		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3518				"0482 Illegal interrupt mode.\n");
3519		break;
3520	}
3521	return;
3522}
3523
3524/**
3525 * lpfc_enable_pci_dev - Enable a generic PCI device.
3526 * @phba: pointer to lpfc hba data structure.
3527 *
3528 * This routine is invoked to enable the PCI device that is common to all
3529 * PCI devices.
3530 *
3531 * Return codes
3532 * 	0 - successful
3533 * 	other values - error
3534 **/
3535static int
3536lpfc_enable_pci_dev(struct lpfc_hba *phba)
3537{
3538	struct pci_dev *pdev;
3539	int bars;
3540
3541	/* Obtain PCI device reference */
3542	if (!phba->pcidev)
3543		goto out_error;
3544	else
3545		pdev = phba->pcidev;
3546	/* Select PCI BARs */
3547	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3548	/* Enable PCI device */
3549	if (pci_enable_device_mem(pdev))
3550		goto out_error;
3551	/* Request PCI resource for the device */
3552	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3553		goto out_disable_device;
3554	/* Set up device as PCI master and save state for EEH */
3555	pci_set_master(pdev);
3556	pci_try_set_mwi(pdev);
3557	pci_save_state(pdev);
3558
3559	return 0;
3560
3561out_disable_device:
3562	pci_disable_device(pdev);
3563out_error:
3564	return -ENODEV;
3565}
3566
3567/**
3568 * lpfc_disable_pci_dev - Disable a generic PCI device.
3569 * @phba: pointer to lpfc hba data structure.
3570 *
3571 * This routine is invoked to disable the PCI device that is common to all
3572 * PCI devices.
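 *
 * Note: this undoes lpfc_enable_pci_dev() in reverse order (the selected
 * BAR regions are released before the device is disabled), and the PCI
 * drvdata pointer is cleared so later paths do not see a stale phba pointer.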
3573 **/ 3574static void 3575lpfc_disable_pci_dev(struct lpfc_hba *phba) 3576{ 3577 struct pci_dev *pdev; 3578 int bars; 3579 3580 /* Obtain PCI device reference */ 3581 if (!phba->pcidev) 3582 return; 3583 else 3584 pdev = phba->pcidev; 3585 /* Select PCI BARs */ 3586 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3587 /* Release PCI resource and disable PCI device */ 3588 pci_release_selected_regions(pdev, bars); 3589 pci_disable_device(pdev); 3590 /* Null out PCI private reference to driver */ 3591 pci_set_drvdata(pdev, NULL); 3592 3593 return; 3594} 3595 3596/** 3597 * lpfc_reset_hba - Reset a hba 3598 * @phba: pointer to lpfc hba data structure. 3599 * 3600 * This routine is invoked to reset a hba device. It brings the HBA 3601 * offline, performs a board restart, and then brings the board back 3602 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 3603 * on outstanding mailbox commands. 3604 **/ 3605void 3606lpfc_reset_hba(struct lpfc_hba *phba) 3607{ 3608 /* If resets are disabled then set error state and return. */ 3609 if (!phba->cfg_enable_hba_reset) { 3610 phba->link_state = LPFC_HBA_ERROR; 3611 return; 3612 } 3613 lpfc_offline_prep(phba); 3614 lpfc_offline(phba); 3615 lpfc_sli_brdrestart(phba); 3616 lpfc_online(phba); 3617 lpfc_unblock_mgmt_io(phba); 3618} 3619 3620/** 3621 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3622 * @phba: pointer to lpfc hba data structure. 3623 * 3624 * This routine is invoked to set up the driver internal resources specific to 3625 * support the SLI-3 HBA device it attached to. 3626 * 3627 * Return codes 3628 * 0 - successful 3629 * other values - error 3630 **/ 3631static int 3632lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3633{ 3634 struct lpfc_sli *psli; 3635 3636 /* 3637 * Initialize timers used by driver 3638 */ 3639 3640 /* Heartbeat timer */ 3641 init_timer(&phba->hb_tmofunc); 3642 phba->hb_tmofunc.function = lpfc_hb_timeout; 3643 phba->hb_tmofunc.data = (unsigned long)phba; 3644 3645 psli = &phba->sli; 3646 /* MBOX heartbeat timer */ 3647 init_timer(&psli->mbox_tmo); 3648 psli->mbox_tmo.function = lpfc_mbox_timeout; 3649 psli->mbox_tmo.data = (unsigned long) phba; 3650 /* FCP polling mode timer */ 3651 init_timer(&phba->fcp_poll_timer); 3652 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3653 phba->fcp_poll_timer.data = (unsigned long) phba; 3654 /* Fabric block timer */ 3655 init_timer(&phba->fabric_block_timer); 3656 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3657 phba->fabric_block_timer.data = (unsigned long) phba; 3658 /* EA polling mode timer */ 3659 init_timer(&phba->eratt_poll); 3660 phba->eratt_poll.function = lpfc_poll_eratt; 3661 phba->eratt_poll.data = (unsigned long) phba; 3662 3663 /* Host attention work mask setup */ 3664 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3665 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3666 3667 /* Get all the module params for configuring this host */ 3668 lpfc_get_cfgparam(phba); 3669 /* 3670 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3671 * used to create the sg_dma_buf_pool must be dynamically calculated. 3672 * 2 segments are added since the IOCB needs a command and response bde. 
3673 */
3674 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3675 sizeof(struct fcp_rsp) +
3676 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3677
3678 if (phba->cfg_enable_bg) {
3679 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3680 phba->cfg_sg_dma_buf_size +=
3681 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3682 }
3683
3684 /* Also reinitialize the host templates with new values. */
3685 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3686 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3687
3688 phba->max_vpi = LPFC_MAX_VPI;
3689 /* This will be set to correct value after config_port mbox */
3690 phba->max_vports = 0;
3691
3692 /*
3693 * Initialize the SLI Layer to run with lpfc HBAs.
3694 */
3695 lpfc_sli_setup(phba);
3696 lpfc_sli_queue_setup(phba);
3697
3698 /* Allocate device driver memory */
3699 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3700 return -ENOMEM;
3701
3702 return 0;
3703}
3704
3705/**
3706 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3707 * @phba: pointer to lpfc hba data structure.
3708 *
3709 * This routine is invoked to unset the driver internal resources set up
3710 * specific for supporting the SLI-3 HBA device it is attached to.
3711 **/
3712static void
3713lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3714{
3715 /* Free device driver memory allocated */
3716 lpfc_mem_free_all(phba);
3717
3718 return;
3719}
3720
3721/**
3722 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3723 * @phba: pointer to lpfc hba data structure.
3724 *
3725 * This routine is invoked to set up the driver internal resources specific to
3726 * support the SLI-4 HBA device it is attached to.
3727 *
3728 * Return codes
3729 * 0 - successful
3730 * other values - error
3731 **/
3732static int
3733lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3734{
3735 struct lpfc_sli *psli;
3736 LPFC_MBOXQ_t *mboxq;
3737 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3738 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3739 struct lpfc_mqe *mqe;
3740
3741 /* Before proceeding, wait for POST done and device ready */
3742 rc = lpfc_sli4_post_status_check(phba);
3743 if (rc)
3744 return -ENODEV;
3745
3746 /*
3747 * Initialize timers used by driver
3748 */
3749
3750 /* Heartbeat timer */
3751 init_timer(&phba->hb_tmofunc);
3752 phba->hb_tmofunc.function = lpfc_hb_timeout;
3753 phba->hb_tmofunc.data = (unsigned long)phba;
3754
3755 psli = &phba->sli;
3756 /* MBOX heartbeat timer */
3757 init_timer(&psli->mbox_tmo);
3758 psli->mbox_tmo.function = lpfc_mbox_timeout;
3759 psli->mbox_tmo.data = (unsigned long) phba;
3760 /* Fabric block timer */
3761 init_timer(&phba->fabric_block_timer);
3762 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3763 phba->fabric_block_timer.data = (unsigned long) phba;
3764 /* EA polling mode timer */
3765 init_timer(&phba->eratt_poll);
3766 phba->eratt_poll.function = lpfc_poll_eratt;
3767 phba->eratt_poll.data = (unsigned long) phba;
3768 /* FCF rediscover timer */
3769 init_timer(&phba->fcf.redisc_wait);
3770 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3771 phba->fcf.redisc_wait.data = (unsigned long)phba;
3772
3773 /*
3774 * We need to do a READ_CONFIG mailbox command here before
3775 * calling lpfc_get_cfgparam. For VFs this will report the
3776 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3777 * All of the resources allocated
3778 * for this Port are tied to these values.
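 * (The READ_CONFIG mailbox cannot be issued until the bootstrap
 * mailbox exists, so the read is actually performed further below
 * through lpfc_sli4_read_config().)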
3779 */
3780 /* Get all the module params for configuring this host */
3781 lpfc_get_cfgparam(phba);
3782 phba->max_vpi = LPFC_MAX_VPI;
3783 /* This will be set to correct value after the read_config mbox */
3784 phba->max_vports = 0;
3785
3786 /* Program the default value of vlan_id and fc_map */
3787 phba->valid_vlan = 0;
3788 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3789 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3790 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3791
3792 /*
3793 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3794 * used to create the sg_dma_buf_pool must be dynamically calculated.
3795 * 2 segments are added since the IOCB needs a command and response bde.
3796 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3797 * sgl sizes that are a power of 2 are used.
3798 */
3799 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3800 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3801 /* Feature Level 1 hardware is limited to 2 pages */
3802 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3803 LPFC_SLI_INTF_FEATURELEVEL1_1))
3804 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3805 else
3806 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3807 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3808 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3809 dma_buf_size = dma_buf_size << 1)
3810 ;
3811 if (dma_buf_size == max_buf_size)
3812 phba->cfg_sg_seg_cnt = (dma_buf_size -
3813 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3814 (2 * sizeof(struct sli4_sge))) /
3815 sizeof(struct sli4_sge);
3816 phba->cfg_sg_dma_buf_size = dma_buf_size;
3817
3818 /* Initialize buffer queue management fields */
3819 hbq_count = lpfc_sli_hbq_count();
3820 for (i = 0; i < hbq_count; ++i)
3821 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3822 INIT_LIST_HEAD(&phba->rb_pend_list);
3823 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3824 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3825
3826 /*
3827 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3828 */
3829 /* Initialize the Abort scsi buffer list used by driver */
3830 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3831 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3832 /* This abort list is used by the worker thread */
3833 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3834
3835 /*
3836 * Initialize driver internal slow-path work queues
3837 */
3838
3839 /* Driver internal slow-path CQ Event pool */
3840 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3841 /* Response IOCB work queue list */
3842 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3843 /* Asynchronous event CQ Event work queue list */
3844 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3845 /* Fast-path XRI aborted CQ Event work queue list */
3846 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3847 /* Slow-path XRI aborted CQ Event work queue list */
3848 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3849 /* Receive queue CQ Event work queue list */
3850 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3851
3852 /* Initialize the driver internal SLI layer lists.
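 * (Only driver-side ring and iocb bookkeeping happens here; the SLI4
 * hardware queues are allocated below by lpfc_sli4_queue_create().)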
*/ 3853 lpfc_sli_setup(phba); 3854 lpfc_sli_queue_setup(phba); 3855 3856 /* Allocate device driver memory */ 3857 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 3858 if (rc) 3859 return -ENOMEM; 3860 3861 /* Create the bootstrap mailbox command */ 3862 rc = lpfc_create_bootstrap_mbox(phba); 3863 if (unlikely(rc)) 3864 goto out_free_mem; 3865 3866 /* Set up the host's endian order with the device. */ 3867 rc = lpfc_setup_endian_order(phba); 3868 if (unlikely(rc)) 3869 goto out_free_bsmbx; 3870 3871 rc = lpfc_sli4_fw_cfg_check(phba); 3872 if (unlikely(rc)) 3873 goto out_free_bsmbx; 3874 3875 /* Set up the hba's configuration parameters. */ 3876 rc = lpfc_sli4_read_config(phba); 3877 if (unlikely(rc)) 3878 goto out_free_bsmbx; 3879 3880 /* Perform a function reset */ 3881 rc = lpfc_pci_function_reset(phba); 3882 if (unlikely(rc)) 3883 goto out_free_bsmbx; 3884 3885 /* Create all the SLI4 queues */ 3886 rc = lpfc_sli4_queue_create(phba); 3887 if (rc) 3888 goto out_free_bsmbx; 3889 3890 /* Create driver internal CQE event pool */ 3891 rc = lpfc_sli4_cq_event_pool_create(phba); 3892 if (rc) 3893 goto out_destroy_queue; 3894 3895 /* Initialize and populate the iocb list per host */ 3896 rc = lpfc_init_sgl_list(phba); 3897 if (rc) { 3898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3899 "1400 Failed to initialize sgl list.\n"); 3900 goto out_destroy_cq_event_pool; 3901 } 3902 rc = lpfc_init_active_sgl_array(phba); 3903 if (rc) { 3904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3905 "1430 Failed to initialize sgl list.\n"); 3906 goto out_free_sgl_list; 3907 } 3908 3909 rc = lpfc_sli4_init_rpi_hdrs(phba); 3910 if (rc) { 3911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3912 "1432 Failed to initialize rpi headers.\n"); 3913 goto out_free_active_sgl; 3914 } 3915 3916 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 3917 phba->cfg_fcp_eq_count), GFP_KERNEL); 3918 if (!phba->sli4_hba.fcp_eq_hdl) { 3919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3920 "2572 Failed allocate memory for fast-path " 3921 "per-EQ handle array\n"); 3922 goto out_remove_rpi_hdrs; 3923 } 3924 3925 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 3926 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 3927 if (!phba->sli4_hba.msix_entries) { 3928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3929 "2573 Failed allocate memory for msi-x " 3930 "interrupt vector entries\n"); 3931 goto out_free_fcp_eq_hdl; 3932 } 3933 3934 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 3935 GFP_KERNEL); 3936 if (!mboxq) { 3937 rc = -ENOMEM; 3938 goto out_free_fcp_eq_hdl; 3939 } 3940 3941 /* Get the Supported Pages. It is always available. */ 3942 lpfc_supported_pages(mboxq); 3943 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 3944 if (unlikely(rc)) { 3945 rc = -EIO; 3946 mempool_free(mboxq, phba->mbox_mem_pool); 3947 goto out_free_fcp_eq_hdl; 3948 } 3949 3950 mqe = &mboxq->u.mqe; 3951 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 3952 LPFC_MAX_SUPPORTED_PAGES); 3953 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 3954 switch (pn_page[i]) { 3955 case LPFC_SLI4_PARAMETERS: 3956 phba->sli4_hba.pc_sli4_params.supported = 1; 3957 break; 3958 default: 3959 break; 3960 } 3961 } 3962 3963 /* Read the port's SLI4 Parameters capabilities if supported. 
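 * (pc_sli4_params.supported was set above while walking the Supported
 * Pages response.)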
*/
3964 if (phba->sli4_hba.pc_sli4_params.supported)
3965 rc = lpfc_pc_sli4_params_get(phba, mboxq);
3966 mempool_free(mboxq, phba->mbox_mem_pool);
3967 if (rc) {
3968 rc = -EIO;
3969 goto out_free_fcp_eq_hdl;
3970 }
3971 return rc;
3972
3973out_free_fcp_eq_hdl:
3974 kfree(phba->sli4_hba.fcp_eq_hdl);
3975out_remove_rpi_hdrs:
3976 lpfc_sli4_remove_rpi_hdrs(phba);
3977out_free_active_sgl:
3978 lpfc_free_active_sgl(phba);
3979out_free_sgl_list:
3980 lpfc_free_sgl_list(phba);
3981out_destroy_cq_event_pool:
3982 lpfc_sli4_cq_event_pool_destroy(phba);
3983out_destroy_queue:
3984 lpfc_sli4_queue_destroy(phba);
3985out_free_bsmbx:
3986 lpfc_destroy_bootstrap_mbox(phba);
3987out_free_mem:
3988 lpfc_mem_free(phba);
3989 return rc;
3990}
3991
3992/**
3993 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3994 * @phba: pointer to lpfc hba data structure.
3995 *
3996 * This routine is invoked to unset the driver internal resources set up
3997 * specific for supporting the SLI-4 HBA device it is attached to.
3998 **/
3999static void
4000lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4001{
4002 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4003
4004 /* unregister default FCFI from the HBA */
4005 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4006
4007 /* Free the default FCR table */
4008 lpfc_sli_remove_dflt_fcf(phba);
4009
4010 /* Free memory allocated for msi-x interrupt vector entries */
4011 kfree(phba->sli4_hba.msix_entries);
4012
4013 /* Free memory allocated for fast-path work queue handles */
4014 kfree(phba->sli4_hba.fcp_eq_hdl);
4015
4016 /* Free the allocated rpi headers. */
4017 lpfc_sli4_remove_rpi_hdrs(phba);
4018 lpfc_sli4_remove_rpis(phba);
4019
4020 /* Free the ELS sgl list */
4021 lpfc_free_active_sgl(phba);
4022 lpfc_free_sgl_list(phba);
4023
4024 /* Free the SCSI sgl management array */
4025 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4026
4027 /* Free the SLI4 queues */
4028 lpfc_sli4_queue_destroy(phba);
4029
4030 /* Free the completion queue EQ event pool */
4031 lpfc_sli4_cq_event_release_all(phba);
4032 lpfc_sli4_cq_event_pool_destroy(phba);
4033
4034 /* Reset SLI4 HBA FCoE function */
4035 lpfc_pci_function_reset(phba);
4036
4037 /* Free the bsmbx region. */
4038 lpfc_destroy_bootstrap_mbox(phba);
4039
4040 /* Free the SLI Layer memory with SLI4 HBAs */
4041 lpfc_mem_free_all(phba);
4042
4043 /* Free the current connect table */
4044 list_for_each_entry_safe(conn_entry, next_conn_entry,
4045 &phba->fcf_conn_rec_list, list) {
4046 list_del_init(&conn_entry->list);
4047 kfree(conn_entry);
4048 }
4049
4050 return;
4051}
4052
4053/**
4054 * lpfc_init_api_table_setup - Set up init api function jump table
4055 * @phba: The hba struct for which this call is being executed.
4056 * @dev_grp: The HBA PCI-Device group number.
4057 *
4058 * This routine sets up the device INIT interface API function jump table
4059 * in @phba struct.
4060 *
4061 * Returns: 0 - success, -ENODEV - failure.
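 *
 * Once the table is in place, callers dispatch through the jump table
 * instead of branching on the SLI revision, e.g. (sketch only):
 *
 *	phba->lpfc_stop_port(phba);	resolves to the _s3 or _s4 variant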
4062 **/
4063int
4064lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4065{
4066 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4067 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4068 switch (dev_grp) {
4069 case LPFC_PCI_DEV_LP:
4070 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4071 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4072 phba->lpfc_stop_port = lpfc_stop_port_s3;
4073 break;
4074 case LPFC_PCI_DEV_OC:
4075 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4076 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4077 phba->lpfc_stop_port = lpfc_stop_port_s4;
4078 break;
4079 default:
4080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4081 "1431 Invalid HBA PCI-device group: 0x%x\n",
4082 dev_grp);
4083 return -ENODEV;
4084 break;
4085 }
4086 return 0;
4087}
4088
4089/**
4090 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4091 * @phba: pointer to lpfc hba data structure.
4092 *
4093 * This routine is invoked to set up the driver internal resources before the
4094 * device specific resource setup to support the HBA device it is attached to.
4095 *
4096 * Return codes
4097 * 0 - successful
4098 * other values - error
4099 **/
4100static int
4101lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4102{
4103 /*
4104 * Driver resources common to all SLI revisions
4105 */
4106 atomic_set(&phba->fast_event_count, 0);
4107 spin_lock_init(&phba->hbalock);
4108
4109 /* Initialize ndlp management spinlock */
4110 spin_lock_init(&phba->ndlp_lock);
4111
4112 INIT_LIST_HEAD(&phba->port_list);
4113 INIT_LIST_HEAD(&phba->work_list);
4114 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4115
4116 /* Initialize the wait queue head for the kernel thread */
4117 init_waitqueue_head(&phba->work_waitq);
4118
4119 /* Initialize the scsi buffer list used by driver for scsi IO */
4120 spin_lock_init(&phba->scsi_buf_list_lock);
4121 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4122
4123 /* Initialize the fabric iocb list */
4124 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4125
4126 /* Initialize list to save ELS buffers */
4127 INIT_LIST_HEAD(&phba->elsbuf);
4128
4129 /* Initialize FCF connection rec list */
4130 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4131
4132 return 0;
4133}
4134
4135/**
4136 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4137 * @phba: pointer to lpfc hba data structure.
4138 *
4139 * This routine is invoked to set up the driver internal resources after the
4140 * device specific resource setup to support the HBA device it is attached to.
4141 *
4142 * Return codes
4143 * 0 - successful
4144 * other values - error
4145 **/
4146static int
4147lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4148{
4149 int error;
4150
4151 /* Startup the kernel thread for this host adapter. */
4152 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4153 "lpfc_worker_%d", phba->brd_no);
4154 if (IS_ERR(phba->worker_thread)) {
4155 error = PTR_ERR(phba->worker_thread);
4156 return error;
4157 }
4158
4159 return 0;
4160}
4161
4162/**
4163 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4164 * @phba: pointer to lpfc hba data structure.
4165 *
4166 * This routine is invoked to unset the driver internal resources set up after
4167 * the device specific resource setup for supporting the HBA device it is
4168 * attached to.
4169 **/
4170static void
4171lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4172{
4173 /* Stop kernel worker thread */
4174 kthread_stop(phba->worker_thread);
4175}
4176
4177/**
4178 * lpfc_free_iocb_list - Free iocb list.
4179 * @phba: pointer to lpfc hba data structure.
4180 *
4181 * This routine is invoked to free the driver's IOCB list and memory.
4182 **/
4183static void
4184lpfc_free_iocb_list(struct lpfc_hba *phba)
4185{
4186 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4187
4188 spin_lock_irq(&phba->hbalock);
4189 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4190 &phba->lpfc_iocb_list, list) {
4191 list_del(&iocbq_entry->list);
4192 kfree(iocbq_entry);
4193 phba->total_iocbq_bufs--;
4194 }
4195 spin_unlock_irq(&phba->hbalock);
4196
4197 return;
4198}
4199
4200/**
4201 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4202 * @phba: pointer to lpfc hba data structure.
4203 *
4204 * This routine is invoked to allocate and initialize the driver's IOCB
4205 * list and set up the IOCB tag array accordingly.
4206 *
4207 * Return codes
4208 * 0 - successful
4209 * other values - error
4210 **/
4211static int
4212lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4213{
4214 struct lpfc_iocbq *iocbq_entry = NULL;
4215 uint16_t iotag;
4216 int i;
4217
4218 /* Initialize and populate the iocb list per host. */
4219 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4220 for (i = 0; i < iocb_count; i++) {
4221 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4222 if (iocbq_entry == NULL) {
4223 printk(KERN_ERR "%s: only allocated %d iocbs of "
4224 "expected %d count. Unloading driver.\n",
4225 __func__, i, iocb_count);
4226 goto out_free_iocbq;
4227 }
4228
4229 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4230 if (iotag == 0) {
4231 kfree(iocbq_entry);
4232 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4233 "Unloading driver.\n", __func__);
4234 goto out_free_iocbq;
4235 }
4236 iocbq_entry->sli4_xritag = NO_XRI;
4237
4238 spin_lock_irq(&phba->hbalock);
4239 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4240 phba->total_iocbq_bufs++;
4241 spin_unlock_irq(&phba->hbalock);
4242 }
4243
4244 return 0;
4245
4246out_free_iocbq:
4247 lpfc_free_iocb_list(phba);
4248
4249 return -ENOMEM;
4250}
4251
4252/**
4253 * lpfc_free_sgl_list - Free sgl list.
4254 * @phba: pointer to lpfc hba data structure.
4255 *
4256 * This routine is invoked to free the driver's sgl list and memory.
4257 **/
4258static void
4259lpfc_free_sgl_list(struct lpfc_hba *phba)
4260{
4261 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4262 LIST_HEAD(sglq_list);
4263 int rc = 0;
4264
4265 spin_lock_irq(&phba->hbalock);
4266 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4267 spin_unlock_irq(&phba->hbalock);
4268
4269 list_for_each_entry_safe(sglq_entry, sglq_next,
4270 &sglq_list, list) {
4271 list_del(&sglq_entry->list);
4272 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4273 kfree(sglq_entry);
4274 phba->sli4_hba.total_sglq_bufs--;
4275 }
4276 rc = lpfc_sli4_remove_all_sgl_pages(phba);
4277 if (rc) {
4278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4279 "2005 Unable to deregister pages from HBA: %x\n", rc);
4280 }
4281 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4282}
4283
4284/**
4285 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4286 * @phba: pointer to lpfc hba data structure.
4287 *
4288 * This routine is invoked to allocate the driver's active sgl memory.
4289 * This array will hold the sglq_entries for active IOs.
4290 **/
4291static int
4292lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4293{
4294 int size;
4295 size = sizeof(struct lpfc_sglq *);
4296 size *= phba->sli4_hba.max_cfg_param.max_xri;
4297
4298 phba->sli4_hba.lpfc_sglq_active_list =
4299 kzalloc(size, GFP_KERNEL);
4300 if (!phba->sli4_hba.lpfc_sglq_active_list)
4301 return -ENOMEM;
4302 return 0;
4303}
4304
4305/**
4306 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4307 * @phba: pointer to lpfc hba data structure.
4308 *
4309 * This routine is invoked to walk through the array of active sglq entries
4310 * and free all of the resources.
4311 * This is just a placeholder for now.
4312 **/
4313static void
4314lpfc_free_active_sgl(struct lpfc_hba *phba)
4315{
4316 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4317}
4318
4319/**
4320 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4321 * @phba: pointer to lpfc hba data structure.
4322 *
4323 * This routine is invoked to allocate and initialize the driver's sgl
4324 * list and set up the sgl xritag tag array accordingly.
4325 *
4326 * Return codes
4327 * 0 - successful
4328 * other values - error
4329 **/
4330static int
4331lpfc_init_sgl_list(struct lpfc_hba *phba)
4332{
4333 struct lpfc_sglq *sglq_entry = NULL;
4334 int i;
4335 int els_xri_cnt;
4336
4337 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4338 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4339 "2400 lpfc_init_sgl_list els %d.\n",
4340 els_xri_cnt);
4341 /* Initialize and populate the sglq list per host/VF. */
4342 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4343 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4344
4345 /* Sanity check on XRI management */
4346 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4347 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4348 "2562 No room left for SCSI XRI allocation: "
4349 "max_xri=%d, els_xri=%d\n",
4350 phba->sli4_hba.max_cfg_param.max_xri,
4351 els_xri_cnt);
4352 return -ENOMEM;
4353 }
4354
4355 /* Allocate memory for the ELS XRI management array */
4356 phba->sli4_hba.lpfc_els_sgl_array =
4357 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4358 GFP_KERNEL);
4359
4360 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4361 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4362 "2401 Failed to allocate memory for ELS "
4363 "XRI management array of size %d.\n",
4364 els_xri_cnt);
4365 return -ENOMEM;
4366 }
4367
4368 /* Keep the SCSI XRIs in the XRI management array */
4369 phba->sli4_hba.scsi_xri_max =
4370 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4371 phba->sli4_hba.scsi_xri_cnt = 0;
4372
4373 phba->sli4_hba.lpfc_scsi_psb_array =
4374 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4375 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4376
4377 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4378 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4379 "2563 Failed to allocate memory for SCSI "
4380 "XRI management array of size %d.\n",
4381 phba->sli4_hba.scsi_xri_max);
4382 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4383 return -ENOMEM;
4384 }
4385
4386 for (i = 0; i < els_xri_cnt; i++) {
4387 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4388 if (sglq_entry == NULL) {
4389 printk(KERN_ERR "%s: only allocated %d sgls of "
4390 "expected %d count.
Unloading driver.\n",
4391 __func__, i, els_xri_cnt);
4392 goto out_free_mem;
4393 }
4394
4395 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4396 if (sglq_entry->sli4_xritag == NO_XRI) {
4397 kfree(sglq_entry);
4398 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4399 "Unloading driver.\n", __func__);
4400 goto out_free_mem;
4401 }
4402 sglq_entry->buff_type = GEN_BUFF_TYPE;
4403 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4404 if (sglq_entry->virt == NULL) {
4405 kfree(sglq_entry);
4406 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4407 "Unloading driver.\n", __func__);
4408 goto out_free_mem;
4409 }
4410 sglq_entry->sgl = sglq_entry->virt;
4411 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4412
4413 /* The list order is used by later block SGL registration */
4414 spin_lock_irq(&phba->hbalock);
4415 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4416 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4417 phba->sli4_hba.total_sglq_bufs++;
4418 spin_unlock_irq(&phba->hbalock);
4419 }
4420 return 0;
4421
4422out_free_mem:
4423 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4424 lpfc_free_sgl_list(phba);
4425 return -ENOMEM;
4426}
4427
4428/**
4429 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4430 * @phba: pointer to lpfc hba data structure.
4431 *
4432 * This routine is invoked to post rpi header templates to the
4433 * HBA consistent with the SLI-4 interface spec. This routine
4434 * posts a PAGE_SIZE memory region to the port to hold up to
4435 * PAGE_SIZE / 64 rpi context headers.
4436 * No locks are held here because this is an initialization routine
4437 * called only from probe or lpfc_online when interrupts are not
4438 * enabled and the driver is reinitializing the device.
4439 *
4440 * Return codes
4441 * 0 - successful
4442 * ENOMEM - No available memory
4443 * EIO - The mailbox failed to complete successfully.
4444 **/
4445int
4446lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4447{
4448 int rc = 0;
4449 int longs;
4450 uint16_t rpi_count;
4451 struct lpfc_rpi_hdr *rpi_hdr;
4452
4453 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4454
4455 /*
4456 * Provision an rpi bitmask range for discovery. The bitmask must
4457 * be large enough to cover rpis up through rpi_base + max_rpi - 1.
4458 */
4459 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4460 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4461
4462 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4463 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4464 GFP_KERNEL);
4465 if (!phba->sli4_hba.rpi_bmask)
4466 return -ENOMEM;
4467
4468 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4469 if (!rpi_hdr) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4471 "0391 Error during rpi post operation\n");
4472 lpfc_sli4_remove_rpis(phba);
4473 rc = -ENODEV;
4474 }
4475
4476 return rc;
4477}
4478
4479/**
4480 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4481 * @phba: pointer to lpfc hba data structure.
4482 *
4483 * This routine is invoked to allocate a single 4KB memory region to
4484 * support rpis and stores them in the phba. This single region
4485 * provides support for up to 64 rpis. The region is used globally
4486 * by the device.
4487 *
4488 * Returns:
4489 * A valid rpi hdr on success.
4490 * A NULL pointer on any failure.
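 *
 * For example (illustrative numbers only): with rpi_base 0 and max_rpi
 * 128, the first call covers rpis 0-63 and advances next_rpi to 64; a
 * second call would cover rpis 64-127.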
4491 **/ 4492struct lpfc_rpi_hdr * 4493lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 4494{ 4495 uint16_t rpi_limit, curr_rpi_range; 4496 struct lpfc_dmabuf *dmabuf; 4497 struct lpfc_rpi_hdr *rpi_hdr; 4498 4499 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4500 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4501 4502 spin_lock_irq(&phba->hbalock); 4503 curr_rpi_range = phba->sli4_hba.next_rpi; 4504 spin_unlock_irq(&phba->hbalock); 4505 4506 /* 4507 * The port has a limited number of rpis. The increment here 4508 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 4509 * and to allow the full max_rpi range per port. 4510 */ 4511 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4512 return NULL; 4513 4514 /* 4515 * First allocate the protocol header region for the port. The 4516 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 4517 */ 4518 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4519 if (!dmabuf) 4520 return NULL; 4521 4522 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4523 LPFC_HDR_TEMPLATE_SIZE, 4524 &dmabuf->phys, 4525 GFP_KERNEL); 4526 if (!dmabuf->virt) { 4527 rpi_hdr = NULL; 4528 goto err_free_dmabuf; 4529 } 4530 4531 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 4532 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 4533 rpi_hdr = NULL; 4534 goto err_free_coherent; 4535 } 4536 4537 /* Save the rpi header data for cleanup later. */ 4538 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4539 if (!rpi_hdr) 4540 goto err_free_coherent; 4541 4542 rpi_hdr->dmabuf = dmabuf; 4543 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4544 rpi_hdr->page_count = 1; 4545 spin_lock_irq(&phba->hbalock); 4546 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4547 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4548 4549 /* 4550 * The next_rpi stores the next module-64 rpi value to post 4551 * in any subsequent rpi memory region postings. 4552 */ 4553 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4554 spin_unlock_irq(&phba->hbalock); 4555 return rpi_hdr; 4556 4557 err_free_coherent: 4558 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4559 dmabuf->virt, dmabuf->phys); 4560 err_free_dmabuf: 4561 kfree(dmabuf); 4562 return NULL; 4563} 4564 4565/** 4566 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4567 * @phba: pointer to lpfc hba data structure. 4568 * 4569 * This routine is invoked to remove all memory resources allocated 4570 * to support rpis. This routine presumes the caller has released all 4571 * rpis consumed by fabric or port logins and is prepared to have 4572 * the header pages removed. 4573 **/ 4574void 4575lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4576{ 4577 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4578 4579 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4580 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4581 list_del(&rpi_hdr->list); 4582 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4583 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4584 kfree(rpi_hdr->dmabuf); 4585 kfree(rpi_hdr); 4586 } 4587 4588 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4589 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4590} 4591 4592/** 4593 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4594 * @pdev: pointer to pci device data structure. 4595 * 4596 * This routine is invoked to allocate the driver hba data structure for an 4597 * HBA device. 
If the allocation is successful, the phba reference to the 4598 * PCI device data structure is set. 4599 * 4600 * Return codes 4601 * pointer to @phba - successful 4602 * NULL - error 4603 **/ 4604static struct lpfc_hba * 4605lpfc_hba_alloc(struct pci_dev *pdev) 4606{ 4607 struct lpfc_hba *phba; 4608 4609 /* Allocate memory for HBA structure */ 4610 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4611 if (!phba) { 4612 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 4613 return NULL; 4614 } 4615 4616 /* Set reference to PCI device in HBA structure */ 4617 phba->pcidev = pdev; 4618 4619 /* Assign an unused board number */ 4620 phba->brd_no = lpfc_get_instance(); 4621 if (phba->brd_no < 0) { 4622 kfree(phba); 4623 return NULL; 4624 } 4625 4626 spin_lock_init(&phba->ct_ev_lock); 4627 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4628 4629 return phba; 4630} 4631 4632/** 4633 * lpfc_hba_free - Free driver hba data structure with a device. 4634 * @phba: pointer to lpfc hba data structure. 4635 * 4636 * This routine is invoked to free the driver hba data structure with an 4637 * HBA device. 4638 **/ 4639static void 4640lpfc_hba_free(struct lpfc_hba *phba) 4641{ 4642 /* Release the driver assigned board number */ 4643 idr_remove(&lpfc_hba_index, phba->brd_no); 4644 4645 kfree(phba); 4646 return; 4647} 4648 4649/** 4650 * lpfc_create_shost - Create hba physical port with associated scsi host. 4651 * @phba: pointer to lpfc hba data structure. 4652 * 4653 * This routine is invoked to create HBA physical port and associate a SCSI 4654 * host with it. 4655 * 4656 * Return codes 4657 * 0 - successful 4658 * other values - error 4659 **/ 4660static int 4661lpfc_create_shost(struct lpfc_hba *phba) 4662{ 4663 struct lpfc_vport *vport; 4664 struct Scsi_Host *shost; 4665 4666 /* Initialize HBA FC structure */ 4667 phba->fc_edtov = FF_DEF_EDTOV; 4668 phba->fc_ratov = FF_DEF_RATOV; 4669 phba->fc_altov = FF_DEF_ALTOV; 4670 phba->fc_arbtov = FF_DEF_ARBTOV; 4671 4672 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4673 if (!vport) 4674 return -ENODEV; 4675 4676 shost = lpfc_shost_from_vport(vport); 4677 phba->pport = vport; 4678 lpfc_debugfs_initialize(vport); 4679 /* Put reference to SCSI host to driver's device private data */ 4680 pci_set_drvdata(phba->pcidev, shost); 4681 4682 return 0; 4683} 4684 4685/** 4686 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 4687 * @phba: pointer to lpfc hba data structure. 4688 * 4689 * This routine is invoked to destroy HBA physical port and the associated 4690 * SCSI host. 4691 **/ 4692static void 4693lpfc_destroy_shost(struct lpfc_hba *phba) 4694{ 4695 struct lpfc_vport *vport = phba->pport; 4696 4697 /* Destroy physical port that associated with the SCSI host */ 4698 destroy_port(vport); 4699 4700 return; 4701} 4702 4703/** 4704 * lpfc_setup_bg - Setup Block guard structures and debug areas. 4705 * @phba: pointer to lpfc hba data structure. 4706 * @shost: the shost to be used to detect Block guard settings. 4707 * 4708 * This routine sets up the local Block guard protocol settings for @shost. 4709 * This routine also allocates memory for debugging bg buffers. 
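 *
 * Note that pagecnt below is an allocation order, not a page count:
 * __get_free_pages(GFP_KERNEL, 10) requests 2^10 contiguous pages, and
 * the loop retries with successively smaller orders until one succeeds.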
4710 **/ 4711static void 4712lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 4713{ 4714 int pagecnt = 10; 4715 if (lpfc_prot_mask && lpfc_prot_guard) { 4716 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4717 "1478 Registering BlockGuard with the " 4718 "SCSI layer\n"); 4719 scsi_host_set_prot(shost, lpfc_prot_mask); 4720 scsi_host_set_guard(shost, lpfc_prot_guard); 4721 } 4722 if (!_dump_buf_data) { 4723 while (pagecnt) { 4724 spin_lock_init(&_dump_buf_lock); 4725 _dump_buf_data = 4726 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4727 if (_dump_buf_data) { 4728 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4729 "9043 BLKGRD: allocated %d pages for " 4730 "_dump_buf_data at 0x%p\n", 4731 (1 << pagecnt), _dump_buf_data); 4732 _dump_buf_data_order = pagecnt; 4733 memset(_dump_buf_data, 0, 4734 ((1 << PAGE_SHIFT) << pagecnt)); 4735 break; 4736 } else 4737 --pagecnt; 4738 } 4739 if (!_dump_buf_data_order) 4740 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4741 "9044 BLKGRD: ERROR unable to allocate " 4742 "memory for hexdump\n"); 4743 } else 4744 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4745 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 4746 "\n", _dump_buf_data); 4747 if (!_dump_buf_dif) { 4748 while (pagecnt) { 4749 _dump_buf_dif = 4750 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4751 if (_dump_buf_dif) { 4752 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4753 "9046 BLKGRD: allocated %d pages for " 4754 "_dump_buf_dif at 0x%p\n", 4755 (1 << pagecnt), _dump_buf_dif); 4756 _dump_buf_dif_order = pagecnt; 4757 memset(_dump_buf_dif, 0, 4758 ((1 << PAGE_SHIFT) << pagecnt)); 4759 break; 4760 } else 4761 --pagecnt; 4762 } 4763 if (!_dump_buf_dif_order) 4764 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4765 "9047 BLKGRD: ERROR unable to allocate " 4766 "memory for hexdump\n"); 4767 } else 4768 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4769 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 4770 _dump_buf_dif); 4771} 4772 4773/** 4774 * lpfc_post_init_setup - Perform necessary device post initialization setup. 4775 * @phba: pointer to lpfc hba data structure. 4776 * 4777 * This routine is invoked to perform all the necessary post initialization 4778 * setup for the device. 4779 **/ 4780static void 4781lpfc_post_init_setup(struct lpfc_hba *phba) 4782{ 4783 struct Scsi_Host *shost; 4784 struct lpfc_adapter_event_header adapter_event; 4785 4786 /* Get the default values for Model Name and Description */ 4787 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 4788 4789 /* 4790 * hba setup may have changed the hba_queue_depth so we need to 4791 * adjust the value of can_queue. 4792 */ 4793 shost = pci_get_drvdata(phba->pcidev); 4794 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4795 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4796 lpfc_setup_bg(phba, shost); 4797 4798 lpfc_host_attrib_init(shost); 4799 4800 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 4801 spin_lock_irq(shost->host_lock); 4802 lpfc_poll_start_timer(phba); 4803 spin_unlock_irq(shost->host_lock); 4804 } 4805 4806 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4807 "0428 Perform SCSI scan\n"); 4808 /* Send board arrival event to upper layer */ 4809 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 4810 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 4811 fc_host_post_vendor_event(shost, fc_get_event_number(), 4812 sizeof(adapter_event), 4813 (char *) &adapter_event, 4814 LPFC_NL_VENDOR_ID); 4815 return; 4816} 4817 4818/** 4819 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 
4820 * @phba: pointer to lpfc hba data structure. 4821 * 4822 * This routine is invoked to set up the PCI device memory space for device 4823 * with SLI-3 interface spec. 4824 * 4825 * Return codes 4826 * 0 - successful 4827 * other values - error 4828 **/ 4829static int 4830lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 4831{ 4832 struct pci_dev *pdev; 4833 unsigned long bar0map_len, bar2map_len; 4834 int i, hbq_count; 4835 void *ptr; 4836 int error = -ENODEV; 4837 4838 /* Obtain PCI device reference */ 4839 if (!phba->pcidev) 4840 return error; 4841 else 4842 pdev = phba->pcidev; 4843 4844 /* Set the device DMA mask size */ 4845 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 4846 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 4847 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 4848 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 4849 return error; 4850 } 4851 } 4852 4853 /* Get the bus address of Bar0 and Bar2 and the number of bytes 4854 * required by each mapping. 4855 */ 4856 phba->pci_bar0_map = pci_resource_start(pdev, 0); 4857 bar0map_len = pci_resource_len(pdev, 0); 4858 4859 phba->pci_bar2_map = pci_resource_start(pdev, 2); 4860 bar2map_len = pci_resource_len(pdev, 2); 4861 4862 /* Map HBA SLIM to a kernel virtual address. */ 4863 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 4864 if (!phba->slim_memmap_p) { 4865 dev_printk(KERN_ERR, &pdev->dev, 4866 "ioremap failed for SLIM memory.\n"); 4867 goto out; 4868 } 4869 4870 /* Map HBA Control Registers to a kernel virtual address. */ 4871 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 4872 if (!phba->ctrl_regs_memmap_p) { 4873 dev_printk(KERN_ERR, &pdev->dev, 4874 "ioremap failed for HBA control registers.\n"); 4875 goto out_iounmap_slim; 4876 } 4877 4878 /* Allocate memory for SLI-2 structures */ 4879 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 4880 SLI2_SLIM_SIZE, 4881 &phba->slim2p.phys, 4882 GFP_KERNEL); 4883 if (!phba->slim2p.virt) 4884 goto out_iounmap; 4885 4886 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 4887 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 4888 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 4889 phba->IOCBs = (phba->slim2p.virt + 4890 offsetof(struct lpfc_sli2_slim, IOCBs)); 4891 4892 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 4893 lpfc_sli_hbq_size(), 4894 &phba->hbqslimp.phys, 4895 GFP_KERNEL); 4896 if (!phba->hbqslimp.virt) 4897 goto out_free_slim; 4898 4899 hbq_count = lpfc_sli_hbq_count(); 4900 ptr = phba->hbqslimp.virt; 4901 for (i = 0; i < hbq_count; ++i) { 4902 phba->hbqs[i].hbq_virt = ptr; 4903 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4904 ptr += (lpfc_hbq_defs[i]->entry_count * 4905 sizeof(struct lpfc_hbq_entry)); 4906 } 4907 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 4908 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 4909 4910 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 4911 4912 INIT_LIST_HEAD(&phba->rb_pend_list); 4913 4914 phba->MBslimaddr = phba->slim_memmap_p; 4915 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 4916 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 4917 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 4918 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 4919 4920 return 0; 4921 4922out_free_slim: 4923 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 4924 phba->slim2p.virt, phba->slim2p.phys); 4925out_iounmap: 4926 
iounmap(phba->ctrl_regs_memmap_p);
4927out_iounmap_slim:
4928 iounmap(phba->slim_memmap_p);
4929out:
4930 return error;
4931}
4932
4933/**
4934 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4935 * @phba: pointer to lpfc hba data structure.
4936 *
4937 * This routine is invoked to unset the PCI device memory space for device
4938 * with SLI-3 interface spec.
4939 **/
4940static void
4941lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4942{
4943 struct pci_dev *pdev;
4944
4945 /* Obtain PCI device reference */
4946 if (!phba->pcidev)
4947 return;
4948 else
4949 pdev = phba->pcidev;
4950
4951 /* Free coherent DMA memory allocated */
4952 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4953 phba->hbqslimp.virt, phba->hbqslimp.phys);
4954 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4955 phba->slim2p.virt, phba->slim2p.phys);
4956
4957 /* I/O memory unmap */
4958 iounmap(phba->ctrl_regs_memmap_p);
4959 iounmap(phba->slim_memmap_p);
4960
4961 return;
4962}
4963
4964/**
4965 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4966 * @phba: pointer to lpfc hba data structure.
4967 *
4968 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4969 * done and check status.
4970 *
4971 * Return 0 if successful, otherwise -ENODEV.
4972 **/
4973int
4974lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4975{
4976 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
4977 int i, port_error = -ENODEV;
4978
4979 if (!phba->sli4_hba.STAregaddr)
4980 return -ENODEV;
4981
4982 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4983 for (i = 0; i < 3000; i++) {
4984 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4985 /* Encounter fatal POST error, break out */
4986 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4987 port_error = -ENODEV;
4988 break;
4989 }
4990 if (LPFC_POST_STAGE_ARMFW_READY ==
4991 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4992 port_error = 0;
4993 break;
4994 }
4995 msleep(10);
4996 }
4997
4998 if (port_error)
4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000 "1408 Failure HBA POST Status: sta_reg=0x%x, "
5001 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5002 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
5003 bf_get(lpfc_hst_state_perr, &sta_reg),
5004 bf_get(lpfc_hst_state_sfi, &sta_reg),
5005 bf_get(lpfc_hst_state_nip, &sta_reg),
5006 bf_get(lpfc_hst_state_ipc, &sta_reg),
5007 bf_get(lpfc_hst_state_xrom, &sta_reg),
5008 bf_get(lpfc_hst_state_dl, &sta_reg),
5009 bf_get(lpfc_hst_state_port_status, &sta_reg));
5010
5011 /* Log device information */
5012 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5013 if (bf_get(lpfc_sli_intf_valid,
5014 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5016 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5017 "FeatureL1=0x%x, FeatureL2=0x%x\n",
5018 bf_get(lpfc_sli_intf_sli_family,
5019 &phba->sli4_hba.sli_intf),
5020 bf_get(lpfc_sli_intf_slirev,
5021 &phba->sli4_hba.sli_intf),
5022 bf_get(lpfc_sli_intf_featurelevel1,
5023 &phba->sli4_hba.sli_intf),
5024 bf_get(lpfc_sli_intf_featurelevel2,
5025 &phba->sli4_hba.sli_intf));
5026 }
5027 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5028 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5029 /* With an unrecoverable error, log the error message and return error */
5030 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5031 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5032 if ((~phba->sli4_hba.ue_mask_lo &
uerrlo_reg.word0) ||
5033 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5035 "1422 HBA Unrecoverable error: "
5036 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5037 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5038 uerrlo_reg.word0, uerrhi_reg.word0,
5039 phba->sli4_hba.ue_mask_lo,
5040 phba->sli4_hba.ue_mask_hi);
5041 return -ENODEV;
5042 }
5043
5044 return port_error;
5045}
5046
5047/**
5048 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5049 * @phba: pointer to lpfc hba data structure.
5050 *
5051 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5052 * memory map.
5053 **/
5054static void
5055lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5056{
5057 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5058 LPFC_UERR_STATUS_LO;
5059 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5060 LPFC_UERR_STATUS_HI;
5061 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5062 LPFC_UE_MASK_LO;
5063 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5064 LPFC_UE_MASK_HI;
5065 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5066 LPFC_SLI_INTF;
5067}
5068
5069/**
5070 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5071 * @phba: pointer to lpfc hba data structure.
5072 *
5073 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5074 * memory map.
5075 **/
5076static void
5077lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5078{
5079
5080 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5081 LPFC_HST_STATE;
5082 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5083 LPFC_HST_ISR0;
5084 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5085 LPFC_HST_IMR0;
5086 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5087 LPFC_HST_ISCR0;
5088 return;
5089}
5090
5091/**
5092 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5093 * @phba: pointer to lpfc hba data structure.
5094 * @vf: virtual function number
5095 *
5096 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5097 * based on the given virtual function number, @vf.
5098 *
5099 * Return 0 if successful, otherwise -ENODEV.
5100 **/
5101static int
5102lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5103{
5104 if (vf > LPFC_VIR_FUNC_MAX)
5105 return -ENODEV;
5106
5107 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5108 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5109 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5110 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5111 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5112 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5113 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5114 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5115 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5116 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5117 return 0;
5118}
5119
5120/**
5121 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5122 * @phba: pointer to lpfc hba data structure.
5123 *
5124 * This routine is invoked to create the bootstrap mailbox
5125 * region consistent with the SLI-4 interface spec. This
5126 * routine allocates all memory necessary to communicate
5127 * mailbox commands to the port and sets up all alignment
5128 * needs.
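 * The aligned physical address is later presented to the port as two
 * tagged 30-bit halves: bits 4-33 form addr_lo and bits 34-63 form
 * addr_hi, each shifted left two bits to make room for the high/low
 * marker bit.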
No locks are expected to be held when calling
5129 * this routine.
5130 *
5131 * Return codes
5132 * 0 - successful
5133 * ENOMEM - could not allocate memory.
5134 **/
5135static int
5136lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5137{
5138 uint32_t bmbx_size;
5139 struct lpfc_dmabuf *dmabuf;
5140 struct dma_address *dma_address;
5141 uint32_t pa_addr;
5142 uint64_t phys_addr;
5143
5144 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5145 if (!dmabuf)
5146 return -ENOMEM;
5147
5148 /*
5149 * The bootstrap mailbox region is comprised of 2 parts
5150 * plus an alignment restriction of 16 bytes.
5151 */
5152 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5153 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5154 bmbx_size,
5155 &dmabuf->phys,
5156 GFP_KERNEL);
5157 if (!dmabuf->virt) {
5158 kfree(dmabuf);
5159 return -ENOMEM;
5160 }
5161 memset(dmabuf->virt, 0, bmbx_size);
5162
5163 /*
5164 * Initialize the bootstrap mailbox pointers now so that the register
5165 * operations are simple later. The mailbox dma address is required
5166 * to be 16-byte aligned. Also align the virtual memory as each
5167 * mailbox is copied into the bmbx mailbox region before issuing the
5168 * command to the port.
5169 */
5170 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5171 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5172
5173 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5174 LPFC_ALIGN_16_BYTE);
5175 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5176 LPFC_ALIGN_16_BYTE);
5177
5178 /*
5179 * Set the high and low physical addresses now. The SLI4 alignment
5180 * requirement is 16 bytes and the mailbox is posted to the port
5181 * as two 30-bit addresses. The other data is a bit marking whether
5182 * the 30-bit address is the high or low address.
5183 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
5184 * cleanly on 32-bit machines.
5185 */
5186 dma_address = &phba->sli4_hba.bmbx.dma_address;
5187 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5188 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5189 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5190 LPFC_BMBX_BIT1_ADDR_HI);
5191
5192 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5193 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5194 LPFC_BMBX_BIT1_ADDR_LO);
5195 return 0;
5196}
5197
5198/**
5199 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5200 * @phba: pointer to lpfc hba data structure.
5201 *
5202 * This routine is invoked to teardown the bootstrap mailbox
5203 * region and release all host resources. This routine requires
5204 * the caller to ensure all mailbox commands have been recovered, no
5205 * additional mailbox commands are sent, and interrupts are disabled
5206 * before calling this routine.
5207 *
5208 **/
5209static void
5210lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5211{
5212 dma_free_coherent(&phba->pcidev->dev,
5213 phba->sli4_hba.bmbx.bmbx_size,
5214 phba->sli4_hba.bmbx.dmabuf->virt,
5215 phba->sli4_hba.bmbx.dmabuf->phys);
5216
5217 kfree(phba->sli4_hba.bmbx.dmabuf);
5218 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5219}
5220
5221/**
5222 * lpfc_sli4_read_config - Get the config parameters.
5223 * @phba: pointer to lpfc hba data structure.
5224 *
5225 * This routine is invoked to read the configuration parameters from the HBA.
5226 * The configuration parameters are used to set the base and maximum values
5227 * for RPIs, XRIs, VPIs, VFIs, and FCFIs.
These values also affect the resource
5228 * allocation for the port.
5229 *
5230 * Return codes
5231 * 0 - successful
5232 * ENOMEM - No available memory
5233 * EIO - The mailbox failed to complete successfully.
5234 **/
5235static int
5236lpfc_sli4_read_config(struct lpfc_hba *phba)
5237{
5238 LPFC_MBOXQ_t *pmb;
5239 struct lpfc_mbx_read_config *rd_config;
5240 uint32_t rc = 0;
5241
5242 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5243 if (!pmb) {
5244 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5245 "2011 Unable to allocate memory for issuing "
5246 "SLI_CONFIG_SPECIAL mailbox command\n");
5247 return -ENOMEM;
5248 }
5249
5250 lpfc_read_config(phba, pmb);
5251
5252 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5253 if (rc != MBX_SUCCESS) {
5254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5255 "2012 Mailbox failed, mbxCmd x%x "
5256 "READ_CONFIG, mbxStatus x%x\n",
5257 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5258 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5259 rc = -EIO;
5260 } else {
5261 rd_config = &pmb->u.mqe.un.rd_config;
5262 phba->sli4_hba.max_cfg_param.max_xri =
5263 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5264 phba->sli4_hba.max_cfg_param.xri_base =
5265 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5266 phba->sli4_hba.max_cfg_param.max_vpi =
5267 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5268 phba->sli4_hba.max_cfg_param.vpi_base =
5269 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5270 phba->sli4_hba.max_cfg_param.max_rpi =
5271 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5272 phba->sli4_hba.max_cfg_param.rpi_base =
5273 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5274 phba->sli4_hba.max_cfg_param.max_vfi =
5275 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5276 phba->sli4_hba.max_cfg_param.vfi_base =
5277 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5278 phba->sli4_hba.max_cfg_param.max_fcfi =
5279 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5280 phba->sli4_hba.max_cfg_param.fcfi_base =
5281 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5282 phba->sli4_hba.max_cfg_param.max_eq =
5283 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5284 phba->sli4_hba.max_cfg_param.max_rq =
5285 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5286 phba->sli4_hba.max_cfg_param.max_wq =
5287 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5288 phba->sli4_hba.max_cfg_param.max_cq =
5289 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5290 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5291 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5292 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5293 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5294 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5295 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5296 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5297 phba->max_vports = phba->max_vpi;
5298 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5299 "2003 cfg params XRI(B:%d M:%d), "
5300 "VPI(B:%d M:%d) "
5301 "VFI(B:%d M:%d) "
5302 "RPI(B:%d M:%d) "
5303 "FCFI(B:%d M:%d)\n",
5304 phba->sli4_hba.max_cfg_param.xri_base,
5305 phba->sli4_hba.max_cfg_param.max_xri,
5306 phba->sli4_hba.max_cfg_param.vpi_base,
5307 phba->sli4_hba.max_cfg_param.max_vpi,
5308 phba->sli4_hba.max_cfg_param.vfi_base,
5309 phba->sli4_hba.max_cfg_param.max_vfi,
5310 phba->sli4_hba.max_cfg_param.rpi_base,
5311 phba->sli4_hba.max_cfg_param.max_rpi,
5312 phba->sli4_hba.max_cfg_param.fcfi_base,
5313 phba->sli4_hba.max_cfg_param.max_fcfi);
5314 }
5315 mempool_free(pmb, phba->mbox_mem_pool);
5316
5317 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5318 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
5319 phba->cfg_hba_queue_depth =
5320 phba->sli4_hba.max_cfg_param.max_xri;
5321 return rc;
5322}
5323
5324/**
5325 * lpfc_setup_endian_order - Notify the port of the host's endian order.
5326 * @phba: pointer to lpfc hba data structure.
5327 *
5328 * This routine is invoked to set up the host-side endian order with the
5329 * HBA consistent with the SLI-4 interface spec.
5330 *
5331 * Return codes
5332 * 0 - successful
5333 * ENOMEM - No available memory
5334 * EIO - The mailbox failed to complete successfully.
5335 **/
5336static int
5337lpfc_setup_endian_order(struct lpfc_hba *phba)
5338{
5339 LPFC_MBOXQ_t *mboxq;
5340 uint32_t rc = 0;
5341 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5342 HOST_ENDIAN_HIGH_WORD1};
5343
5344 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5345 if (!mboxq) {
5346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5347 "0492 Unable to allocate memory for issuing "
5348 "SLI_CONFIG_SPECIAL mailbox command\n");
5349 return -ENOMEM;
5350 }
5351
5352 /*
5353 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5354 * words to contain special data values and no other data.
5355 */
5356 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5357 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5358 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5359 if (rc != MBX_SUCCESS) {
5360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5361 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
5362 "status x%x\n",
5363 rc);
5364 rc = -EIO;
5365 }
5366
5367 mempool_free(mboxq, phba->mbox_mem_pool);
5368 return rc;
5369}
5370
5371/**
5372 * lpfc_sli4_queue_create - Create all the SLI4 queues
5373 * @phba: pointer to lpfc hba data structure.
5374 *
5375 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5376 * operation. For each SLI4 queue type, the parameters such as queue entry
5377 * count (queue depth) shall be taken from the module parameter. For now,
5378 * we just use some constant number as placeholder.
5379 *
5380 * Return codes
5381 * 0 - successful
5382 * ENOMEM - No available memory
5383 * EIO - The mailbox failed to complete successfully.
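 *
 * The queue set allocated below is, roughly (simplified sketch):
 *
 *	sp_eq -------> mbx_cq ----> mbx_wq (MQ)
 *	          \--> els_cq ----> els_wq
 *	fp_eq[i] ----> fcp_cq[i] --> fcp_wq[*]
 *	hdr_rq + dat_rq receive unsolicited frames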
5384 **/
5385static int
5386lpfc_sli4_queue_create(struct lpfc_hba *phba)
5387{
5388 struct lpfc_queue *qdesc;
5389 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5390 int cfg_fcp_wq_count;
5391 int cfg_fcp_eq_count;
5392
5393 /*
5394 * Sanity check the configured queue parameters against the run-time
5395 * device parameters
5396 */
5397
5398 /* Sanity check on FCP fast-path WQ parameters */
5399 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5400 if (cfg_fcp_wq_count >
5401 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5402 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5403 LPFC_SP_WQN_DEF;
5404 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5406 "2581 Not enough WQs (%d) from "
5407 "the pci function for supporting "
5408 "FCP WQs (%d)\n",
5409 phba->sli4_hba.max_cfg_param.max_wq,
5410 phba->cfg_fcp_wq_count);
5411 goto out_error;
5412 }
5413 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5414 "2582 Not enough WQs (%d) from the pci "
5415 "function for supporting the requested "
5416 "FCP WQs (%d), the actual FCP WQs can "
5417 "be supported: %d\n",
5418 phba->sli4_hba.max_cfg_param.max_wq,
5419 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5420 }
5421 /* The actual number of FCP work queues adopted */
5422 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5423
5424 /* Sanity check on FCP fast-path EQ parameters */
5425 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5426 if (cfg_fcp_eq_count >
5427 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5428 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5429 LPFC_SP_EQN_DEF;
5430 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5432 "2574 Not enough EQs (%d) from the "
5433 "pci function for supporting FCP "
5434 "EQs (%d)\n",
5435 phba->sli4_hba.max_cfg_param.max_eq,
5436 phba->cfg_fcp_eq_count);
5437 goto out_error;
5438 }
5439 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5440 "2575 Not enough EQs (%d) from the pci "
5441 "function for supporting the requested "
5442 "FCP EQs (%d), the actual FCP EQs can "
5443 "be supported: %d\n",
5444 phba->sli4_hba.max_cfg_param.max_eq,
5445 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5446 }
5447 /* It does not make sense to have more EQs than WQs */
5448 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5449 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5450 "2593 The FCP EQ count(%d) cannot be greater "
5451 "than the FCP WQ count(%d), limiting the "
5452 "FCP EQ count to %d\n", cfg_fcp_eq_count,
5453 phba->cfg_fcp_wq_count,
5454 phba->cfg_fcp_wq_count);
5455 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5456 }
5457 /* The actual number of FCP event queues adopted */
5458 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5459 /* The overall number of event queues used */
5460 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5461
5462 /*
5463 * Create Event Queues (EQs)
5464 */
5465
5466 /* Get EQ depth from module parameter, fake the default for now */
5467 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5468 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5469
5470 /* Create slow path event queue */
5471 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5472 phba->sli4_hba.eq_ecount);
5473 if (!qdesc) {
5474 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5475 "0496 Failed allocate slow-path EQ\n");
5476 goto out_error;
5477 }
5478 phba->sli4_hba.sp_eq = qdesc;
5479
5480 /* Create fast-path FCP Event Queue(s) */
5481 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5482
phba->cfg_fcp_eq_count), GFP_KERNEL); 5483 if (!phba->sli4_hba.fp_eq) { 5484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5485 "2576 Failed allocate memory for fast-path " 5486 "EQ record array\n"); 5487 goto out_free_sp_eq; 5488 } 5489 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5490 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5491 phba->sli4_hba.eq_ecount); 5492 if (!qdesc) { 5493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5494 "0497 Failed allocate fast-path EQ\n"); 5495 goto out_free_fp_eq; 5496 } 5497 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5498 } 5499 5500 /* 5501 * Create Complete Queues (CQs) 5502 */ 5503 5504 /* Get CQ depth from module parameter, fake the default for now */ 5505 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 5506 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 5507 5508 /* Create slow-path Mailbox Command Complete Queue */ 5509 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5510 phba->sli4_hba.cq_ecount); 5511 if (!qdesc) { 5512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5513 "0500 Failed allocate slow-path mailbox CQ\n"); 5514 goto out_free_fp_eq; 5515 } 5516 phba->sli4_hba.mbx_cq = qdesc; 5517 5518 /* Create slow-path ELS Complete Queue */ 5519 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5520 phba->sli4_hba.cq_ecount); 5521 if (!qdesc) { 5522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5523 "0501 Failed allocate slow-path ELS CQ\n"); 5524 goto out_free_mbx_cq; 5525 } 5526 phba->sli4_hba.els_cq = qdesc; 5527 5528 5529 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5530 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5531 phba->cfg_fcp_eq_count), GFP_KERNEL); 5532 if (!phba->sli4_hba.fcp_cq) { 5533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5534 "2577 Failed allocate memory for fast-path " 5535 "CQ record array\n"); 5536 goto out_free_els_cq; 5537 } 5538 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5539 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5540 phba->sli4_hba.cq_ecount); 5541 if (!qdesc) { 5542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5543 "0499 Failed allocate fast-path FCP " 5544 "CQ (%d)\n", fcp_cqidx); 5545 goto out_free_fcp_cq; 5546 } 5547 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 5548 } 5549 5550 /* Create Mailbox Command Queue */ 5551 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 5552 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 5553 5554 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 5555 phba->sli4_hba.mq_ecount); 5556 if (!qdesc) { 5557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5558 "0505 Failed allocate slow-path MQ\n"); 5559 goto out_free_fcp_cq; 5560 } 5561 phba->sli4_hba.mbx_wq = qdesc; 5562 5563 /* 5564 * Create all the Work Queues (WQs) 5565 */ 5566 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 5567 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 5568 5569 /* Create slow-path ELS Work Queue */ 5570 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5571 phba->sli4_hba.wq_ecount); 5572 if (!qdesc) { 5573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5574 "0504 Failed allocate slow-path ELS WQ\n"); 5575 goto out_free_mbx_wq; 5576 } 5577 phba->sli4_hba.els_wq = qdesc; 5578 5579 /* Create fast-path FCP Work Queue(s) */ 5580 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 5581 phba->cfg_fcp_wq_count), GFP_KERNEL); 5582 if (!phba->sli4_hba.fcp_wq) { 5583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5584 "2578 Failed allocate memory for fast-path " 5585 "WQ record array\n"); 5586 goto 
out_free_els_wq; 5587	} 5588	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5589		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5590					      phba->sli4_hba.wq_ecount); 5591		if (!qdesc) { 5592			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5593					"0503 Failed allocate fast-path FCP " 5594					"WQ (%d)\n", fcp_wqidx); 5595			goto out_free_fcp_wq; 5596		} 5597		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 5598	} 5599 5600	/* 5601	 * Create Receive Queue (RQ) 5602	 */ 5603	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 5604	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 5605 5606	/* Create Receive Queue for header */ 5607	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5608				      phba->sli4_hba.rq_ecount); 5609	if (!qdesc) { 5610		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5611				"0506 Failed allocate receive HRQ\n"); 5612		goto out_free_fcp_wq; 5613	} 5614	phba->sli4_hba.hdr_rq = qdesc; 5615 5616	/* Create Receive Queue for data */ 5617	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5618				      phba->sli4_hba.rq_ecount); 5619	if (!qdesc) { 5620		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5621				"0507 Failed allocate receive DRQ\n"); 5622		goto out_free_hdr_rq; 5623	} 5624	phba->sli4_hba.dat_rq = qdesc; 5625 5626	return 0; 5627 5628out_free_hdr_rq: 5629	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5630	phba->sli4_hba.hdr_rq = NULL; 5631out_free_fcp_wq: 5632	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 5633		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 5634		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 5635	} 5636	kfree(phba->sli4_hba.fcp_wq); 5637out_free_els_wq: 5638	lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5639	phba->sli4_hba.els_wq = NULL; 5640out_free_mbx_wq: 5641	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5642	phba->sli4_hba.mbx_wq = NULL; 5643out_free_fcp_cq: 5644	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 5645		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 5646		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5647	} 5648	kfree(phba->sli4_hba.fcp_cq); 5649out_free_els_cq: 5650	lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5651	phba->sli4_hba.els_cq = NULL; 5652out_free_mbx_cq: 5653	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5654	phba->sli4_hba.mbx_cq = NULL; 5655out_free_fp_eq: 5656	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 5657		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 5658		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 5659	} 5660	kfree(phba->sli4_hba.fp_eq); 5661out_free_sp_eq: 5662	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5663	phba->sli4_hba.sp_eq = NULL; 5664out_error: 5665	return -ENOMEM; 5666} 5667 5668/** 5669 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 5670 * @phba: pointer to lpfc hba data structure. 5671 * 5672 * This routine is invoked to release all the SLI4 queues used for the FCoE 5673 * HBA operation. It returns no value.
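 * Each release below follows the same free-then-NULL idiom, for example:
 *
 *	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
 *	phba->sli4_hba.els_wq = NULL;
 *
 * so that a repeated teardown or a later error path never touches a
 * stale queue pointer.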
5679 **/ 5680static void 5681lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 5682{ 5683	int fcp_qidx; 5684 5685	/* Release mailbox command work queue */ 5686	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5687	phba->sli4_hba.mbx_wq = NULL; 5688 5689	/* Release ELS work queue */ 5690	lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5691	phba->sli4_hba.els_wq = NULL; 5692 5693	/* Release FCP work queue */ 5694	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5695		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 5696	kfree(phba->sli4_hba.fcp_wq); 5697	phba->sli4_hba.fcp_wq = NULL; 5698 5699	/* Release unsolicited receive queue */ 5700	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5701	phba->sli4_hba.hdr_rq = NULL; 5702	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5703	phba->sli4_hba.dat_rq = NULL; 5704 5705	/* Release ELS complete queue */ 5706	lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5707	phba->sli4_hba.els_cq = NULL; 5708 5709	/* Release mailbox command complete queue */ 5710	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5711	phba->sli4_hba.mbx_cq = NULL; 5712 5713	/* Release FCP response complete queue */ 5714	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5715		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 5716	kfree(phba->sli4_hba.fcp_cq); 5717	phba->sli4_hba.fcp_cq = NULL; 5718 5719	/* Release fast-path event queue */ 5720	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5721		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 5722	kfree(phba->sli4_hba.fp_eq); 5723	phba->sli4_hba.fp_eq = NULL; 5724 5725	/* Release slow-path event queue */ 5726	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5727	phba->sli4_hba.sp_eq = NULL; 5728 5729	return; 5730} 5731 5732/** 5733 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 5734 * @phba: pointer to lpfc hba data structure. 5735 * 5736 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 5737 * operation. 5738 * 5739 * Return codes 5740 *	0 - successful 5741 *	-ENOMEM - No available memory 5742 *	-EIO - The mailbox failed to complete successfully.
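 * The queues are brought up parent-first: an EQ is created before any CQ
 * that points at it, and a CQ before any WQ that completes into it. A
 * condensed sketch of that ordering, using the same helpers invoked below
 * (error handling omitted):
 *
 *	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, LPFC_SP_DEF_IMAX);
 *	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
 *			    phba->sli4_hba.sp_eq, LPFC_WCQ, LPFC_ELS);
 *	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
 *			    phba->sli4_hba.els_cq, LPFC_ELS);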
5743 **/ 5744int 5745lpfc_sli4_queue_setup(struct lpfc_hba *phba) 5746{ 5747 int rc = -ENOMEM; 5748 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5749 int fcp_cq_index = 0; 5750 5751 /* 5752 * Set up Event Queues (EQs) 5753 */ 5754 5755 /* Set up slow-path event queue */ 5756 if (!phba->sli4_hba.sp_eq) { 5757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5758 "0520 Slow-path EQ not allocated\n"); 5759 goto out_error; 5760 } 5761 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 5762 LPFC_SP_DEF_IMAX); 5763 if (rc) { 5764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5765 "0521 Failed setup of slow-path EQ: " 5766 "rc = 0x%x\n", rc); 5767 goto out_error; 5768 } 5769 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5770 "2583 Slow-path EQ setup: queue-id=%d\n", 5771 phba->sli4_hba.sp_eq->queue_id); 5772 5773 /* Set up fast-path event queue */ 5774 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5775 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 5776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5777 "0522 Fast-path EQ (%d) not " 5778 "allocated\n", fcp_eqidx); 5779 goto out_destroy_fp_eq; 5780 } 5781 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 5782 phba->cfg_fcp_imax); 5783 if (rc) { 5784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5785 "0523 Failed setup of fast-path EQ " 5786 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 5787 goto out_destroy_fp_eq; 5788 } 5789 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5790 "2584 Fast-path EQ setup: " 5791 "queue[%d]-id=%d\n", fcp_eqidx, 5792 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 5793 } 5794 5795 /* 5796 * Set up Complete Queues (CQs) 5797 */ 5798 5799 /* Set up slow-path MBOX Complete Queue as the first CQ */ 5800 if (!phba->sli4_hba.mbx_cq) { 5801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5802 "0528 Mailbox CQ not allocated\n"); 5803 goto out_destroy_fp_eq; 5804 } 5805 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 5806 LPFC_MCQ, LPFC_MBOX); 5807 if (rc) { 5808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5809 "0529 Failed setup of slow-path mailbox CQ: " 5810 "rc = 0x%x\n", rc); 5811 goto out_destroy_fp_eq; 5812 } 5813 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5814 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 5815 phba->sli4_hba.mbx_cq->queue_id, 5816 phba->sli4_hba.sp_eq->queue_id); 5817 5818 /* Set up slow-path ELS Complete Queue */ 5819 if (!phba->sli4_hba.els_cq) { 5820 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5821 "0530 ELS CQ not allocated\n"); 5822 goto out_destroy_mbx_cq; 5823 } 5824 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 5825 LPFC_WCQ, LPFC_ELS); 5826 if (rc) { 5827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5828 "0531 Failed setup of slow-path ELS CQ: " 5829 "rc = 0x%x\n", rc); 5830 goto out_destroy_mbx_cq; 5831 } 5832 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5833 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 5834 phba->sli4_hba.els_cq->queue_id, 5835 phba->sli4_hba.sp_eq->queue_id); 5836 5837 /* Set up fast-path FCP Response Complete Queue */ 5838 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5839 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 5840 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5841 "0526 Fast-path FCP CQ (%d) not " 5842 "allocated\n", fcp_cqidx); 5843 goto out_destroy_fcp_cq; 5844 } 5845 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 5846 phba->sli4_hba.fp_eq[fcp_cqidx], 5847 LPFC_WCQ, LPFC_FCP); 5848 if (rc) { 5849 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5850 "0527 Failed setup of fast-path FCP " 5851 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 5852 goto out_destroy_fcp_cq; 5853 } 5854 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5855 "2588 FCP CQ setup: cq[%d]-id=%d, " 5856 "parent eq[%d]-id=%d\n", 5857 fcp_cqidx, 5858 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 5859 fcp_cqidx, 5860 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 5861 } 5862 5863 /* 5864 * Set up all the Work Queues (WQs) 5865 */ 5866 5867 /* Set up Mailbox Command Queue */ 5868 if (!phba->sli4_hba.mbx_wq) { 5869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5870 "0538 Slow-path MQ not allocated\n"); 5871 goto out_destroy_fcp_cq; 5872 } 5873 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 5874 phba->sli4_hba.mbx_cq, LPFC_MBOX); 5875 if (rc) { 5876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5877 "0539 Failed setup of slow-path MQ: " 5878 "rc = 0x%x\n", rc); 5879 goto out_destroy_fcp_cq; 5880 } 5881 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5882 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 5883 phba->sli4_hba.mbx_wq->queue_id, 5884 phba->sli4_hba.mbx_cq->queue_id); 5885 5886 /* Set up slow-path ELS Work Queue */ 5887 if (!phba->sli4_hba.els_wq) { 5888 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5889 "0536 Slow-path ELS WQ not allocated\n"); 5890 goto out_destroy_mbx_wq; 5891 } 5892 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 5893 phba->sli4_hba.els_cq, LPFC_ELS); 5894 if (rc) { 5895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5896 "0537 Failed setup of slow-path ELS WQ: " 5897 "rc = 0x%x\n", rc); 5898 goto out_destroy_mbx_wq; 5899 } 5900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5901 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 5902 phba->sli4_hba.els_wq->queue_id, 5903 phba->sli4_hba.els_cq->queue_id); 5904 5905 /* Set up fast-path FCP Work Queue */ 5906 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5907 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 5908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5909 "0534 Fast-path FCP WQ (%d) not " 5910 "allocated\n", fcp_wqidx); 5911 goto out_destroy_fcp_wq; 5912 } 5913 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 5914 phba->sli4_hba.fcp_cq[fcp_cq_index], 5915 LPFC_FCP); 5916 if (rc) { 5917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5918 "0535 Failed setup of fast-path FCP " 5919 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 5920 goto out_destroy_fcp_wq; 5921 } 5922 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5923 "2591 FCP WQ setup: wq[%d]-id=%d, " 5924 "parent cq[%d]-id=%d\n", 5925 fcp_wqidx, 5926 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 5927 fcp_cq_index, 5928 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 5929 /* Round robin FCP Work Queue's Completion Queue assignment */ 5930 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 5931 } 5932 5933 /* 5934 * Create Receive Queue (RQ) 5935 */ 5936 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 5937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5938 "0540 Receive Queue not allocated\n"); 5939 goto out_destroy_fcp_wq; 5940 } 5941 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 5942 phba->sli4_hba.els_cq, LPFC_USOL); 5943 if (rc) { 5944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5945 "0541 Failed setup of Receive Queue: " 5946 "rc = 0x%x\n", rc); 5947 goto out_destroy_fcp_wq; 5948 } 5949 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5950 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 5951 "parent cq-id=%d\n", 5952 phba->sli4_hba.hdr_rq->queue_id, 5953 phba->sli4_hba.dat_rq->queue_id, 5954 phba->sli4_hba.els_cq->queue_id); 5955 return 0; 5956 5957out_destroy_fcp_wq: 5958 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--) 5959		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 5960	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5961out_destroy_mbx_wq: 5962	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5963out_destroy_fcp_cq: 5964	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 5965		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 5966	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5967out_destroy_mbx_cq: 5968	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5969out_destroy_fp_eq: 5970	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 5971		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 5972	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 5973out_error: 5974	return rc; 5975} 5976 5977/** 5978 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 5979 * @phba: pointer to lpfc hba data structure. 5980 * 5981 * This routine is invoked to unset all the SLI4 queues used for the FCoE 5982 * HBA operation. It returns no value. 5988 **/ 5989void 5990lpfc_sli4_queue_unset(struct lpfc_hba *phba) 5991{ 5992	int fcp_qidx; 5993 5994	/* Unset mailbox command work queue */ 5995	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5996	/* Unset ELS work queue */ 5997	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5998	/* Unset unsolicited receive queue */ 5999	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6000	/* Unset FCP work queue */ 6001	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6002		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6003	/* Unset mailbox command complete queue */ 6004	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6005	/* Unset ELS complete queue */ 6006	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6007	/* Unset FCP response complete queue */ 6008	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6009		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6010	/* Unset fast-path event queue */ 6011	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6012		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6013	/* Unset slow-path event queue */ 6014	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6015} 6016 6017/** 6018 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6019 * @phba: pointer to lpfc hba data structure. 6020 * 6021 * This routine is invoked to allocate and set up a pool of completion queue 6022 * events. The body of the completion queue event is a completion queue entry 6023 * (CQE). For now, this pool is used for the interrupt service routine to queue 6024 * the following HBA completion queue events for the worker thread to process: 6025 * - Mailbox asynchronous events 6026 * - Receive queue completion unsolicited events 6027 * Later, this can be used for all the slow-path events.
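 * A minimal sketch of the intended alloc/release pairing, using only the
 * pool APIs defined below in this file:
 *
 *	struct lpfc_cq_event *evt;
 *
 *	evt = lpfc_sli4_cq_event_alloc(phba);	/* typically from the ISR */
 *	if (evt) {
 *		/* fill in the event, queue it to a slow-path work list,
 *		 * and once the worker thread has processed it:
 *		 */
 *		lpfc_sli4_cq_event_release(phba, evt);
 *	}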
6028 * 6029 * Return codes 6030 *	0 - successful 6031 *	-ENOMEM - No available memory 6032 **/ 6033static int 6034lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6035{ 6036	struct lpfc_cq_event *cq_event; 6037	int i; 6038 6039	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6040		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6041		if (!cq_event) 6042			goto out_pool_create_fail; 6043		list_add_tail(&cq_event->list, 6044			      &phba->sli4_hba.sp_cqe_event_pool); 6045	} 6046	return 0; 6047 6048out_pool_create_fail: 6049	lpfc_sli4_cq_event_pool_destroy(phba); 6050	return -ENOMEM; 6051} 6052 6053/** 6054 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6055 * @phba: pointer to lpfc hba data structure. 6056 * 6057 * This routine is invoked to free the pool of completion queue events at 6058 * driver unload time. Note that it is the responsibility of the driver 6059 * cleanup routine to free all the outstanding completion-queue events 6060 * allocated from this pool back into the pool before invoking this routine 6061 * to destroy the pool. 6062 **/ 6063static void 6064lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6065{ 6066	struct lpfc_cq_event *cq_event, *next_cq_event; 6067 6068	list_for_each_entry_safe(cq_event, next_cq_event, 6069				 &phba->sli4_hba.sp_cqe_event_pool, list) { 6070		list_del(&cq_event->list); 6071		kfree(cq_event); 6072	} 6073} 6074 6075/** 6076 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6077 * @phba: pointer to lpfc hba data structure. 6078 * 6079 * This routine is the lock-free version of the API invoked to allocate a 6080 * completion-queue event from the free pool. 6081 * 6082 * Return: Pointer to the newly allocated completion-queue event if successful; 6083 *         NULL otherwise. 6084 **/ 6085struct lpfc_cq_event * 6086__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6087{ 6088	struct lpfc_cq_event *cq_event = NULL; 6089 6090	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6091			 struct lpfc_cq_event, list); 6092	return cq_event; 6093} 6094 6095/** 6096 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6097 * @phba: pointer to lpfc hba data structure. 6098 * 6099 * This routine is the locked version of the API invoked to allocate a 6100 * completion-queue event from the free pool. 6101 * 6102 * Return: Pointer to the newly allocated completion-queue event if successful; 6103 *         NULL otherwise. 6104 **/ 6105struct lpfc_cq_event * 6106lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6107{ 6108	struct lpfc_cq_event *cq_event; 6109	unsigned long iflags; 6110 6111	spin_lock_irqsave(&phba->hbalock, iflags); 6112	cq_event = __lpfc_sli4_cq_event_alloc(phba); 6113	spin_unlock_irqrestore(&phba->hbalock, iflags); 6114	return cq_event; 6115} 6116 6117/** 6118 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6119 * @phba: pointer to lpfc hba data structure. 6120 * @cq_event: pointer to the completion queue event to be freed. 6121 * 6122 * This routine is the lock-free version of the API invoked to release a 6123 * completion-queue event back into the free pool. 6124 **/ 6125void 6126__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6127			     struct lpfc_cq_event *cq_event) 6128{ 6129	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6130} 6131 6132/** 6133 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6134 * @phba: pointer to lpfc hba data structure.
@cq_event: pointer to the completion queue event to be freed. 6136 * 6137 * This routine is the locked version of the API invoked to release a 6138 * completion-queue event back into the free pool. 6139 **/ 6140void 6141lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6142			   struct lpfc_cq_event *cq_event) 6143{ 6144	unsigned long iflags; 6145	spin_lock_irqsave(&phba->hbalock, iflags); 6146	__lpfc_sli4_cq_event_release(phba, cq_event); 6147	spin_unlock_irqrestore(&phba->hbalock, iflags); 6148} 6149 6150/** 6151 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 6152 * @phba: pointer to lpfc hba data structure. 6153 * 6154 * This routine frees all the pending completion-queue events back 6155 * into the free pool for device reset. 6156 **/ 6157static void 6158lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 6159{ 6160	LIST_HEAD(cqelist); 6161	struct lpfc_cq_event *cqe; 6162	unsigned long iflags; 6163 6164	/* Retrieve all the pending WCQEs from pending WCQE lists */ 6165	spin_lock_irqsave(&phba->hbalock, iflags); 6166	/* Pending FCP XRI abort events */ 6167	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 6168			 &cqelist); 6169	/* Pending ELS XRI abort events */ 6170	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 6171			 &cqelist); 6172	/* Pending async events */ 6173	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 6174			 &cqelist); 6175	spin_unlock_irqrestore(&phba->hbalock, iflags); 6176 6177	while (!list_empty(&cqelist)) { 6178		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 6179		lpfc_sli4_cq_event_release(phba, cqe); 6180	} 6181} 6182 6183/** 6184 * lpfc_pci_function_reset - Reset pci function. 6185 * @phba: pointer to lpfc hba data structure. 6186 * 6187 * This routine is invoked to request a PCI function reset. It destroys 6188 * all resources assigned to the PCI function that originates this request. 6189 * 6190 * Return codes 6191 *	0 - successful 6192 *	-ENOMEM - No available memory 6193 *	-ENXIO - The mailbox failed to complete successfully. 6194 **/ 6195int 6196lpfc_pci_function_reset(struct lpfc_hba *phba) 6197{ 6198	LPFC_MBOXQ_t *mboxq; 6199	uint32_t rc = 0; 6200	uint32_t shdr_status, shdr_add_status; 6201	union lpfc_sli4_cfg_shdr *shdr; 6202 6203	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6204	if (!mboxq) { 6205		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6206				"0494 Unable to allocate memory for issuing " 6207				"SLI_FUNCTION_RESET mailbox command\n"); 6208		return -ENOMEM; 6209	} 6210 6211	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 6212	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6213			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 6214			 LPFC_SLI4_MBX_EMBED); 6215	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6216	shdr = (union lpfc_sli4_cfg_shdr *) 6217		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6218	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6219	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6220	if (rc != MBX_TIMEOUT) 6221		mempool_free(mboxq, phba->mbox_mem_pool); 6222	if (shdr_status || shdr_add_status || rc) { 6223		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6224				"0495 SLI_FUNCTION_RESET mailbox failed with " 6225				"status x%x add_status x%x, mbx status x%x\n", 6226				shdr_status, shdr_add_status, rc); 6227		rc = -ENXIO; 6228	} 6229	return rc; 6230} 6231 6232/** 6233 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 6234 * @phba: pointer to lpfc hba data structure.
@cnt: number of nop mailbox commands to send. 6236 * 6237 * This routine is invoked to send @cnt NOP mailbox commands and 6238 * wait for each command to complete. 6239 * 6240 * Return: the number of NOP mailbox commands completed. 6241 **/ 6242static int 6243lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 6244{ 6245	LPFC_MBOXQ_t *mboxq; 6246	int length, cmdsent; 6247	uint32_t mbox_tmo; 6248	uint32_t rc = 0; 6249	uint32_t shdr_status, shdr_add_status; 6250	union lpfc_sli4_cfg_shdr *shdr; 6251 6252	if (cnt == 0) { 6253		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6254				"2518 Requested to send 0 NOP mailbox cmd\n"); 6255		return cnt; 6256	} 6257 6258	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6259	if (!mboxq) { 6260		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6261				"2519 Unable to allocate memory for issuing " 6262				"NOP mailbox command\n"); 6263		return 0; 6264	} 6265 6266	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 6267	length = (sizeof(struct lpfc_mbx_nop) - 6268		  sizeof(struct lpfc_sli4_cfg_mhdr)); 6269	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6270			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 6271 6272	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6273	for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 6274		if (!phba->sli4_hba.intr_enable) 6275			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6276		else 6277			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 6278		if (rc == MBX_TIMEOUT) 6279			break; 6280		/* Check return status */ 6281		shdr = (union lpfc_sli4_cfg_shdr *) 6282			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6283		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6284		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6285					 &shdr->response); 6286		if (shdr_status || shdr_add_status || rc) { 6287			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6288					"2520 NOP mailbox command failed " 6289					"status x%x add_status x%x mbx " 6290					"status x%x\n", shdr_status, 6291					shdr_add_status, rc); 6292			break; 6293		} 6294	} 6295 6296	if (rc != MBX_TIMEOUT) 6297		mempool_free(mboxq, phba->mbox_mem_pool); 6298 6299	return cmdsent; 6300} 6301 6302/** 6303 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device 6304 * @phba: pointer to lpfc hba data structure. 6305 * @fcfi: fcf index. 6306 * 6307 * This routine is invoked to unregister an FCFI from the device. 6308 **/ 6309void 6310lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) 6311{ 6312	LPFC_MBOXQ_t *mbox; 6313	uint32_t mbox_tmo; 6314	int rc; 6315	unsigned long flags; 6316 6317	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6318 6319	if (!mbox) 6320		return; 6321 6322	lpfc_unreg_fcfi(mbox, fcfi); 6323 6324	if (!phba->sli4_hba.intr_enable) 6325		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6326	else { 6327		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6328		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6329	} 6330	if (rc != MBX_TIMEOUT) 6331		mempool_free(mbox, phba->mbox_mem_pool); 6332	if (rc != MBX_SUCCESS) 6333		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6334				"2517 Unregister FCFI command failed " 6335				"status %d, mbxStatus x%x\n", rc, 6336				bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6337	else { 6338		spin_lock_irqsave(&phba->hbalock, flags); 6339		/* Mark the FCFI as no longer registered */ 6340		phba->fcf.fcf_flag &= 6341			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE); 6342		spin_unlock_irqrestore(&phba->hbalock, flags); 6343	} 6344} 6345 6346/** 6347 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6348 * @phba: pointer to lpfc hba data structure. 6349 * 6350 * This routine is invoked to set up the PCI device memory space for device 6351 * with SLI-4 interface spec. 6352 * 6353 * Return codes 6354 * 0 - successful 6355 * other values - error 6356 **/ 6357static int 6358lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6359{ 6360 struct pci_dev *pdev; 6361 unsigned long bar0map_len, bar1map_len, bar2map_len; 6362 int error = -ENODEV; 6363 6364 /* Obtain PCI device reference */ 6365 if (!phba->pcidev) 6366 return error; 6367 else 6368 pdev = phba->pcidev; 6369 6370 /* Set the device DMA mask size */ 6371 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6372 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6373 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6374 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6375 return error; 6376 } 6377 } 6378 6379 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6380 * number of bytes required by each mapping. They are actually 6381 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device. 6382 */ 6383 if (pci_resource_start(pdev, 0)) { 6384 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6385 bar0map_len = pci_resource_len(pdev, 0); 6386 } else { 6387 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6388 bar0map_len = pci_resource_len(pdev, 1); 6389 } 6390 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6391 bar1map_len = pci_resource_len(pdev, 2); 6392 6393 phba->pci_bar2_map = pci_resource_start(pdev, 4); 6394 bar2map_len = pci_resource_len(pdev, 4); 6395 6396 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6397 phba->sli4_hba.conf_regs_memmap_p = 6398 ioremap(phba->pci_bar0_map, bar0map_len); 6399 if (!phba->sli4_hba.conf_regs_memmap_p) { 6400 dev_printk(KERN_ERR, &pdev->dev, 6401 "ioremap failed for SLI4 PCI config registers.\n"); 6402 goto out; 6403 } 6404 6405 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 6406 phba->sli4_hba.ctrl_regs_memmap_p = 6407 ioremap(phba->pci_bar1_map, bar1map_len); 6408 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6409 dev_printk(KERN_ERR, &pdev->dev, 6410 "ioremap failed for SLI4 HBA control registers.\n"); 6411 goto out_iounmap_conf; 6412 } 6413 6414 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 6415 phba->sli4_hba.drbl_regs_memmap_p = 6416 ioremap(phba->pci_bar2_map, bar2map_len); 6417 if (!phba->sli4_hba.drbl_regs_memmap_p) { 6418 dev_printk(KERN_ERR, &pdev->dev, 6419 "ioremap failed for SLI4 HBA doorbell registers.\n"); 6420 goto out_iounmap_ctrl; 6421 } 6422 6423 /* Set up BAR0 PCI config space register memory map */ 6424 lpfc_sli4_bar0_register_memmap(phba); 6425 6426 /* Set up BAR1 register memory map */ 6427 lpfc_sli4_bar1_register_memmap(phba); 6428 6429 /* Set up BAR2 register memory map */ 6430 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 6431 if (error) 6432 goto out_iounmap_all; 6433 6434 return 0; 6435 6436out_iounmap_all: 6437 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6438out_iounmap_ctrl: 6439 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6440out_iounmap_conf: 6441 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6442out: 6443 return error; 6444} 6445 6446/** 6447 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 6448 * @phba: pointer to lpfc hba data structure. 6449 * 6450 * This routine is invoked to unset the PCI device memory space for device 6451 * with SLI-4 interface spec. 
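 * The iounmap() calls below mirror, in reverse order, the three ioremap()
 * mappings established by lpfc_sli4_pci_mem_setup(): the doorbell (BAR2),
 * control (BAR1), and configuration (BAR0) register regions.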
6452 **/ 6453static void 6454lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 6455{ 6456	struct pci_dev *pdev; 6457 6458	/* Obtain PCI device reference */ 6459	if (!phba->pcidev) 6460		return; 6461	else 6462		pdev = phba->pcidev; 6463 6464	/* Free coherent DMA memory allocated */ 6465 6466	/* Unmap I/O memory space */ 6467	iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6468	iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6469	iounmap(phba->sli4_hba.conf_regs_memmap_p); 6470 6471	return; 6472} 6473 6474/** 6475 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6476 * @phba: pointer to lpfc hba data structure. 6477 * 6478 * This routine is invoked to enable the MSI-X interrupt vectors to device 6479 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6480 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6481 * invoked, enables either all or nothing, depending on the current 6482 * availability of PCI vector resources. The device driver is responsible 6483 * for calling the individual request_irq() to register each MSI-X vector 6484 * with an interrupt handler, which is done in this function. Note that 6485 * later, when the device is unloading, the driver should always call free_irq() 6486 * on all MSI-X vectors it has done request_irq() on before calling 6487 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device 6488 * will be left with MSI-X enabled, leaking its vectors. 6489 * 6490 * Return codes 6491 *   0 - successful 6492 *   other values - error 6493 **/ 6494static int 6495lpfc_sli_enable_msix(struct lpfc_hba *phba) 6496{ 6497	int rc, i; 6498	LPFC_MBOXQ_t *pmb; 6499 6500	/* Set up MSI-X multi-message vectors */ 6501	for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6502		phba->msix_entries[i].entry = i; 6503 6504	/* Configure MSI-X capability structure */ 6505	rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6506			     ARRAY_SIZE(phba->msix_entries)); 6507	if (rc) { 6508		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6509				"0420 PCI enable MSI-X failed (%d)\n", rc); 6510		goto msi_fail_out; 6511	} 6512	for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6513		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6514				"0477 MSI-X entry[%d]: vector=x%x " 6515				"message=%d\n", i, 6516				phba->msix_entries[i].vector, 6517				phba->msix_entries[i].entry); 6518	/* 6519	 * Assign MSI-X vectors to interrupt handlers 6520	 */ 6521 6522	/* vector-0 is associated to slow-path handler */ 6523	rc = request_irq(phba->msix_entries[0].vector, 6524			 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6525			 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6526	if (rc) { 6527		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6528				"0421 MSI-X slow-path request_irq failed " 6529				"(%d)\n", rc); 6530		goto msi_fail_out; 6531	} 6532 6533	/* vector-1 is associated to fast-path handler */ 6534	rc = request_irq(phba->msix_entries[1].vector, 6535			 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6536			 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6537 6538	if (rc) { 6539		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6540				"0429 MSI-X fast-path request_irq failed " 6541				"(%d)\n", rc); 6542		goto irq_fail_out; 6543	} 6544 6545	/* 6546	 * Configure HBA MSI-X attention conditions to messages 6547	 */ 6548	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6549 6550	if (!pmb) { 6551		rc = -ENOMEM; 6552		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6553				"0474 Unable to allocate memory for issuing " 6554				"MBOX_CONFIG_MSI command\n"); 6555		goto mem_fail_out; 6556	} 6557	rc = lpfc_config_msi(phba, pmb); 6558	if (rc) 6559		goto mbx_fail_out; 6560	rc =
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6561	if (rc != MBX_SUCCESS) { 6562		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6563				"0351 Config MSI mailbox command failed, " 6564				"mbxCmd x%x, mbxStatus x%x\n", 6565				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6566		goto mbx_fail_out; 6567	} 6568 6569	/* Free memory allocated for mailbox command */ 6570	mempool_free(pmb, phba->mbox_mem_pool); 6571	return rc; 6572 6573mbx_fail_out: 6574	/* Free memory allocated for mailbox command */ 6575	mempool_free(pmb, phba->mbox_mem_pool); 6576 6577mem_fail_out: 6578	/* free the irq already requested */ 6579	free_irq(phba->msix_entries[1].vector, phba); 6580 6581irq_fail_out: 6582	/* free the irq already requested */ 6583	free_irq(phba->msix_entries[0].vector, phba); 6584 6585msi_fail_out: 6586	/* Unconfigure MSI-X capability structure */ 6587	pci_disable_msix(phba->pcidev); 6588	return rc; 6589} 6590 6591/** 6592 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6593 * @phba: pointer to lpfc hba data structure. 6594 * 6595 * This routine is invoked to release the MSI-X vectors and then disable the 6596 * MSI-X interrupt mode to device with SLI-3 interface spec. 6597 **/ 6598static void 6599lpfc_sli_disable_msix(struct lpfc_hba *phba) 6600{ 6601	int i; 6602 6603	/* Free up MSI-X multi-message vectors */ 6604	for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6605		free_irq(phba->msix_entries[i].vector, phba); 6606	/* Disable MSI-X */ 6607	pci_disable_msix(phba->pcidev); 6608 6609	return; 6610} 6611 6612/** 6613 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 6614 * @phba: pointer to lpfc hba data structure. 6615 * 6616 * This routine is invoked to enable the MSI interrupt mode to device with 6617 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 6618 * enable the MSI vector. The device driver is responsible for calling 6619 * request_irq() to register the MSI vector with an interrupt handler, which 6620 * is done in this function. 6621 * 6622 * Return codes 6623 * 	0 - successful 6624 * 	other values - error 6625 */ 6626static int 6627lpfc_sli_enable_msi(struct lpfc_hba *phba) 6628{ 6629	int rc; 6630 6631	rc = pci_enable_msi(phba->pcidev); 6632	if (!rc) 6633		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6634				"0462 PCI enable MSI mode success.\n"); 6635	else { 6636		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6637				"0471 PCI enable MSI mode failed (%d)\n", rc); 6638		return rc; 6639	} 6640 6641	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6642			 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6643	if (rc) { 6644		pci_disable_msi(phba->pcidev); 6645		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6646				"0478 MSI request_irq failed (%d)\n", rc); 6647	} 6648	return rc; 6649} 6650 6651/** 6652 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 6653 * @phba: pointer to lpfc hba data structure. 6654 * 6655 * This routine is invoked to disable the MSI interrupt mode to device with 6656 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has 6657 * done request_irq() on before calling pci_disable_msi(). Failure to do so 6658 * results in a BUG_ON() and the device will be left with MSI enabled, leaking 6659 * its vector. 6660 */ 6661static void 6662lpfc_sli_disable_msi(struct lpfc_hba *phba) 6663{ 6664	free_irq(phba->pcidev->irq, phba); 6665	pci_disable_msi(phba->pcidev); 6666	return; 6667} 6668 6669/** 6670 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 6671 * @phba: pointer to lpfc hba data structure. * @cfg_mode: interrupt configuration mode (0 - INTx, 1 - MSI, 2 - MSI-X).
6672 * 6673 * This routine is invoked to enable device interrupt and associate the 6674 * driver's interrupt handler(s) with interrupt vector(s) to device with SLI-3 6675 * interface spec. Depending on the interrupt mode configured in the driver, 6676 * the driver will try to fall back from the configured interrupt mode to an 6677 * interrupt mode which is supported by the platform, kernel, and device in 6678 * the order of: 6679 * MSI-X -> MSI -> IRQ. 6680 * 6681 * Return codes 6682 *   0 - successful 6683 *   other values - error 6684 **/ 6685static uint32_t 6686lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6687{ 6688	uint32_t intr_mode = LPFC_INTR_ERROR; 6689	int retval; 6690 6691	if (cfg_mode == 2) { 6692		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6693		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6694		if (!retval) { 6695			/* Now, try to enable MSI-X interrupt mode */ 6696			retval = lpfc_sli_enable_msix(phba); 6697			if (!retval) { 6698				/* Indicate initialization to MSI-X mode */ 6699				phba->intr_type = MSIX; 6700				intr_mode = 2; 6701			} 6702		} 6703	} 6704 6705	/* Fallback to MSI if MSI-X initialization failed */ 6706	if (cfg_mode >= 1 && phba->intr_type == NONE) { 6707		retval = lpfc_sli_enable_msi(phba); 6708		if (!retval) { 6709			/* Indicate initialization to MSI mode */ 6710			phba->intr_type = MSI; 6711			intr_mode = 1; 6712		} 6713	} 6714 6715	/* Fallback to INTx if both MSI-X/MSI initialization failed */ 6716	if (phba->intr_type == NONE) { 6717		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6718				     IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6719		if (!retval) { 6720			/* Indicate initialization to INTx mode */ 6721			phba->intr_type = INTx; 6722			intr_mode = 0; 6723		} 6724	} 6725	return intr_mode; 6726} 6727 6728/** 6729 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 6730 * @phba: pointer to lpfc hba data structure. 6731 * 6732 * This routine is invoked to disable device interrupt and disassociate the 6733 * driver's interrupt handler(s) from interrupt vector(s) to device with 6734 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 6735 * release the interrupt vector(s) for the message signaled interrupt. 6736 **/ 6737static void 6738lpfc_sli_disable_intr(struct lpfc_hba *phba) 6739{ 6740	/* Disable the currently initialized interrupt mode */ 6741	if (phba->intr_type == MSIX) 6742		lpfc_sli_disable_msix(phba); 6743	else if (phba->intr_type == MSI) 6744		lpfc_sli_disable_msi(phba); 6745	else if (phba->intr_type == INTx) 6746		free_irq(phba->pcidev->irq, phba); 6747 6748	/* Reset interrupt management states */ 6749	phba->intr_type = NONE; 6750	phba->sli.slistat.sli_intr = 0; 6751 6752	return; 6753} 6754 6755/** 6756 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 6757 * @phba: pointer to lpfc hba data structure. 6758 * 6759 * This routine is invoked to enable the MSI-X interrupt vectors to device 6760 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 6761 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 6762 * enables either all or nothing, depending on the current availability of 6763 * PCI vector resources. The device driver is responsible for calling the 6764 * individual request_irq() to register each MSI-X vector with an interrupt 6765 * handler, which is done in this function.
Note that later, when the device is 6766 * unloading, the driver should always call free_irq() on all MSI-X vectors 6767 * it has done request_irq() on before calling pci_disable_msix(). Failure 6768 * to do so results in a BUG_ON() and the device will be left with MSI-X 6769 * enabled, leaking its vectors. 6770 * 6771 * Return codes 6772 * 0 - successful 6773 * other values - error 6774 **/ 6775static int 6776lpfc_sli4_enable_msix(struct lpfc_hba *phba) 6777{ 6778	int rc, index; 6779 6780	/* Set up MSI-X multi-message vectors */ 6781	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6782		phba->sli4_hba.msix_entries[index].entry = index; 6783 6784	/* Configure MSI-X capability structure */ 6785	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 6786			     phba->sli4_hba.cfg_eqn); 6787	if (rc) { 6788		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6789				"0484 PCI enable MSI-X failed (%d)\n", rc); 6790		goto msi_fail_out; 6791	} 6792	/* Log MSI-X vector assignment */ 6793	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6794		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6795				"0489 MSI-X entry[%d]: vector=x%x " 6796				"message=%d\n", index, 6797				phba->sli4_hba.msix_entries[index].vector, 6798				phba->sli4_hba.msix_entries[index].entry); 6799	/* 6800	 * Assign MSI-X vectors to interrupt handlers 6801	 */ 6802 6803	/* The first vector must be associated with the slow-path handler for MQ */ 6804	rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 6805			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 6806			 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6807	if (rc) { 6808		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6809				"0485 MSI-X slow-path request_irq failed " 6810				"(%d)\n", rc); 6811		goto msi_fail_out; 6812	} 6813 6814	/* The rest of the vector(s) are associated to fast-path handler(s) */ 6815	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { 6816		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 6817		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 6818		rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 6819				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 6820				 LPFC_FP_DRIVER_HANDLER_NAME, 6821				 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6822		if (rc) { 6823			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6824					"0486 MSI-X fast-path (%d) " 6825					"request_irq failed (%d)\n", index, rc); 6826			goto cfg_fail_out; 6827		} 6828	} 6829 6830	return rc; 6831 6832cfg_fail_out: 6833	/* free the irqs already requested, pairing each vector with the cookie it was registered with */ 6834	for (--index; index >= 1; index--) 6835		free_irq(phba->sli4_hba.msix_entries[index].vector, 6836			 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6837 6838	/* free the irq already requested */ 6839	free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6840 6841msi_fail_out: 6842	/* Unconfigure MSI-X capability structure */ 6843	pci_disable_msix(phba->pcidev); 6844	return rc; 6845} 6846 6847/** 6848 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 6849 * @phba: pointer to lpfc hba data structure. 6850 * 6851 * This routine is invoked to release the MSI-X vectors and then disable the 6852 * MSI-X interrupt mode to device with SLI-4 interface spec.
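 * The teardown below is the mirror image of lpfc_sli4_enable_msix():
 * free_irq() is called on every vector that was requested (vector 0 with
 * the phba cookie, the remaining vectors with their fcp_eq_hdl cookies)
 * before pci_disable_msix() is invoked.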
6853 **/ 6854static void 6855lpfc_sli4_disable_msix(struct lpfc_hba *phba) 6856{ 6857	int index; 6858 6859	/* Free up MSI-X multi-message vectors */ 6860	free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6861 6862	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) 6863		free_irq(phba->sli4_hba.msix_entries[index].vector, 6864			 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6865	/* Disable MSI-X */ 6866	pci_disable_msix(phba->pcidev); 6867 6868	return; 6869} 6870 6871/** 6872 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 6873 * @phba: pointer to lpfc hba data structure. 6874 * 6875 * This routine is invoked to enable the MSI interrupt mode to device with 6876 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 6877 * to enable the MSI vector. The device driver is responsible for calling 6878 * request_irq() to register the MSI vector with an interrupt handler, 6879 * which is done in this function. 6880 * 6881 * Return codes 6882 * 	0 - successful 6883 * 	other values - error 6884 **/ 6885static int 6886lpfc_sli4_enable_msi(struct lpfc_hba *phba) 6887{ 6888	int rc, index; 6889 6890	rc = pci_enable_msi(phba->pcidev); 6891	if (!rc) 6892		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6893				"0487 PCI enable MSI mode success.\n"); 6894	else { 6895		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6896				"0488 PCI enable MSI mode failed (%d)\n", rc); 6897		return rc; 6898	} 6899 6900	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 6901			 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6902	if (rc) { 6903		pci_disable_msi(phba->pcidev); 6904		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6905				"0490 MSI request_irq failed (%d)\n", rc); 		/* Do not set up the EQ handles if the IRQ could not be registered */ 		return rc; 6906	} 6907 6908	for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 6909		phba->sli4_hba.fcp_eq_hdl[index].idx = index; 6910		phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 6911	} 6912 6913	return rc; 6914} 6915 6916/** 6917 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 6918 * @phba: pointer to lpfc hba data structure. 6919 * 6920 * This routine is invoked to disable the MSI interrupt mode to device with 6921 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has 6922 * done request_irq() on before calling pci_disable_msi(). Failure to do so 6923 * results in a BUG_ON() and the device will be left with MSI enabled, leaking 6924 * its vector. 6925 **/ 6926static void 6927lpfc_sli4_disable_msi(struct lpfc_hba *phba) 6928{ 6929	free_irq(phba->pcidev->irq, phba); 6930	pci_disable_msi(phba->pcidev); 6931	return; 6932} 6933 6934/** 6935 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 6936 * @phba: pointer to lpfc hba data structure. * @cfg_mode: interrupt configuration mode (0 - INTx, 1 - MSI, 2 - MSI-X). 6937 * 6938 * This routine is invoked to enable device interrupt and associate the 6939 * driver's interrupt handler(s) with interrupt vector(s) to device with SLI-4 6940 * interface spec. Depending on the interrupt mode configured in the driver, 6941 * the driver will try to fall back from the configured interrupt mode to an 6942 * interrupt mode which is supported by the platform, kernel, and device in 6943 * the order of: 6944 * MSI-X -> MSI -> IRQ.
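 *
 * A hedged sketch of how a caller consumes the result, mirroring the
 * SLI-3 probe path later in this file (the SLI-4 probe path is assumed
 * to be analogous):
 *
 *	intr_mode = lpfc_sli4_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;	/* no interrupt mode could be enabled */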
6945 * 6946 * Return codes 6947 * 	0 - successful 6948 * 	other values - error 6949 **/ 6950static uint32_t 6951lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6952{ 6953	uint32_t intr_mode = LPFC_INTR_ERROR; 6954	int retval, index; 6955 6956	if (cfg_mode == 2) { 6957		/* Preparation before conf_msi mbox cmd */ 6958		retval = 0; 6959		if (!retval) { 6960			/* Now, try to enable MSI-X interrupt mode */ 6961			retval = lpfc_sli4_enable_msix(phba); 6962			if (!retval) { 6963				/* Indicate initialization to MSI-X mode */ 6964				phba->intr_type = MSIX; 6965				intr_mode = 2; 6966			} 6967		} 6968	} 6969 6970	/* Fallback to MSI if MSI-X initialization failed */ 6971	if (cfg_mode >= 1 && phba->intr_type == NONE) { 6972		retval = lpfc_sli4_enable_msi(phba); 6973		if (!retval) { 6974			/* Indicate initialization to MSI mode */ 6975			phba->intr_type = MSI; 6976			intr_mode = 1; 6977		} 6978	} 6979 6980	/* Fallback to INTx if both MSI-X/MSI initialization failed */ 6981	if (phba->intr_type == NONE) { 6982		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 6983				     IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6984		if (!retval) { 6985			/* Indicate initialization to INTx mode */ 6986			phba->intr_type = INTx; 6987			intr_mode = 0; 6988			for (index = 0; index < phba->cfg_fcp_eq_count; 6989			     index++) { 6990				phba->sli4_hba.fcp_eq_hdl[index].idx = index; 6991				phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 6992			} 6993		} 6994	} 6995	return intr_mode; 6996} 6997 6998/** 6999 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 7000 * @phba: pointer to lpfc hba data structure. 7001 * 7002 * This routine is invoked to disable device interrupt and disassociate 7003 * the driver's interrupt handler(s) from interrupt vector(s) to device 7004 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 7005 * will release the interrupt vector(s) for the message signaled interrupt. 7006 **/ 7007static void 7008lpfc_sli4_disable_intr(struct lpfc_hba *phba) 7009{ 7010	/* Disable the currently initialized interrupt mode */ 7011	if (phba->intr_type == MSIX) 7012		lpfc_sli4_disable_msix(phba); 7013	else if (phba->intr_type == MSI) 7014		lpfc_sli4_disable_msi(phba); 7015	else if (phba->intr_type == INTx) 7016		free_irq(phba->pcidev->irq, phba); 7017 7018	/* Reset interrupt management states */ 7019	phba->intr_type = NONE; 7020	phba->sli.slistat.sli_intr = 0; 7021 7022	return; 7023} 7024 7025/** 7026 * lpfc_unset_hba - Unset SLI3 hba device initialization 7027 * @phba: pointer to lpfc hba data structure. 7028 * 7029 * This routine is invoked to unset the HBA device initialization steps 7030 * performed on a device with SLI-3 interface spec. 7031 **/ 7032static void 7033lpfc_unset_hba(struct lpfc_hba *phba) 7034{ 7035	struct lpfc_vport *vport = phba->pport; 7036	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport); 7037 7038	spin_lock_irq(shost->host_lock); 7039	vport->load_flag |= FC_UNLOADING; 7040	spin_unlock_irq(shost->host_lock); 7041 7042	lpfc_stop_hba_timers(phba); 7043 7044	phba->pport->work_port_events = 0; 7045 7046	lpfc_sli_hba_down(phba); 7047 7048	lpfc_sli_brdrestart(phba); 7049 7050	lpfc_sli_disable_intr(phba); 7051 7052	return; 7053} 7054 7055/** 7056 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 7057 * @phba: pointer to lpfc hba data structure. 7058 * 7059 * This routine is invoked to unset the HBA device initialization steps 7060 * performed on a device with SLI-4 interface spec.
7061 **/ 7062static void 7063lpfc_sli4_unset_hba(struct lpfc_hba *phba) 7064{ 7065	struct lpfc_vport *vport = phba->pport; 7066	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport); 7067 7068	spin_lock_irq(shost->host_lock); 7069	vport->load_flag |= FC_UNLOADING; 7070	spin_unlock_irq(shost->host_lock); 7071 7072	phba->pport->work_port_events = 0; 7073 7074	lpfc_sli4_hba_down(phba); 7075 7076	lpfc_sli4_disable_intr(phba); 7077 7078	return; 7079} 7080 7081/** 7082 * lpfc_sli4_hba_unset - Unset the fcoe hba 7083 * @phba: Pointer to HBA context object. 7084 * 7085 * This function is called in the SLI4 code path to reset the HBA's FCoE 7086 * function. The caller is not required to hold any lock. This routine 7087 * issues a PCI function reset mailbox command to reset the FCoE function. 7088 * At the end of the function, it calls the lpfc_hba_down_post function to 7089 * free any pending commands. 7090 **/ 7091static void 7092lpfc_sli4_hba_unset(struct lpfc_hba *phba) 7093{ 7094	int wait_cnt = 0; 7095	LPFC_MBOXQ_t *mboxq; 7096 7097	lpfc_stop_hba_timers(phba); 7098	phba->sli4_hba.intr_enable = 0; 7099 7100	/* 7101	 * Gracefully wait out the potential current outstanding asynchronous 7102	 * mailbox command. 7103	 */ 7104 7105	/* First, block any pending async mailbox command from being posted */ 7106	spin_lock_irq(&phba->hbalock); 7107	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7108	spin_unlock_irq(&phba->hbalock); 7109	/* Now, try to wait it out if we can */ 7110	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7111		msleep(10); 7112		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 7113			break; 7114	} 7115	/* Forcefully release the outstanding mailbox command if timed out */ 7116	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7117		spin_lock_irq(&phba->hbalock); 7118		mboxq = phba->sli.mbox_active; 7119		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7120		__lpfc_mbox_cmpl_put(phba, mboxq); 7121		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7122		phba->sli.mbox_active = NULL; 7123		spin_unlock_irq(&phba->hbalock); 7124	} 7125 7126	/* Tear down the queues in the HBA */ 7127	lpfc_sli4_queue_unset(phba); 7128 7129	/* Disable PCI subsystem interrupt */ 7130	lpfc_sli4_disable_intr(phba); 7131 7132	/* Stop the kthread; the stop signal shall trigger work_done one more time */ 7133	kthread_stop(phba->worker_thread); 7134 7135	/* Stop the SLI4 device port */ 7136	phba->pport->work_port_events = 0; 7137} 7138 7139/** 7140 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 7141 * @phba: Pointer to HBA context object. 7142 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 7143 * 7144 * This function is called in the SLI4 code path to read the port's 7145 * SLI4 capabilities. 7146 * 7147 * This function may be called from any context that can block-wait 7148 * for the completion. The expectation is that this routine is called 7149 * typically from probe_one or from the online routine.
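 * A minimal usage sketch (the caller owns the mailbox memory, as in the
 * expected probe/online paths):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mboxq) {
 *		rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *		/* on rc == 0, phba->sli4_hba.pc_sli4_params is populated */
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}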
7150 **/ 7151int 7152lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7153{ 7154	int rc; 7155	struct lpfc_mqe *mqe; 7156	struct lpfc_pc_sli4_params *sli4_params; 7157	uint32_t mbox_tmo; 7158 7159	rc = 0; 7160	mqe = &mboxq->u.mqe; 7161 7162	/* Read the port's SLI4 Parameters port capabilities */ 7163	lpfc_sli4_params(mboxq); 7164	if (!phba->sli4_hba.intr_enable) 7165		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7166	else { 7167		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); 7168		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7169	} 7170 7171	if (unlikely(rc)) 7172		return 1; 7173 7174	sli4_params = &phba->sli4_hba.pc_sli4_params; 7175	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 7176	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 7177	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 7178	sli4_params->featurelevel_1 = bf_get(featurelevel_1, 7179					     &mqe->un.sli4_params); 7180	sli4_params->featurelevel_2 = bf_get(featurelevel_2, 7181					     &mqe->un.sli4_params); 7182	sli4_params->proto_types = mqe->un.sli4_params.word3; 7183	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 7184	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 7185	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 7186	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 7187	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 7188	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 7189	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 7190	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 7191	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 7192	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 7193	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 7194	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 7195	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 7196	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 7197	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 7198	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 7199	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 7200	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 7201	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 7202	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 7203	return rc; 7204} 7205 7206/** 7207 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7208 * @pdev: pointer to PCI device 7209 * @pid: pointer to PCI device identifier 7210 * 7211 * This routine is to be called to attach a device with SLI-3 interface spec 7212 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7213 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific 7214 * information of the device and driver to see if the driver states that it can 7215 * support this kind of device. If the match is successful, the driver core 7216 * invokes this routine. If this routine determines it can claim the HBA, it 7217 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1401 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
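	 *
	 * In condensed form, the shutdown order used below is:
	 *
	 *	lpfc_sli_hba_down(phba);
	 *	kthread_stop(phba->worker_thread);
	 *	lpfc_sli_brdrestart(phba);
	 *	lpfc_stop_hba_timers(phba);
	 *	lpfc_sli_disable_intr(phba);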
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts and stops all the on-going I/Os on the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");
	/* Prepare for bringing HBA offline */
	lpfc_offline_prep(phba);
	/* Clear sli active flag to prevent sysfs access to HBA */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	/* Stop and flush all I/Os and bring HBA offline */
	lpfc_offline(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
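 *
 * A condensed sketch of the steps taken in the body below:
 *
 *	lpfc_sli_disable_intr(phba);
 *	pci_disable_device(phba->pcidev);
 *	lpfc_sli_abort_iocb_ring(phba, &psli->ring[psli->fcp_ring]);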
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");
	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
	/*
	 * There may be I/Os dropped by the firmware.
	 * Error the iocbs (I/Os) on the txcmplq and let the SCSI layer
	 * retry them after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec.
 * This is called after PCI bus has been reset to restart the PCI card from
 * scratch, as if from a cold-boot. During the PCI subsystem error recovery,
 * after driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will
 * perform proper error recovery and then call this routine before calling
 * the .resume method to recover the device. This function will initialize
 * the HBA device, enable the interrupt, but it will just put the HBA to
 * offline state without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring the device online */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
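 *
 * The reservation scales with the number of XRIs configured for the
 * port; in sketch form, the mapping implemented below is:
 *
 *	max_xri <=  100 -> 10
 *	max_xri <=  256 -> 25
 *	max_xri <=  512 -> 50
 *	max_xri <= 1024 -> 100
 *	otherwise       -> 150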
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else
			return 150;
	} else
		return 0;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-4 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can support this
 * kind of device. If the match is successful, the driver core invokes this
 * routine. If this routine determines it can claim the HBA, it does all the
 * initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int mcnt;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1409 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba,
			phba->sli4_hba.max_cfg_param.max_xri);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0426 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* Default to single FCP EQ for non-MSI-X */
		if (phba->intr_type != MSIX)
			phba->cfg_fcp_eq_count = 1;
		/* Set up SLI-4 HBA */
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1421 Failed to set up hba\n");
			error = -ENODEV;
			goto out_disable_intr;
		}

		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
		if (intr_mode != 0)
			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
							    LPFC_ACT_INTR_CNT);

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0451 Configure interrupt mode (%d) "
				"failed active interrupt test.\n",
				intr_mode);
		/* Unset the previous SLI-4 HBA setup */
		lpfc_sli4_unset_hba(phba);
		/* Try next level of interrupt mode */
		cfg_mode = --intr_mode;
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-4 interface spec
 * from the PCI subsystem. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
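 *
 * Teardown is roughly the reverse of lpfc_pci_probe_one_s4(); a
 * condensed sketch of the order used in the body below:
 *
 *	lpfc_free_sysfs_attr(vport);
 *	fc_remove_host(shost);
 *	scsi_remove_host(shost);
 *	lpfc_cleanup(vport);
 *	lpfc_sli4_hba_unset(phba);
 *	lpfc_scsi_free(phba);
 *	lpfc_sli4_driver_resource_unset(phba);
 *	lpfc_sli4_pci_mem_unset(phba);
 *	lpfc_hba_free(phba);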
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bringing
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume -- all
 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call will be treated as SUSPEND and the driver will fully
 * reinitialize its device during resume() method call, the driver will set
 * device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
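 *
 * A condensed sketch of the quiesce sequence in the body below:
 *
 *	lpfc_offline_prep(phba);
 *	lpfc_offline(phba);
 *	kthread_stop(phba->worker_thread);
 *	lpfc_sli4_disable_intr(phba);
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);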
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0298 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
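 *
 * Like the other SLI-4 EEH callbacks at this revision, this is a
 * placeholder; it is reached through the top-level dispatcher, roughly:
 *
 *	switch (phba->pci_dev_grp) {
 *	case LPFC_PCI_DEV_LP:
 *		lpfc_io_resume_s3(pdev);
 *		break;
 *	case LPFC_PCI_DEV_OC:
 *		lpfc_io_resume_s4(pdev);
 *		break;
 *	}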
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	return;
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to
 * the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
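 *
 * The dispatch is keyed off phba->pci_dev_grp, recorded when the SLI API
 * jump table was chosen at probe time (an inference from the probe paths
 * above); in sketch form:
 *
 *	switch (phba->pci_dev_grp) {
 *	case LPFC_PCI_DEV_LP:
 *		rc = lpfc_pci_suspend_one_s3(pdev, msg);
 *		break;
 *	case LPFC_PCI_DEV_OC:
 *		rc = lpfc_pci_suspend_one_s4(pdev, msg);
 *		break;
 *	}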
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling.
 * This function is called after PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
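 *
 * In sketch form, the load path implemented below is:
 *
 *	lpfc_transport_template =
 *		fc_attach_transport(&lpfc_transport_functions);
 *	error = pci_register_driver(&lpfc_driver);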
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);