lpfc_init.c revision 04c684968487eb4f98728363a97b8da48f3bb958
1/******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22#include <linux/blkdev.h> 23#include <linux/delay.h> 24#include <linux/dma-mapping.h> 25#include <linux/idr.h> 26#include <linux/interrupt.h> 27#include <linux/kthread.h> 28#include <linux/pci.h> 29#include <linux/spinlock.h> 30#include <linux/ctype.h> 31 32#include <scsi/scsi.h> 33#include <scsi/scsi_device.h> 34#include <scsi/scsi_host.h> 35#include <scsi/scsi_transport_fc.h> 36 37#include "lpfc_hw4.h" 38#include "lpfc_hw.h" 39#include "lpfc_sli.h" 40#include "lpfc_sli4.h" 41#include "lpfc_nl.h" 42#include "lpfc_disc.h" 43#include "lpfc_scsi.h" 44#include "lpfc.h" 45#include "lpfc_logmsg.h" 46#include "lpfc_crtn.h" 47#include "lpfc_vport.h" 48#include "lpfc_version.h" 49 50char *_dump_buf_data; 51unsigned long _dump_buf_data_order; 52char *_dump_buf_dif; 53unsigned long _dump_buf_dif_order; 54spinlock_t _dump_buf_lock; 55 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *); 58static int lpfc_sli4_queue_create(struct lpfc_hba *); 59static void lpfc_sli4_queue_destroy(struct lpfc_hba *); 60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 61static int lpfc_setup_endian_order(struct lpfc_hba *); 62static int lpfc_sli4_read_config(struct lpfc_hba *); 63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 64static void lpfc_free_sgl_list(struct lpfc_hba *); 65static int lpfc_init_sgl_list(struct lpfc_hba *); 66static int lpfc_init_active_sgl_array(struct lpfc_hba *); 67static void lpfc_free_active_sgl(struct lpfc_hba *); 68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 73 74static struct scsi_transport_template *lpfc_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 76static DEFINE_IDR(lpfc_hba_index); 77 78/** 79 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 80 * @phba: pointer to lpfc hba data structure. 81 * 82 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 83 * mailbox command. 
It retrieves the revision information from the HBA and 84 * collects the Vital Product Data (VPD) about the HBA for preparing the 85 * configuration of the HBA. 86 * 87 * Return codes: 88 * 0 - success. 89 * -ERESTART - requests the SLI layer to reset the HBA and try again. 90 * Any other value - indicates an error. 91 **/ 92int 93lpfc_config_port_prep(struct lpfc_hba *phba) 94{ 95 lpfc_vpd_t *vp = &phba->vpd; 96 int i = 0, rc; 97 LPFC_MBOXQ_t *pmb; 98 MAILBOX_t *mb; 99 char *lpfc_vpd_data = NULL; 100 uint16_t offset = 0; 101 static char licensed[56] = 102 "key unlock for use with gnu public licensed code only\0"; 103 static int init_key = 1; 104 105 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 106 if (!pmb) { 107 phba->link_state = LPFC_HBA_ERROR; 108 return -ENOMEM; 109 } 110 111 mb = &pmb->u.mb; 112 phba->link_state = LPFC_INIT_MBX_CMDS; 113 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 115 if (init_key) { 116 uint32_t *ptext = (uint32_t *) licensed; 117 118 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 119 *ptext = cpu_to_be32(*ptext); 120 init_key = 0; 121 } 122 123 lpfc_read_nv(phba, pmb); 124 memset((char*)mb->un.varRDnvp.rsvd3, 0, 125 sizeof (mb->un.varRDnvp.rsvd3)); 126 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 127 sizeof (licensed)); 128 129 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 130 131 if (rc != MBX_SUCCESS) { 132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 133 "0324 Config Port initialization " 134 "error, mbxCmd x%x READ_NVPARM, " 135 "mbxStatus x%x\n", 136 mb->mbxCommand, mb->mbxStatus); 137 mempool_free(pmb, phba->mbox_mem_pool); 138 return -ERESTART; 139 } 140 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 141 sizeof(phba->wwnn)); 142 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 143 sizeof(phba->wwpn)); 144 } 145 146 phba->sli3_options = 0x0; 147 148 /* Setup and issue mailbox READ REV command */ 149 lpfc_read_rev(phba, pmb); 150 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 151 if (rc != MBX_SUCCESS) { 152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 153 "0439 Adapter failed to init, mbxCmd x%x " 154 "READ_REV, mbxStatus x%x\n", 155 mb->mbxCommand, mb->mbxStatus); 156 mempool_free( pmb, phba->mbox_mem_pool); 157 return -ERESTART; 158 } 159 160 161 /* 162 * The value of rr must be 1 since the driver set the cv field to 1. 163 * This setting requires the FW to set all revision fields. 
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get the
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 * 0 - success.
 * Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * overheated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.
*/ 352 lpfc_read_sparam(phba, pmb, 0); 353 pmb->vport = vport; 354 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 356 "0448 Adapter failed init, mbxCmd x%x " 357 "READ_SPARM mbxStatus x%x\n", 358 mb->mbxCommand, mb->mbxStatus); 359 phba->link_state = LPFC_HBA_ERROR; 360 mp = (struct lpfc_dmabuf *) pmb->context1; 361 mempool_free( pmb, phba->mbox_mem_pool); 362 lpfc_mbuf_free(phba, mp->virt, mp->phys); 363 kfree(mp); 364 return -EIO; 365 } 366 367 mp = (struct lpfc_dmabuf *) pmb->context1; 368 369 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 370 lpfc_mbuf_free(phba, mp->virt, mp->phys); 371 kfree(mp); 372 pmb->context1 = NULL; 373 374 if (phba->cfg_soft_wwnn) 375 u64_to_wwn(phba->cfg_soft_wwnn, 376 vport->fc_sparam.nodeName.u.wwn); 377 if (phba->cfg_soft_wwpn) 378 u64_to_wwn(phba->cfg_soft_wwpn, 379 vport->fc_sparam.portName.u.wwn); 380 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 381 sizeof (struct lpfc_name)); 382 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 383 sizeof (struct lpfc_name)); 384 385 /* Update the fc_host data structures with new wwn. */ 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 388 389 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* This should be consolidated into parse_vpd ? - mr */ 391 if (phba->SerialNumber[0] == 0) { 392 uint8_t *outptr; 393 394 outptr = &vport->fc_nodename.u.s.IEEE[0]; 395 for (i = 0; i < 12; i++) { 396 status = *outptr++; 397 j = ((status & 0xf0) >> 4); 398 if (j <= 9) 399 phba->SerialNumber[i] = 400 (char)((uint8_t) 0x30 + (uint8_t) j); 401 else 402 phba->SerialNumber[i] = 403 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 404 i++; 405 j = (status & 0xf); 406 if (j <= 9) 407 phba->SerialNumber[i] = 408 (char)((uint8_t) 0x30 + (uint8_t) j); 409 else 410 phba->SerialNumber[i] = 411 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 412 } 413 } 414 415 lpfc_read_config(phba, pmb); 416 pmb->vport = vport; 417 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 419 "0453 Adapter failed to init, mbxCmd x%x " 420 "READ_CONFIG, mbxStatus x%x\n", 421 mb->mbxCommand, mb->mbxStatus); 422 phba->link_state = LPFC_HBA_ERROR; 423 mempool_free( pmb, phba->mbox_mem_pool); 424 return -EIO; 425 } 426 427 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 428 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 429 phba->cfg_hba_queue_depth = 430 mb->un.varRdConfig.max_xri + 1; 431 432 phba->lmt = mb->un.varRdConfig.lmt; 433 434 /* Get the default values for Model Name and Description */ 435 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 436 437 if ((phba->cfg_link_speed > LINK_SPEED_10G) 438 || ((phba->cfg_link_speed == LINK_SPEED_1G) 439 && !(phba->lmt & LMT_1Gb)) 440 || ((phba->cfg_link_speed == LINK_SPEED_2G) 441 && !(phba->lmt & LMT_2Gb)) 442 || ((phba->cfg_link_speed == LINK_SPEED_4G) 443 && !(phba->lmt & LMT_4Gb)) 444 || ((phba->cfg_link_speed == LINK_SPEED_8G) 445 && !(phba->lmt & LMT_8Gb)) 446 || ((phba->cfg_link_speed == LINK_SPEED_10G) 447 && !(phba->lmt & LMT_10Gb))) { 448 /* Reset link speed to auto */ 449 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 450 "1302 Invalid speed for this board: " 451 "Reset link speed to auto: x%x\n", 452 phba->cfg_link_speed); 453 phba->cfg_link_speed = LINK_SPEED_AUTO; 454 } 455 456 phba->link_state = LPFC_LINK_DOWN; 457 458 /* 
 Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"0352 Config MSI mailbox command "
				"failed, mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand,
				pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0454 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
			"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 * 0 - success.
 * Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 * 0 - success.
 * Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 * 0 - success.
 * Any other value - error.
681 **/ 682static int 683lpfc_hba_down_post_s4(struct lpfc_hba *phba) 684{ 685 struct lpfc_scsi_buf *psb, *psb_next; 686 LIST_HEAD(aborts); 687 int ret; 688 unsigned long iflag = 0; 689 ret = lpfc_hba_down_post_s3(phba); 690 if (ret) 691 return ret; 692 /* At this point in time the HBA is either reset or DOA. Either 693 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 694 * on the lpfc_sgl_list so that it can either be freed if the 695 * driver is unloading or reposted if the driver is restarting 696 * the port. 697 */ 698 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ 699 /* scsl_buf_list */ 700 /* abts_sgl_list_lock required because worker thread uses this 701 * list. 702 */ 703 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 704 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 705 &phba->sli4_hba.lpfc_sgl_list); 706 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 707 /* abts_scsi_buf_list_lock required because worker thread uses this 708 * list. 709 */ 710 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 711 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 712 &aborts); 713 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 714 spin_unlock_irq(&phba->hbalock); 715 716 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 717 psb->pCmd = NULL; 718 psb->status = IOSTAT_SUCCESS; 719 } 720 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 721 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 722 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 723 return 0; 724} 725 726/** 727 * lpfc_hba_down_post - Wrapper func for hba down post routine 728 * @phba: pointer to lpfc HBA data structure. 729 * 730 * This routine wraps the actual SLI3 or SLI4 routine for performing 731 * uninitialization after the HBA is reset when bring down the SLI Layer. 732 * 733 * Return codes 734 * 0 - sucess. 735 * Any other value - error. 736 **/ 737int 738lpfc_hba_down_post(struct lpfc_hba *phba) 739{ 740 return (*phba->lpfc_hba_down_post)(phba); 741} 742 743/** 744 * lpfc_hb_timeout - The HBA-timer timeout handler 745 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 746 * 747 * This is the HBA-timer timeout handler registered to the lpfc driver. When 748 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 749 * work-port-events bitmap and the worker thread is notified. This timeout 750 * event will be used by the worker thread to invoke the actual timeout 751 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 752 * be performed in the timeout handler and the HBA timeout event bit shall 753 * be cleared by the worker thread after it has taken the event bitmap out. 754 **/ 755static void 756lpfc_hb_timeout(unsigned long ptr) 757{ 758 struct lpfc_hba *phba; 759 uint32_t tmo_posted; 760 unsigned long iflag; 761 762 phba = (struct lpfc_hba *)ptr; 763 764 /* Check for heart beat timeout conditions */ 765 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 766 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 767 if (!tmo_posted) 768 phba->pport->work_port_events |= WORKER_HB_TMO; 769 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 770 771 /* Tell the worker thread there is work to do */ 772 if (!tmo_posted) 773 lpfc_worker_wake_up(phba); 774 return; 775} 776 777/** 778 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 779 * @phba: pointer to lpfc hba data structure. 
780 * @pmboxq: pointer to the driver internal queue element for mailbox command. 781 * 782 * This is the callback function to the lpfc heart-beat mailbox command. 783 * If configured, the lpfc driver issues the heart-beat mailbox command to 784 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 785 * heart-beat mailbox command is issued, the driver shall set up heart-beat 786 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 787 * heart-beat outstanding state. Once the mailbox command comes back and 788 * no error conditions detected, the heart-beat mailbox command timer is 789 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 790 * state is cleared for the next heart-beat. If the timer expired with the 791 * heart-beat outstanding state set, the driver will put the HBA offline. 792 **/ 793static void 794lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 795{ 796 unsigned long drvr_flag; 797 798 spin_lock_irqsave(&phba->hbalock, drvr_flag); 799 phba->hb_outstanding = 0; 800 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 801 802 /* Check and reset heart-beat timer is necessary */ 803 mempool_free(pmboxq, phba->mbox_mem_pool); 804 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 805 !(phba->link_state == LPFC_HBA_ERROR) && 806 !(phba->pport->load_flag & FC_UNLOADING)) 807 mod_timer(&phba->hb_tmofunc, 808 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 809 return; 810} 811 812/** 813 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 814 * @phba: pointer to lpfc hba data structure. 815 * 816 * This is the actual HBA-timer timeout handler to be invoked by the worker 817 * thread whenever the HBA timer fired and HBA-timeout event posted. This 818 * handler performs any periodic operations needed for the device. If such 819 * periodic event has already been attended to either in the interrupt handler 820 * or by processing slow-ring or fast-ring events within the HBA-timer 821 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 822 * the timer for the next timeout period. If lpfc heart-beat mailbox command 823 * is configured and there is no heart-beat mailbox command outstanding, a 824 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 825 * has been a heart-beat mailbox command outstanding, the HBA shall be put 826 * to offline. 
827 **/ 828void 829lpfc_hb_timeout_handler(struct lpfc_hba *phba) 830{ 831 LPFC_MBOXQ_t *pmboxq; 832 struct lpfc_dmabuf *buf_ptr; 833 int retval; 834 struct lpfc_sli *psli = &phba->sli; 835 LIST_HEAD(completions); 836 837 if ((phba->link_state == LPFC_HBA_ERROR) || 838 (phba->pport->load_flag & FC_UNLOADING) || 839 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 840 return; 841 842 spin_lock_irq(&phba->pport->work_port_lock); 843 844 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 845 jiffies)) { 846 spin_unlock_irq(&phba->pport->work_port_lock); 847 if (!phba->hb_outstanding) 848 mod_timer(&phba->hb_tmofunc, 849 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 850 else 851 mod_timer(&phba->hb_tmofunc, 852 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 853 return; 854 } 855 spin_unlock_irq(&phba->pport->work_port_lock); 856 857 if (phba->elsbuf_cnt && 858 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 859 spin_lock_irq(&phba->hbalock); 860 list_splice_init(&phba->elsbuf, &completions); 861 phba->elsbuf_cnt = 0; 862 phba->elsbuf_prev_cnt = 0; 863 spin_unlock_irq(&phba->hbalock); 864 865 while (!list_empty(&completions)) { 866 list_remove_head(&completions, buf_ptr, 867 struct lpfc_dmabuf, list); 868 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 869 kfree(buf_ptr); 870 } 871 } 872 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 873 874 /* If there is no heart beat outstanding, issue a heartbeat command */ 875 if (phba->cfg_enable_hba_heartbeat) { 876 if (!phba->hb_outstanding) { 877 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 878 if (!pmboxq) { 879 mod_timer(&phba->hb_tmofunc, 880 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 881 return; 882 } 883 884 lpfc_heart_beat(phba, pmboxq); 885 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 886 pmboxq->vport = phba->pport; 887 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 888 889 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 890 mempool_free(pmboxq, phba->mbox_mem_pool); 891 mod_timer(&phba->hb_tmofunc, 892 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 893 return; 894 } 895 mod_timer(&phba->hb_tmofunc, 896 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 897 phba->hb_outstanding = 1; 898 return; 899 } else { 900 /* 901 * If heart beat timeout called with hb_outstanding set 902 * we need to take the HBA offline. 903 */ 904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 905 "0459 Adapter heartbeat failure, " 906 "taking this port offline.\n"); 907 908 spin_lock_irq(&phba->hbalock); 909 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 910 spin_unlock_irq(&phba->hbalock); 911 912 lpfc_offline_prep(phba); 913 lpfc_offline(phba); 914 lpfc_unblock_mgmt_io(phba); 915 phba->link_state = LPFC_HBA_ERROR; 916 lpfc_hba_down_post(phba); 917 } 918 } 919} 920 921/** 922 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 923 * @phba: pointer to lpfc hba data structure. 924 * 925 * This routine is called to bring the HBA offline when HBA hardware error 926 * other than Port Error 6 has been detected. 
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	lpfc_sli_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered the error attention. That could
	 * cause I/Os to be dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
			"1301 Re-establishing Link "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered the error attention with
		 * HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and let
		 * the SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
1115 */ 1116 lpfc_offline_prep(phba); 1117 lpfc_offline(phba); 1118 lpfc_sli_brdrestart(phba); 1119 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1120 lpfc_unblock_mgmt_io(phba); 1121 return; 1122 } 1123 lpfc_unblock_mgmt_io(phba); 1124 } else if (phba->work_hs & HS_CRIT_TEMP) { 1125 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1126 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1127 temp_event_data.event_code = LPFC_CRIT_TEMP; 1128 temp_event_data.data = (uint32_t)temperature; 1129 1130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1131 "0406 Adapter maximum temperature exceeded " 1132 "(%ld), taking this port offline " 1133 "Data: x%x x%x x%x\n", 1134 temperature, phba->work_hs, 1135 phba->work_status[0], phba->work_status[1]); 1136 1137 shost = lpfc_shost_from_vport(phba->pport); 1138 fc_host_post_vendor_event(shost, fc_get_event_number(), 1139 sizeof(temp_event_data), 1140 (char *) &temp_event_data, 1141 SCSI_NL_VID_TYPE_PCI 1142 | PCI_VENDOR_ID_EMULEX); 1143 1144 spin_lock_irq(&phba->hbalock); 1145 phba->over_temp_state = HBA_OVER_TEMP; 1146 spin_unlock_irq(&phba->hbalock); 1147 lpfc_offline_eratt(phba); 1148 1149 } else { 1150 /* The if clause above forces this code path when the status 1151 * failure is a value other than FFER6. Do not call the offline 1152 * twice. This is the adapter hardware error path. 1153 */ 1154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1155 "0457 Adapter Hardware Error " 1156 "Data: x%x x%x x%x\n", 1157 phba->work_hs, 1158 phba->work_status[0], phba->work_status[1]); 1159 1160 event_data = FC_REG_DUMP_EVENT; 1161 shost = lpfc_shost_from_vport(vport); 1162 fc_host_post_vendor_event(shost, fc_get_event_number(), 1163 sizeof(event_data), (char *) &event_data, 1164 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1165 1166 lpfc_offline_eratt(phba); 1167 } 1168 return; 1169} 1170 1171/** 1172 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1173 * @phba: pointer to lpfc hba data structure. 1174 * 1175 * This routine is invoked to handle the SLI4 HBA hardware error attention 1176 * conditions. 1177 **/ 1178static void 1179lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1180{ 1181 struct lpfc_vport *vport = phba->pport; 1182 uint32_t event_data; 1183 struct Scsi_Host *shost; 1184 1185 /* If the pci channel is offline, ignore possible errors, since 1186 * we cannot communicate with the pci card anyway. 1187 */ 1188 if (pci_channel_offline(phba->pcidev)) 1189 return; 1190 /* If resets are disabled then leave the HBA alone and return */ 1191 if (!phba->cfg_enable_hba_reset) 1192 return; 1193 1194 /* Send an internal error event to mgmt application */ 1195 lpfc_board_errevt_to_mgmt(phba); 1196 1197 /* For now, the actual action for SLI4 device handling is not 1198 * specified yet, just treated it as adaptor hardware failure 1199 */ 1200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1201 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", 1202 phba->work_status[0], phba->work_status[1]); 1203 1204 event_data = FC_REG_DUMP_EVENT; 1205 shost = lpfc_shost_from_vport(vport); 1206 fc_host_post_vendor_event(shost, fc_get_event_number(), 1207 sizeof(event_data), (char *) &event_data, 1208 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1209 1210 lpfc_sli4_offline_eratt(phba); 1211} 1212 1213/** 1214 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1215 * @phba: pointer to lpfc HBA data structure. 
1216 * 1217 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1218 * routine from the API jump table function pointer from the lpfc_hba struct. 1219 * 1220 * Return codes 1221 * 0 - sucess. 1222 * Any other value - error. 1223 **/ 1224void 1225lpfc_handle_eratt(struct lpfc_hba *phba) 1226{ 1227 (*phba->lpfc_handle_eratt)(phba); 1228} 1229 1230/** 1231 * lpfc_handle_latt - The HBA link event handler 1232 * @phba: pointer to lpfc hba data structure. 1233 * 1234 * This routine is invoked from the worker thread to handle a HBA host 1235 * attention link event. 1236 **/ 1237void 1238lpfc_handle_latt(struct lpfc_hba *phba) 1239{ 1240 struct lpfc_vport *vport = phba->pport; 1241 struct lpfc_sli *psli = &phba->sli; 1242 LPFC_MBOXQ_t *pmb; 1243 volatile uint32_t control; 1244 struct lpfc_dmabuf *mp; 1245 int rc = 0; 1246 1247 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1248 if (!pmb) { 1249 rc = 1; 1250 goto lpfc_handle_latt_err_exit; 1251 } 1252 1253 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1254 if (!mp) { 1255 rc = 2; 1256 goto lpfc_handle_latt_free_pmb; 1257 } 1258 1259 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1260 if (!mp->virt) { 1261 rc = 3; 1262 goto lpfc_handle_latt_free_mp; 1263 } 1264 1265 /* Cleanup any outstanding ELS commands */ 1266 lpfc_els_flush_all_cmd(phba); 1267 1268 psli->slistat.link_event++; 1269 lpfc_read_la(phba, pmb, mp); 1270 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 1271 pmb->vport = vport; 1272 /* Block ELS IOCBs until we have processed this mbox command */ 1273 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1274 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1275 if (rc == MBX_NOT_FINISHED) { 1276 rc = 4; 1277 goto lpfc_handle_latt_free_mbuf; 1278 } 1279 1280 /* Clear Link Attention in HA REG */ 1281 spin_lock_irq(&phba->hbalock); 1282 writel(HA_LATT, phba->HAregaddr); 1283 readl(phba->HAregaddr); /* flush */ 1284 spin_unlock_irq(&phba->hbalock); 1285 1286 return; 1287 1288lpfc_handle_latt_free_mbuf: 1289 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1290 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1291lpfc_handle_latt_free_mp: 1292 kfree(mp); 1293lpfc_handle_latt_free_pmb: 1294 mempool_free(pmb, phba->mbox_mem_pool); 1295lpfc_handle_latt_err_exit: 1296 /* Enable Link attention interrupts */ 1297 spin_lock_irq(&phba->hbalock); 1298 psli->sli_flag |= LPFC_PROCESS_LA; 1299 control = readl(phba->HCregaddr); 1300 control |= HC_LAINT_ENA; 1301 writel(control, phba->HCregaddr); 1302 readl(phba->HCregaddr); /* flush */ 1303 1304 /* Clear Link Attention in HA REG */ 1305 writel(HA_LATT, phba->HAregaddr); 1306 readl(phba->HAregaddr); /* flush */ 1307 spin_unlock_irq(&phba->hbalock); 1308 lpfc_linkdown(phba); 1309 phba->link_state = LPFC_HBA_ERROR; 1310 1311 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1312 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1313 1314 return; 1315} 1316 1317/** 1318 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1319 * @phba: pointer to lpfc hba data structure. 1320 * @vpd: pointer to the vital product data. 1321 * @len: length of the vital product data in bytes. 1322 * 1323 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1324 * an array of characters. In this routine, the ModelName, ProgramType, and 1325 * ModelDesc, etc. fields of the phba data structure will be populated. 
1326 * 1327 * Return codes 1328 * 0 - pointer to the VPD passed in is NULL 1329 * 1 - success 1330 **/ 1331int 1332lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1333{ 1334 uint8_t lenlo, lenhi; 1335 int Length; 1336 int i, j; 1337 int finished = 0; 1338 int index = 0; 1339 1340 if (!vpd) 1341 return 0; 1342 1343 /* Vital Product */ 1344 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1345 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1346 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1347 (uint32_t) vpd[3]); 1348 while (!finished && (index < (len - 4))) { 1349 switch (vpd[index]) { 1350 case 0x82: 1351 case 0x91: 1352 index += 1; 1353 lenlo = vpd[index]; 1354 index += 1; 1355 lenhi = vpd[index]; 1356 index += 1; 1357 i = ((((unsigned short)lenhi) << 8) + lenlo); 1358 index += i; 1359 break; 1360 case 0x90: 1361 index += 1; 1362 lenlo = vpd[index]; 1363 index += 1; 1364 lenhi = vpd[index]; 1365 index += 1; 1366 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1367 if (Length > len - index) 1368 Length = len - index; 1369 while (Length > 0) { 1370 /* Look for Serial Number */ 1371 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1372 index += 2; 1373 i = vpd[index]; 1374 index += 1; 1375 j = 0; 1376 Length -= (3+i); 1377 while(i--) { 1378 phba->SerialNumber[j++] = vpd[index++]; 1379 if (j == 31) 1380 break; 1381 } 1382 phba->SerialNumber[j] = 0; 1383 continue; 1384 } 1385 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1386 phba->vpd_flag |= VPD_MODEL_DESC; 1387 index += 2; 1388 i = vpd[index]; 1389 index += 1; 1390 j = 0; 1391 Length -= (3+i); 1392 while(i--) { 1393 phba->ModelDesc[j++] = vpd[index++]; 1394 if (j == 255) 1395 break; 1396 } 1397 phba->ModelDesc[j] = 0; 1398 continue; 1399 } 1400 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1401 phba->vpd_flag |= VPD_MODEL_NAME; 1402 index += 2; 1403 i = vpd[index]; 1404 index += 1; 1405 j = 0; 1406 Length -= (3+i); 1407 while(i--) { 1408 phba->ModelName[j++] = vpd[index++]; 1409 if (j == 79) 1410 break; 1411 } 1412 phba->ModelName[j] = 0; 1413 continue; 1414 } 1415 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1416 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1417 index += 2; 1418 i = vpd[index]; 1419 index += 1; 1420 j = 0; 1421 Length -= (3+i); 1422 while(i--) { 1423 phba->ProgramType[j++] = vpd[index++]; 1424 if (j == 255) 1425 break; 1426 } 1427 phba->ProgramType[j] = 0; 1428 continue; 1429 } 1430 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1431 phba->vpd_flag |= VPD_PORT; 1432 index += 2; 1433 i = vpd[index]; 1434 index += 1; 1435 j = 0; 1436 Length -= (3+i); 1437 while(i--) { 1438 phba->Port[j++] = vpd[index++]; 1439 if (j == 19) 1440 break; 1441 } 1442 phba->Port[j] = 0; 1443 continue; 1444 } 1445 else { 1446 index += 2; 1447 i = vpd[index]; 1448 index += 1; 1449 index += i; 1450 Length -= (3 + i); 1451 } 1452 } 1453 finished = 0; 1454 break; 1455 case 0x78: 1456 finished = 1; 1457 break; 1458 default: 1459 index ++; 1460 break; 1461 } 1462 } 1463 1464 return(1); 1465} 1466 1467/** 1468 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1469 * @phba: pointer to lpfc hba data structure. 1470 * @mdp: pointer to the data structure to hold the derived model name. 1471 * @descp: pointer to the data structure to hold the derived description. 1472 * 1473 * This routine retrieves HBA's description based on its registered PCI device 1474 * ID. The @descp passed into this function points to an array of 256 chars. 
It 1475 * shall be returned with the model name, maximum speed, and the host bus type. 1476 * The @mdp passed into this function points to an array of 80 chars. When the 1477 * function returns, the @mdp will be filled with the model name. 1478 **/ 1479static void 1480lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1481{ 1482 lpfc_vpd_t *vp; 1483 uint16_t dev_id = phba->pcidev->device; 1484 int max_speed; 1485 int GE = 0; 1486 int oneConnect = 0; /* default is not a oneConnect */ 1487 struct { 1488 char * name; 1489 int max_speed; 1490 char * bus; 1491 } m = {"<Unknown>", 0, ""}; 1492 1493 if (mdp && mdp[0] != '\0' 1494 && descp && descp[0] != '\0') 1495 return; 1496 1497 if (phba->lmt & LMT_10Gb) 1498 max_speed = 10; 1499 else if (phba->lmt & LMT_8Gb) 1500 max_speed = 8; 1501 else if (phba->lmt & LMT_4Gb) 1502 max_speed = 4; 1503 else if (phba->lmt & LMT_2Gb) 1504 max_speed = 2; 1505 else 1506 max_speed = 1; 1507 1508 vp = &phba->vpd; 1509 1510 switch (dev_id) { 1511 case PCI_DEVICE_ID_FIREFLY: 1512 m = (typeof(m)){"LP6000", max_speed, "PCI"}; 1513 break; 1514 case PCI_DEVICE_ID_SUPERFLY: 1515 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1516 m = (typeof(m)){"LP7000", max_speed, "PCI"}; 1517 else 1518 m = (typeof(m)){"LP7000E", max_speed, "PCI"}; 1519 break; 1520 case PCI_DEVICE_ID_DRAGONFLY: 1521 m = (typeof(m)){"LP8000", max_speed, "PCI"}; 1522 break; 1523 case PCI_DEVICE_ID_CENTAUR: 1524 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1525 m = (typeof(m)){"LP9002", max_speed, "PCI"}; 1526 else 1527 m = (typeof(m)){"LP9000", max_speed, "PCI"}; 1528 break; 1529 case PCI_DEVICE_ID_RFLY: 1530 m = (typeof(m)){"LP952", max_speed, "PCI"}; 1531 break; 1532 case PCI_DEVICE_ID_PEGASUS: 1533 m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; 1534 break; 1535 case PCI_DEVICE_ID_THOR: 1536 m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; 1537 break; 1538 case PCI_DEVICE_ID_VIPER: 1539 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; 1540 break; 1541 case PCI_DEVICE_ID_PFLY: 1542 m = (typeof(m)){"LP982", max_speed, "PCI-X"}; 1543 break; 1544 case PCI_DEVICE_ID_TFLY: 1545 m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; 1546 break; 1547 case PCI_DEVICE_ID_HELIOS: 1548 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; 1549 break; 1550 case PCI_DEVICE_ID_HELIOS_SCSP: 1551 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; 1552 break; 1553 case PCI_DEVICE_ID_HELIOS_DCSP: 1554 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; 1555 break; 1556 case PCI_DEVICE_ID_NEPTUNE: 1557 m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; 1558 break; 1559 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1560 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; 1561 break; 1562 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1563 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; 1564 break; 1565 case PCI_DEVICE_ID_BMID: 1566 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; 1567 break; 1568 case PCI_DEVICE_ID_BSMB: 1569 m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; 1570 break; 1571 case PCI_DEVICE_ID_ZEPHYR: 1572 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1573 break; 1574 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1575 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1576 break; 1577 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1578 m = (typeof(m)){"LP2105", max_speed, "PCIe"}; 1579 GE = 1; 1580 break; 1581 case PCI_DEVICE_ID_ZMID: 1582 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1583 break; 1584 case PCI_DEVICE_ID_ZSMB: 1585 m = (typeof(m)){"LPe111", max_speed, "PCIe"}; 1586 break; 1587 case PCI_DEVICE_ID_LP101: 1588 m = 
(typeof(m)){"LP101", max_speed, "PCI-X"}; 1589 break; 1590 case PCI_DEVICE_ID_LP10000S: 1591 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1592 break; 1593 case PCI_DEVICE_ID_LP11000S: 1594 m = (typeof(m)){"LP11000-S", max_speed, 1595 "PCI-X2"}; 1596 break; 1597 case PCI_DEVICE_ID_LPE11000S: 1598 m = (typeof(m)){"LPe11000-S", max_speed, 1599 "PCIe"}; 1600 break; 1601 case PCI_DEVICE_ID_SAT: 1602 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1603 break; 1604 case PCI_DEVICE_ID_SAT_MID: 1605 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1606 break; 1607 case PCI_DEVICE_ID_SAT_SMB: 1608 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1609 break; 1610 case PCI_DEVICE_ID_SAT_DCSP: 1611 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1612 break; 1613 case PCI_DEVICE_ID_SAT_SCSP: 1614 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1615 break; 1616 case PCI_DEVICE_ID_SAT_S: 1617 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1618 break; 1619 case PCI_DEVICE_ID_HORNET: 1620 m = (typeof(m)){"LP21000", max_speed, "PCIe"}; 1621 GE = 1; 1622 break; 1623 case PCI_DEVICE_ID_PROTEUS_VF: 1624 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1625 break; 1626 case PCI_DEVICE_ID_PROTEUS_PF: 1627 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1628 break; 1629 case PCI_DEVICE_ID_PROTEUS_S: 1630 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1631 break; 1632 case PCI_DEVICE_ID_TIGERSHARK: 1633 oneConnect = 1; 1634 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; 1635 break; 1636 case PCI_DEVICE_ID_TIGERSHARK_S: 1637 oneConnect = 1; 1638 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"}; 1639 break; 1640 default: 1641 m = (typeof(m)){ NULL }; 1642 break; 1643 } 1644 1645 if (mdp && mdp[0] == '\0') 1646 snprintf(mdp, 79,"%s", m.name); 1647 /* oneConnect hba requires special processing, they are all initiators 1648 * and we put the port number on the end 1649 */ 1650 if (descp && descp[0] == '\0') { 1651 if (oneConnect) 1652 snprintf(descp, 255, 1653 "Emulex OneConnect %s, FCoE Initiator, Port %s", 1654 m.name, 1655 phba->Port); 1656 else 1657 snprintf(descp, 255, 1658 "Emulex %s %d%s %s %s", 1659 m.name, m.max_speed, 1660 (GE) ? "GE" : "Gb", 1661 m.bus, 1662 (GE) ? "FCoE Adapter" : 1663 "Fibre Channel Adapter"); 1664 } 1665} 1666 1667/** 1668 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1669 * @phba: pointer to lpfc hba data structure. 1670 * @pring: pointer to a IOCB ring. 1671 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1672 * 1673 * This routine posts a given number of IOCBs with the associated DMA buffer 1674 * descriptors specified by the cnt argument to the given IOCB ring. 1675 * 1676 * Return codes 1677 * The number of IOCBs NOT able to be posted to the IOCB ring. 
1678 **/ 1679int 1680lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1681{ 1682 IOCB_t *icmd; 1683 struct lpfc_iocbq *iocb; 1684 struct lpfc_dmabuf *mp1, *mp2; 1685 1686 cnt += pring->missbufcnt; 1687 1688 /* While there are buffers to post */ 1689 while (cnt > 0) { 1690 /* Allocate buffer for command iocb */ 1691 iocb = lpfc_sli_get_iocbq(phba); 1692 if (iocb == NULL) { 1693 pring->missbufcnt = cnt; 1694 return cnt; 1695 } 1696 icmd = &iocb->iocb; 1697 1698 /* 2 buffers can be posted per command */ 1699 /* Allocate buffer to post */ 1700 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1701 if (mp1) 1702 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1703 if (!mp1 || !mp1->virt) { 1704 kfree(mp1); 1705 lpfc_sli_release_iocbq(phba, iocb); 1706 pring->missbufcnt = cnt; 1707 return cnt; 1708 } 1709 1710 INIT_LIST_HEAD(&mp1->list); 1711 /* Allocate buffer to post */ 1712 if (cnt > 1) { 1713 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1714 if (mp2) 1715 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1716 &mp2->phys); 1717 if (!mp2 || !mp2->virt) { 1718 kfree(mp2); 1719 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1720 kfree(mp1); 1721 lpfc_sli_release_iocbq(phba, iocb); 1722 pring->missbufcnt = cnt; 1723 return cnt; 1724 } 1725 1726 INIT_LIST_HEAD(&mp2->list); 1727 } else { 1728 mp2 = NULL; 1729 } 1730 1731 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1732 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1733 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1734 icmd->ulpBdeCount = 1; 1735 cnt--; 1736 if (mp2) { 1737 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1738 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1739 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1740 cnt--; 1741 icmd->ulpBdeCount = 2; 1742 } 1743 1744 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1745 icmd->ulpLe = 1; 1746 1747 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 1748 IOCB_ERROR) { 1749 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1750 kfree(mp1); 1751 cnt++; 1752 if (mp2) { 1753 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1754 kfree(mp2); 1755 cnt++; 1756 } 1757 lpfc_sli_release_iocbq(phba, iocb); 1758 pring->missbufcnt = cnt; 1759 return cnt; 1760 } 1761 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1762 if (mp2) 1763 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1764 } 1765 pring->missbufcnt = 0; 1766 return 0; 1767} 1768 1769/** 1770 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 1771 * @phba: pointer to lpfc hba data structure. 1772 * 1773 * This routine posts initial receive IOCB buffers to the ELS ring. The 1774 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 1775 * set to 64 IOCBs. 1776 * 1777 * Return codes 1778 * 0 - success (currently always success) 1779 **/ 1780static int 1781lpfc_post_rcv_buf(struct lpfc_hba *phba) 1782{ 1783 struct lpfc_sli *psli = &phba->sli; 1784 1785 /* Ring 0, ELS / CT buffers */ 1786 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 1787 /* Ring 2 - FCP no buffers needed */ 1788 1789 return 0; 1790} 1791 1792#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1793 1794/** 1795 * lpfc_sha_init - Set up initial array of hash table entries 1796 * @HashResultPointer: pointer to an array as hash table. 1797 * 1798 * This routine sets up the initial values to the array of hash table entries 1799 * for the LC HBAs. 
1800 **/ 1801 static void 1802 lpfc_sha_init(uint32_t * HashResultPointer) 1803 { 1804 HashResultPointer[0] = 0x67452301; 1805 HashResultPointer[1] = 0xEFCDAB89; 1806 HashResultPointer[2] = 0x98BADCFE; 1807 HashResultPointer[3] = 0x10325476; 1808 HashResultPointer[4] = 0xC3D2E1F0; 1809 } 1810 1811 /** 1812 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 1813 * @HashResultPointer: pointer to an initial/result hash table. 1814 * @HashWorkingPointer: pointer to a working hash table. 1815 * 1816 * This routine iterates an initial hash table pointed to by @HashResultPointer 1817 * with the values from the working hash table pointed to by @HashWorkingPointer. 1818 * The results are put back into the initial hash table and returned through 1819 * @HashResultPointer as the result hash table. 1820 **/ 1821 static void 1822 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1823 { 1824 int t; 1825 uint32_t TEMP; 1826 uint32_t A, B, C, D, E; 1827 t = 16; 1828 do { 1829 HashWorkingPointer[t] = 1830 S(1, 1831 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1832 8] ^ 1833 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1834 } while (++t <= 79); 1835 t = 0; 1836 A = HashResultPointer[0]; 1837 B = HashResultPointer[1]; 1838 C = HashResultPointer[2]; 1839 D = HashResultPointer[3]; 1840 E = HashResultPointer[4]; 1841 1842 do { 1843 if (t < 20) { 1844 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 1845 } else if (t < 40) { 1846 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 1847 } else if (t < 60) { 1848 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 1849 } else { 1850 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 1851 } 1852 TEMP += S(5, A) + E + HashWorkingPointer[t]; 1853 E = D; 1854 D = C; 1855 C = S(30, B); 1856 B = A; 1857 A = TEMP; 1858 } while (++t <= 79); 1859 1860 HashResultPointer[0] += A; 1861 HashResultPointer[1] += B; 1862 HashResultPointer[2] += C; 1863 HashResultPointer[3] += D; 1864 HashResultPointer[4] += E; 1865 1866 } 1867 1868 /** 1869 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 1870 * @RandomChallenge: pointer to the entry of host challenge random number array. 1871 * @HashWorking: pointer to the entry of the working hash array. 1872 * 1873 * This routine calculates the working hash array referred to by @HashWorking 1874 * from the challenge random numbers associated with the host, referred to by 1875 * @RandomChallenge. The result is put into the entry of the working hash 1876 * array and returned by reference through @HashWorking. 1877 **/ 1878 static void 1879 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1880 { 1881 *HashWorking = (*RandomChallenge ^ *HashWorking); 1882 } 1883 1884 /** 1885 * lpfc_hba_init - Perform special handling for LC HBA initialization 1886 * @phba: pointer to lpfc hba data structure. 1887 * @hbainit: pointer to an array of unsigned 32-bit integers. 1888 * 1889 * This routine performs the special handling for LC HBA initialization.
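 *
 * In outline (a summary of the code below, not additional behavior): an
 * 80-word working array is allocated, its first and last word pairs are
 * seeded from the adapter WWNN, the leading words are XORed with the host
 * random challenge data (phba->RandomData) via lpfc_challenge_key(), and the
 * array is then run through one SHA-1 compression (lpfc_sha_init() followed
 * by lpfc_sha_iterate()); the resulting five-word digest is returned through
 * @hbainit.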
1890 **/ 1891void 1892lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1893{ 1894 int t; 1895 uint32_t *HashWorking; 1896 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 1897 1898 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 1899 if (!HashWorking) 1900 return; 1901 1902 HashWorking[0] = HashWorking[78] = *pwwnn++; 1903 HashWorking[1] = HashWorking[79] = *pwwnn; 1904 1905 for (t = 0; t < 7; t++) 1906 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1907 1908 lpfc_sha_init(hbainit); 1909 lpfc_sha_iterate(hbainit, HashWorking); 1910 kfree(HashWorking); 1911} 1912 1913/** 1914 * lpfc_cleanup - Performs vport cleanups before deleting a vport 1915 * @vport: pointer to a virtual N_Port data structure. 1916 * 1917 * This routine performs the necessary cleanups before deleting the @vport. 1918 * It invokes the discovery state machine to perform necessary state 1919 * transitions and to release the ndlps associated with the @vport. Note, 1920 * the physical port is treated as @vport 0. 1921 **/ 1922void 1923lpfc_cleanup(struct lpfc_vport *vport) 1924{ 1925 struct lpfc_hba *phba = vport->phba; 1926 struct lpfc_nodelist *ndlp, *next_ndlp; 1927 int i = 0; 1928 1929 if (phba->link_state > LPFC_LINK_DOWN) 1930 lpfc_port_link_failure(vport); 1931 1932 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1933 if (!NLP_CHK_NODE_ACT(ndlp)) { 1934 ndlp = lpfc_enable_node(vport, ndlp, 1935 NLP_STE_UNUSED_NODE); 1936 if (!ndlp) 1937 continue; 1938 spin_lock_irq(&phba->ndlp_lock); 1939 NLP_SET_FREE_REQ(ndlp); 1940 spin_unlock_irq(&phba->ndlp_lock); 1941 /* Trigger the release of the ndlp memory */ 1942 lpfc_nlp_put(ndlp); 1943 continue; 1944 } 1945 spin_lock_irq(&phba->ndlp_lock); 1946 if (NLP_CHK_FREE_REQ(ndlp)) { 1947 /* The ndlp should not be in memory free mode already */ 1948 spin_unlock_irq(&phba->ndlp_lock); 1949 continue; 1950 } else 1951 /* Indicate request for freeing ndlp memory */ 1952 NLP_SET_FREE_REQ(ndlp); 1953 spin_unlock_irq(&phba->ndlp_lock); 1954 1955 if (vport->port_type != LPFC_PHYSICAL_PORT && 1956 ndlp->nlp_DID == Fabric_DID) { 1957 /* Just free up ndlp with Fabric_DID for vports */ 1958 lpfc_nlp_put(ndlp); 1959 continue; 1960 } 1961 1962 if (ndlp->nlp_type & NLP_FABRIC) 1963 lpfc_disc_state_machine(vport, ndlp, NULL, 1964 NLP_EVT_DEVICE_RECOVERY); 1965 1966 lpfc_disc_state_machine(vport, ndlp, NULL, 1967 NLP_EVT_DEVICE_RM); 1968 1969 } 1970 1971 /* At this point, ALL ndlp's should be gone 1972 * because of the previous NLP_EVT_DEVICE_RM. 1973 * Lets wait for this to happen, if needed. 1974 */ 1975 while (!list_empty(&vport->fc_nodes)) { 1976 if (i++ > 3000) { 1977 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1978 "0233 Nodelist not empty\n"); 1979 list_for_each_entry_safe(ndlp, next_ndlp, 1980 &vport->fc_nodes, nlp_listp) { 1981 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1982 LOG_NODE, 1983 "0282 did:x%x ndlp:x%p " 1984 "usgmap:x%x refcnt:%d\n", 1985 ndlp->nlp_DID, (void *)ndlp, 1986 ndlp->nlp_usg_map, 1987 atomic_read( 1988 &ndlp->kref.refcount)); 1989 } 1990 break; 1991 } 1992 1993 /* Wait for any activity on ndlps to settle */ 1994 msleep(10); 1995 } 1996} 1997 1998/** 1999 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2000 * @vport: pointer to a virtual N_Port data structure. 2001 * 2002 * This routine stops all the timers associated with a @vport. This function 2003 * is invoked before disabling or deleting a @vport. Note that the physical 2004 * port is treated as @vport 0. 
2005 **/ 2006 void 2007 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2008 { 2009 del_timer_sync(&vport->els_tmofunc); 2010 del_timer_sync(&vport->fc_fdmitmo); 2011 lpfc_can_disctmo(vport); 2012 return; 2013 } 2014 2015 /** 2016 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2017 * @phba: pointer to lpfc hba data structure. 2018 * 2019 * This routine stops all the timers associated with an HBA. This function is 2020 * invoked before either putting an HBA offline or unloading the driver. 2021 **/ 2022 void 2023 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2024 { 2025 lpfc_stop_vport_timers(phba->pport); 2026 del_timer_sync(&phba->sli.mbox_tmo); 2027 del_timer_sync(&phba->fabric_block_timer); 2028 del_timer_sync(&phba->eratt_poll); 2029 del_timer_sync(&phba->hb_tmofunc); 2030 phba->hb_outstanding = 0; 2031 2032 switch (phba->pci_dev_grp) { 2033 case LPFC_PCI_DEV_LP: 2034 /* Stop any LightPulse device specific driver timers */ 2035 del_timer_sync(&phba->fcp_poll_timer); 2036 break; 2037 case LPFC_PCI_DEV_OC: 2038 /* Stop any OneConnect device specific driver timers */ 2039 break; 2040 default: 2041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2042 "0297 Invalid device group (x%x)\n", 2043 phba->pci_dev_grp); 2044 break; 2045 } 2046 return; 2047 } 2048 2049 /** 2050 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked 2051 * @phba: pointer to lpfc hba data structure. 2052 * 2053 * This routine marks an HBA's management interface as blocked. Once the HBA's 2054 * management interface is marked as blocked, all user space access to 2055 * the HBA, whether from the sysfs interface or the libdfc interface, will 2056 * be blocked. The HBA is set to block the management interface when the 2057 * driver prepares the HBA interface for online or offline. 2058 **/ 2059 static void 2060 lpfc_block_mgmt_io(struct lpfc_hba * phba) 2061 { 2062 unsigned long iflag; 2063 2064 spin_lock_irqsave(&phba->hbalock, iflag); 2065 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2066 spin_unlock_irqrestore(&phba->hbalock, iflag); 2067 } 2068 2069 /** 2070 * lpfc_online - Initialize and bring an HBA online 2071 * @phba: pointer to lpfc hba data structure. 2072 * 2073 * This routine initializes the HBA and brings it online. During this 2074 * process, the management interface is blocked to prevent user space access 2075 * to the HBA interfering with the driver initialization.
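 *
 * This is the counterpart of lpfc_offline_prep()/lpfc_offline(); for example,
 * lpfc_reset_hba() below uses the sequence lpfc_offline_prep(), lpfc_offline(),
 * lpfc_sli_brdrestart() and then lpfc_online() to bounce the adapter.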
2076 * 2077 * Return codes 2078 * 0 - successful 2079 * 1 - failed 2080 **/ 2081int 2082lpfc_online(struct lpfc_hba *phba) 2083{ 2084 struct lpfc_vport *vport; 2085 struct lpfc_vport **vports; 2086 int i; 2087 2088 if (!phba) 2089 return 0; 2090 vport = phba->pport; 2091 2092 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2093 return 0; 2094 2095 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2096 "0458 Bring Adapter online\n"); 2097 2098 lpfc_block_mgmt_io(phba); 2099 2100 if (!lpfc_sli_queue_setup(phba)) { 2101 lpfc_unblock_mgmt_io(phba); 2102 return 1; 2103 } 2104 2105 if (phba->sli_rev == LPFC_SLI_REV4) { 2106 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2107 lpfc_unblock_mgmt_io(phba); 2108 return 1; 2109 } 2110 } else { 2111 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2112 lpfc_unblock_mgmt_io(phba); 2113 return 1; 2114 } 2115 } 2116 2117 vports = lpfc_create_vport_work_array(phba); 2118 if (vports != NULL) 2119 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2120 struct Scsi_Host *shost; 2121 shost = lpfc_shost_from_vport(vports[i]); 2122 spin_lock_irq(shost->host_lock); 2123 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2124 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2125 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2126 spin_unlock_irq(shost->host_lock); 2127 } 2128 lpfc_destroy_vport_work_array(phba, vports); 2129 2130 lpfc_unblock_mgmt_io(phba); 2131 return 0; 2132} 2133 2134/** 2135 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2136 * @phba: pointer to lpfc hba data structure. 2137 * 2138 * This routine marks a HBA's management interface as not blocked. Once the 2139 * HBA's management interface is marked as not blocked, all the user space 2140 * access to the HBA, whether they are from sysfs interface or libdfc 2141 * interface will be allowed. The HBA is set to block the management interface 2142 * when the driver prepares the HBA interface for online or offline and then 2143 * set to unblock the management interface afterwards. 2144 **/ 2145void 2146lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2147{ 2148 unsigned long iflag; 2149 2150 spin_lock_irqsave(&phba->hbalock, iflag); 2151 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2152 spin_unlock_irqrestore(&phba->hbalock, iflag); 2153} 2154 2155/** 2156 * lpfc_offline_prep - Prepare a HBA to be brought offline 2157 * @phba: pointer to lpfc hba data structure. 2158 * 2159 * This routine is invoked to prepare a HBA to be brought offline. It performs 2160 * unregistration login to all the nodes on all vports and flushes the mailbox 2161 * queue to make it ready to be brought offline. 
2162 **/ 2163void 2164lpfc_offline_prep(struct lpfc_hba * phba) 2165{ 2166 struct lpfc_vport *vport = phba->pport; 2167 struct lpfc_nodelist *ndlp, *next_ndlp; 2168 struct lpfc_vport **vports; 2169 int i; 2170 2171 if (vport->fc_flag & FC_OFFLINE_MODE) 2172 return; 2173 2174 lpfc_block_mgmt_io(phba); 2175 2176 lpfc_linkdown(phba); 2177 2178 /* Issue an unreg_login to all nodes on all vports */ 2179 vports = lpfc_create_vport_work_array(phba); 2180 if (vports != NULL) { 2181 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2182 struct Scsi_Host *shost; 2183 2184 if (vports[i]->load_flag & FC_UNLOADING) 2185 continue; 2186 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 2187 shost = lpfc_shost_from_vport(vports[i]); 2188 list_for_each_entry_safe(ndlp, next_ndlp, 2189 &vports[i]->fc_nodes, 2190 nlp_listp) { 2191 if (!NLP_CHK_NODE_ACT(ndlp)) 2192 continue; 2193 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2194 continue; 2195 if (ndlp->nlp_type & NLP_FABRIC) { 2196 lpfc_disc_state_machine(vports[i], ndlp, 2197 NULL, NLP_EVT_DEVICE_RECOVERY); 2198 lpfc_disc_state_machine(vports[i], ndlp, 2199 NULL, NLP_EVT_DEVICE_RM); 2200 } 2201 spin_lock_irq(shost->host_lock); 2202 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2203 spin_unlock_irq(shost->host_lock); 2204 lpfc_unreg_rpi(vports[i], ndlp); 2205 } 2206 } 2207 } 2208 lpfc_destroy_vport_work_array(phba, vports); 2209 2210 lpfc_sli_mbox_sys_shutdown(phba); 2211} 2212 2213/** 2214 * lpfc_offline - Bring a HBA offline 2215 * @phba: pointer to lpfc hba data structure. 2216 * 2217 * This routine actually brings a HBA offline. It stops all the timers 2218 * associated with the HBA, brings down the SLI layer, and eventually 2219 * marks the HBA as in offline state for the upper layer protocol. 2220 **/ 2221void 2222lpfc_offline(struct lpfc_hba *phba) 2223{ 2224 struct Scsi_Host *shost; 2225 struct lpfc_vport **vports; 2226 int i; 2227 2228 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2229 return; 2230 2231 /* stop port and all timers associated with this hba */ 2232 lpfc_stop_port(phba); 2233 vports = lpfc_create_vport_work_array(phba); 2234 if (vports != NULL) 2235 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2236 lpfc_stop_vport_timers(vports[i]); 2237 lpfc_destroy_vport_work_array(phba, vports); 2238 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2239 "0460 Bring Adapter offline\n"); 2240 /* Bring down the SLI Layer and cleanup. The HBA is offline 2241 now. */ 2242 lpfc_sli_hba_down(phba); 2243 spin_lock_irq(&phba->hbalock); 2244 phba->work_ha = 0; 2245 spin_unlock_irq(&phba->hbalock); 2246 vports = lpfc_create_vport_work_array(phba); 2247 if (vports != NULL) 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2249 shost = lpfc_shost_from_vport(vports[i]); 2250 spin_lock_irq(shost->host_lock); 2251 vports[i]->work_port_events = 0; 2252 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2253 spin_unlock_irq(shost->host_lock); 2254 } 2255 lpfc_destroy_vport_work_array(phba, vports); 2256} 2257 2258/** 2259 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2260 * @phba: pointer to lpfc hba data structure. 2261 * 2262 * This routine is to free all the SCSI buffers and IOCBs from the driver 2263 * list back to kernel. It is called from lpfc_pci_remove_one to free 2264 * the internal resources before the device is removed from the system. 
2265 * 2266 * Return codes 2267 * 0 - successful (for now, it always returns 0) 2268 **/ 2269static int 2270lpfc_scsi_free(struct lpfc_hba *phba) 2271{ 2272 struct lpfc_scsi_buf *sb, *sb_next; 2273 struct lpfc_iocbq *io, *io_next; 2274 2275 spin_lock_irq(&phba->hbalock); 2276 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2277 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2278 list_del(&sb->list); 2279 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2280 sb->dma_handle); 2281 kfree(sb); 2282 phba->total_scsi_bufs--; 2283 } 2284 2285 /* Release all the lpfc_iocbq entries maintained by this host. */ 2286 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2287 list_del(&io->list); 2288 kfree(io); 2289 phba->total_iocbq_bufs--; 2290 } 2291 2292 spin_unlock_irq(&phba->hbalock); 2293 2294 return 0; 2295} 2296 2297/** 2298 * lpfc_create_port - Create an FC port 2299 * @phba: pointer to lpfc hba data structure. 2300 * @instance: a unique integer ID to this FC port. 2301 * @dev: pointer to the device data structure. 2302 * 2303 * This routine creates a FC port for the upper layer protocol. The FC port 2304 * can be created on top of either a physical port or a virtual port provided 2305 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2306 * and associates the FC port created before adding the shost into the SCSI 2307 * layer. 2308 * 2309 * Return codes 2310 * @vport - pointer to the virtual N_Port data structure. 2311 * NULL - port create failed. 2312 **/ 2313struct lpfc_vport * 2314lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2315{ 2316 struct lpfc_vport *vport; 2317 struct Scsi_Host *shost; 2318 int error = 0; 2319 2320 if (dev != &phba->pcidev->dev) 2321 shost = scsi_host_alloc(&lpfc_vport_template, 2322 sizeof(struct lpfc_vport)); 2323 else 2324 shost = scsi_host_alloc(&lpfc_template, 2325 sizeof(struct lpfc_vport)); 2326 if (!shost) 2327 goto out; 2328 2329 vport = (struct lpfc_vport *) shost->hostdata; 2330 vport->phba = phba; 2331 vport->load_flag |= FC_LOADING; 2332 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2333 vport->fc_rscn_flush = 0; 2334 2335 lpfc_get_vport_cfgparam(vport); 2336 shost->unique_id = instance; 2337 shost->max_id = LPFC_MAX_TARGET; 2338 shost->max_lun = vport->cfg_max_luns; 2339 shost->this_id = -1; 2340 shost->max_cmd_len = 16; 2341 if (phba->sli_rev == LPFC_SLI_REV4) { 2342 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; 2343 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2344 } 2345 2346 /* 2347 * Set initial can_queue value since 0 is no longer supported and 2348 * scsi_add_host will fail. This will be adjusted later based on the 2349 * max xri value determined in hba setup. 2350 */ 2351 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2352 if (dev != &phba->pcidev->dev) { 2353 shost->transportt = lpfc_vport_transport_template; 2354 vport->port_type = LPFC_NPIV_PORT; 2355 } else { 2356 shost->transportt = lpfc_transport_template; 2357 vport->port_type = LPFC_PHYSICAL_PORT; 2358 } 2359 2360 /* Initialize all internally managed lists. 
*/ 2361 INIT_LIST_HEAD(&vport->fc_nodes); 2362 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2363 spin_lock_init(&vport->work_port_lock); 2364 2365 init_timer(&vport->fc_disctmo); 2366 vport->fc_disctmo.function = lpfc_disc_timeout; 2367 vport->fc_disctmo.data = (unsigned long)vport; 2368 2369 init_timer(&vport->fc_fdmitmo); 2370 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2371 vport->fc_fdmitmo.data = (unsigned long)vport; 2372 2373 init_timer(&vport->els_tmofunc); 2374 vport->els_tmofunc.function = lpfc_els_timeout; 2375 vport->els_tmofunc.data = (unsigned long)vport; 2376 2377 error = scsi_add_host(shost, dev); 2378 if (error) 2379 goto out_put_shost; 2380 2381 spin_lock_irq(&phba->hbalock); 2382 list_add_tail(&vport->listentry, &phba->port_list); 2383 spin_unlock_irq(&phba->hbalock); 2384 return vport; 2385 2386out_put_shost: 2387 scsi_host_put(shost); 2388out: 2389 return NULL; 2390} 2391 2392/** 2393 * destroy_port - destroy an FC port 2394 * @vport: pointer to an lpfc virtual N_Port data structure. 2395 * 2396 * This routine destroys a FC port from the upper layer protocol. All the 2397 * resources associated with the port are released. 2398 **/ 2399void 2400destroy_port(struct lpfc_vport *vport) 2401{ 2402 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2403 struct lpfc_hba *phba = vport->phba; 2404 2405 lpfc_debugfs_terminate(vport); 2406 fc_remove_host(shost); 2407 scsi_remove_host(shost); 2408 2409 spin_lock_irq(&phba->hbalock); 2410 list_del_init(&vport->listentry); 2411 spin_unlock_irq(&phba->hbalock); 2412 2413 lpfc_cleanup(vport); 2414 return; 2415} 2416 2417/** 2418 * lpfc_get_instance - Get a unique integer ID 2419 * 2420 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2421 * uses the kernel idr facility to perform the task. 2422 * 2423 * Return codes: 2424 * instance - a unique integer ID allocated as the new instance. 2425 * -1 - lpfc get instance failed. 2426 **/ 2427int 2428lpfc_get_instance(void) 2429{ 2430 int instance = 0; 2431 2432 /* Assign an unused number */ 2433 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2434 return -1; 2435 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2436 return -1; 2437 return instance; 2438} 2439 2440/** 2441 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2442 * @shost: pointer to SCSI host data structure. 2443 * @time: elapsed time of the scan in jiffies. 2444 * 2445 * This routine is called by the SCSI layer with a SCSI host to determine 2446 * whether the scan host is finished. 2447 * 2448 * Note: there is no scan_start function as adapter initialization will have 2449 * asynchronously kicked off the link initialization. 2450 * 2451 * Return codes 2452 * 0 - SCSI host scan is not over yet. 2453 * 1 - SCSI host scan is over. 2454 **/ 2455int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2456{ 2457 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2458 struct lpfc_hba *phba = vport->phba; 2459 int stat = 0; 2460 2461 spin_lock_irq(shost->host_lock); 2462 2463 if (vport->load_flag & FC_UNLOADING) { 2464 stat = 1; 2465 goto finished; 2466 } 2467 if (time >= 30 * HZ) { 2468 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2469 "0461 Scanning longer than 30 " 2470 "seconds. Continuing initialization\n"); 2471 stat = 1; 2472 goto finished; 2473 } 2474 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2475 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2476 "0465 Link down longer than 15 " 2477 "seconds. 
Continuing initialization\n"); 2478 stat = 1; 2479 goto finished; 2480 } 2481 2482 if (vport->port_state != LPFC_VPORT_READY) 2483 goto finished; 2484 if (vport->num_disc_nodes || vport->fc_prli_sent) 2485 goto finished; 2486 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2487 goto finished; 2488 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2489 goto finished; 2490 2491 stat = 1; 2492 2493finished: 2494 spin_unlock_irq(shost->host_lock); 2495 return stat; 2496} 2497 2498/** 2499 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2500 * @shost: pointer to SCSI host data structure. 2501 * 2502 * This routine initializes a given SCSI host attributes on a FC port. The 2503 * SCSI host can be either on top of a physical port or a virtual port. 2504 **/ 2505void lpfc_host_attrib_init(struct Scsi_Host *shost) 2506{ 2507 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2508 struct lpfc_hba *phba = vport->phba; 2509 /* 2510 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 2511 */ 2512 2513 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2514 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2515 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2516 2517 memset(fc_host_supported_fc4s(shost), 0, 2518 sizeof(fc_host_supported_fc4s(shost))); 2519 fc_host_supported_fc4s(shost)[2] = 1; 2520 fc_host_supported_fc4s(shost)[7] = 1; 2521 2522 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2523 sizeof fc_host_symbolic_name(shost)); 2524 2525 fc_host_supported_speeds(shost) = 0; 2526 if (phba->lmt & LMT_10Gb) 2527 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2528 if (phba->lmt & LMT_8Gb) 2529 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2530 if (phba->lmt & LMT_4Gb) 2531 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2532 if (phba->lmt & LMT_2Gb) 2533 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2534 if (phba->lmt & LMT_1Gb) 2535 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2536 2537 fc_host_maxframe_size(shost) = 2538 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2539 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2540 2541 /* This value is also unchanging */ 2542 memset(fc_host_active_fc4s(shost), 0, 2543 sizeof(fc_host_active_fc4s(shost))); 2544 fc_host_active_fc4s(shost)[2] = 1; 2545 fc_host_active_fc4s(shost)[7] = 1; 2546 2547 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2548 spin_lock_irq(shost->host_lock); 2549 vport->load_flag &= ~FC_LOADING; 2550 spin_unlock_irq(shost->host_lock); 2551} 2552 2553/** 2554 * lpfc_stop_port_s3 - Stop SLI3 device port 2555 * @phba: pointer to lpfc hba data structure. 2556 * 2557 * This routine is invoked to stop an SLI3 device port, it stops the device 2558 * from generating interrupts and stops the device driver's timers for the 2559 * device. 2560 **/ 2561static void 2562lpfc_stop_port_s3(struct lpfc_hba *phba) 2563{ 2564 /* Clear all interrupt enable conditions */ 2565 writel(0, phba->HCregaddr); 2566 readl(phba->HCregaddr); /* flush */ 2567 /* Clear all pending interrupts */ 2568 writel(0xffffffff, phba->HAregaddr); 2569 readl(phba->HAregaddr); /* flush */ 2570 2571 /* Reset some HBA SLI setup states */ 2572 lpfc_stop_hba_timers(phba); 2573 phba->pport->work_port_events = 0; 2574} 2575 2576/** 2577 * lpfc_stop_port_s4 - Stop SLI4 device port 2578 * @phba: pointer to lpfc hba data structure. 
2579 * 2580 * This routine is invoked to stop an SLI4 device port, it stops the device 2581 * from generating interrupts and stops the device driver's timers for the 2582 * device. 2583 **/ 2584static void 2585lpfc_stop_port_s4(struct lpfc_hba *phba) 2586{ 2587 /* Reset some HBA SLI4 setup states */ 2588 lpfc_stop_hba_timers(phba); 2589 phba->pport->work_port_events = 0; 2590 phba->sli4_hba.intr_enable = 0; 2591 /* Hard clear it for now, shall have more graceful way to wait later */ 2592 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2593} 2594 2595/** 2596 * lpfc_stop_port - Wrapper function for stopping hba port 2597 * @phba: Pointer to HBA context object. 2598 * 2599 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 2600 * the API jump table function pointer from the lpfc_hba struct. 2601 **/ 2602void 2603lpfc_stop_port(struct lpfc_hba *phba) 2604{ 2605 phba->lpfc_stop_port(phba); 2606} 2607 2608/** 2609 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port. 2610 * @phba: pointer to lpfc hba data structure. 2611 * 2612 * This routine is invoked to remove the driver default fcf record from 2613 * the port. This routine currently acts on FCF Index 0. 2614 * 2615 **/ 2616void 2617lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) 2618{ 2619 int rc = 0; 2620 LPFC_MBOXQ_t *mboxq; 2621 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; 2622 uint32_t mbox_tmo, req_len; 2623 uint32_t shdr_status, shdr_add_status; 2624 2625 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2626 if (!mboxq) { 2627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2628 "2020 Failed to allocate mbox for ADD_FCF cmd\n"); 2629 return; 2630 } 2631 2632 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - 2633 sizeof(struct lpfc_sli4_cfg_mhdr); 2634 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2635 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, 2636 req_len, LPFC_SLI4_MBX_EMBED); 2637 /* 2638 * In phase 1, there is a single FCF index, 0. In phase2, the driver 2639 * supports multiple FCF indices. 2640 */ 2641 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; 2642 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); 2643 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, 2644 phba->fcf.fcf_indx); 2645 2646 if (!phba->sli4_hba.intr_enable) 2647 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2648 else { 2649 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 2650 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 2651 } 2652 /* The IOCTL status is embedded in the mailbox subheader. */ 2653 shdr_status = bf_get(lpfc_mbox_hdr_status, 2654 &del_fcf_record->header.cfg_shdr.response); 2655 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 2656 &del_fcf_record->header.cfg_shdr.response); 2657 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 2658 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2659 "2516 DEL FCF of default FCF Index failed " 2660 "mbx status x%x, status x%x add_status x%x\n", 2661 rc, shdr_status, shdr_add_status); 2662 } 2663 if (rc != MBX_TIMEOUT) 2664 mempool_free(mboxq, phba->mbox_mem_pool); 2665} 2666 2667/** 2668 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 2669 * @phba: pointer to lpfc hba data structure. 2670 * @acqe_link: pointer to the async link completion queue entry. 2671 * 2672 * This routine is to parse the SLI4 link-attention link fault code and 2673 * translate it into the base driver's read link attention mailbox command 2674 * status. 
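 *
 * Fault codes of none, local and remote all map to a zero (no fault) status;
 * any other fault code is reported as MBXERR_ERROR.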
2675 * 2676 * Return: Link-attention status in terms of base driver's coding. 2677 **/ 2678static uint16_t 2679lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 2680 struct lpfc_acqe_link *acqe_link) 2681{ 2682 uint16_t latt_fault; 2683 2684 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 2685 case LPFC_ASYNC_LINK_FAULT_NONE: 2686 case LPFC_ASYNC_LINK_FAULT_LOCAL: 2687 case LPFC_ASYNC_LINK_FAULT_REMOTE: 2688 latt_fault = 0; 2689 break; 2690 default: 2691 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2692 "0398 Invalid link fault code: x%x\n", 2693 bf_get(lpfc_acqe_link_fault, acqe_link)); 2694 latt_fault = MBXERR_ERROR; 2695 break; 2696 } 2697 return latt_fault; 2698} 2699 2700/** 2701 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 2702 * @phba: pointer to lpfc hba data structure. 2703 * @acqe_link: pointer to the async link completion queue entry. 2704 * 2705 * This routine is to parse the SLI4 link attention type and translate it 2706 * into the base driver's link attention type coding. 2707 * 2708 * Return: Link attention type in terms of base driver's coding. 2709 **/ 2710static uint8_t 2711lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 2712 struct lpfc_acqe_link *acqe_link) 2713{ 2714 uint8_t att_type; 2715 2716 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 2717 case LPFC_ASYNC_LINK_STATUS_DOWN: 2718 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 2719 att_type = AT_LINK_DOWN; 2720 break; 2721 case LPFC_ASYNC_LINK_STATUS_UP: 2722 /* Ignore physical link up events - wait for logical link up */ 2723 att_type = AT_RESERVED; 2724 break; 2725 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 2726 att_type = AT_LINK_UP; 2727 break; 2728 default: 2729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2730 "0399 Invalid link attention type: x%x\n", 2731 bf_get(lpfc_acqe_link_status, acqe_link)); 2732 att_type = AT_RESERVED; 2733 break; 2734 } 2735 return att_type; 2736} 2737 2738/** 2739 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 2740 * @phba: pointer to lpfc hba data structure. 2741 * @acqe_link: pointer to the async link completion queue entry. 2742 * 2743 * This routine is to parse the SLI4 link-attention link speed and translate 2744 * it into the base driver's link-attention link speed coding. 2745 * 2746 * Return: Link-attention link speed in terms of base driver's coding. 2747 **/ 2748static uint8_t 2749lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 2750 struct lpfc_acqe_link *acqe_link) 2751{ 2752 uint8_t link_speed; 2753 2754 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 2755 case LPFC_ASYNC_LINK_SPEED_ZERO: 2756 link_speed = LA_UNKNW_LINK; 2757 break; 2758 case LPFC_ASYNC_LINK_SPEED_10MBPS: 2759 link_speed = LA_UNKNW_LINK; 2760 break; 2761 case LPFC_ASYNC_LINK_SPEED_100MBPS: 2762 link_speed = LA_UNKNW_LINK; 2763 break; 2764 case LPFC_ASYNC_LINK_SPEED_1GBPS: 2765 link_speed = LA_1GHZ_LINK; 2766 break; 2767 case LPFC_ASYNC_LINK_SPEED_10GBPS: 2768 link_speed = LA_10GHZ_LINK; 2769 break; 2770 default: 2771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2772 "0483 Invalid link-attention link speed: x%x\n", 2773 bf_get(lpfc_acqe_link_speed, acqe_link)); 2774 link_speed = LA_UNKNW_LINK; 2775 break; 2776 } 2777 return link_speed; 2778} 2779 2780/** 2781 * lpfc_sli4_async_link_evt - Process the asynchronous link event 2782 * @phba: pointer to lpfc hba data structure. 2783 * @acqe_link: pointer to the async link completion queue entry. 2784 * 2785 * This routine is to handle the SLI4 asynchronous link event. 
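 *
 * In outline: only link-up and link-down attention types are processed (the
 * physical link-up event is ignored in favor of the logical link-up event).
 * A pseudo READ_LA mailbox command is built from the ACQE contents and
 * completed through lpfc_mbx_cmpl_read_la(), so that the base driver's
 * existing link attention handling is reused for SLI4 link events.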
2786 **/ 2787static void 2788lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 2789 struct lpfc_acqe_link *acqe_link) 2790{ 2791 struct lpfc_dmabuf *mp; 2792 LPFC_MBOXQ_t *pmb; 2793 MAILBOX_t *mb; 2794 READ_LA_VAR *la; 2795 uint8_t att_type; 2796 2797 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 2798 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 2799 return; 2800 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2801 if (!pmb) { 2802 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2803 "0395 The mboxq allocation failed\n"); 2804 return; 2805 } 2806 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2807 if (!mp) { 2808 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2809 "0396 The lpfc_dmabuf allocation failed\n"); 2810 goto out_free_pmb; 2811 } 2812 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2813 if (!mp->virt) { 2814 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2815 "0397 The mbuf allocation failed\n"); 2816 goto out_free_dmabuf; 2817 } 2818 2819 /* Cleanup any outstanding ELS commands */ 2820 lpfc_els_flush_all_cmd(phba); 2821 2822 /* Block ELS IOCBs until we have done process link event */ 2823 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2824 2825 /* Update link event statistics */ 2826 phba->sli.slistat.link_event++; 2827 2828 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ 2829 lpfc_read_la(phba, pmb, mp); 2830 pmb->vport = phba->pport; 2831 2832 /* Parse and translate status field */ 2833 mb = &pmb->u.mb; 2834 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 2835 2836 /* Parse and translate link attention fields */ 2837 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 2838 la->eventTag = acqe_link->event_tag; 2839 la->attType = att_type; 2840 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); 2841 2842 /* Fake the the following irrelvant fields */ 2843 la->topology = TOPOLOGY_PT_PT; 2844 la->granted_AL_PA = 0; 2845 la->il = 0; 2846 la->pb = 0; 2847 la->fa = 0; 2848 la->mm = 0; 2849 2850 /* Keep the link status for extra SLI4 state machine reference */ 2851 phba->sli4_hba.link_state.speed = 2852 bf_get(lpfc_acqe_link_speed, acqe_link); 2853 phba->sli4_hba.link_state.duplex = 2854 bf_get(lpfc_acqe_link_duplex, acqe_link); 2855 phba->sli4_hba.link_state.status = 2856 bf_get(lpfc_acqe_link_status, acqe_link); 2857 phba->sli4_hba.link_state.physical = 2858 bf_get(lpfc_acqe_link_physical, acqe_link); 2859 phba->sli4_hba.link_state.fault = 2860 bf_get(lpfc_acqe_link_fault, acqe_link); 2861 2862 /* Invoke the lpfc_handle_latt mailbox command callback function */ 2863 lpfc_mbx_cmpl_read_la(phba, pmb); 2864 2865 return; 2866 2867out_free_dmabuf: 2868 kfree(mp); 2869out_free_pmb: 2870 mempool_free(pmb, phba->mbox_mem_pool); 2871} 2872 2873/** 2874 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 2875 * @phba: pointer to lpfc hba data structure. 2876 * @acqe_link: pointer to the async fcoe completion queue entry. 2877 * 2878 * This routine is to handle the SLI4 asynchronous fcoe event. 2879 **/ 2880static void 2881lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, 2882 struct lpfc_acqe_fcoe *acqe_fcoe) 2883{ 2884 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 2885 int rc; 2886 2887 switch (event_type) { 2888 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 2889 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2890 "2546 New FCF found index 0x%x tag 0x%x \n", 2891 acqe_fcoe->fcf_index, 2892 acqe_fcoe->event_tag); 2893 /* 2894 * If the current FCF is in discovered state, 2895 * do nothing. 
2896 */ 2897 spin_lock_irq(&phba->hbalock); 2898 if (phba->fcf.fcf_flag & FCF_DISCOVERED) { 2899 spin_unlock_irq(&phba->hbalock); 2900 break; 2901 } 2902 spin_unlock_irq(&phba->hbalock); 2903 2904 /* Read the FCF table and re-discover SAN. */ 2905 rc = lpfc_sli4_read_fcf_record(phba, 2906 LPFC_FCOE_FCF_GET_FIRST); 2907 if (rc) 2908 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2909 "2547 Read FCF record failed 0x%x\n", 2910 rc); 2911 break; 2912 2913 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 2914 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2915 "2548 FCF Table full count 0x%x tag 0x%x \n", 2916 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 2917 acqe_fcoe->event_tag); 2918 break; 2919 2920 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2922 "2549 FCF disconnected from network index 0x%x" 2923 " tag 0x%x \n", acqe_fcoe->fcf_index, 2924 acqe_fcoe->event_tag); 2925 /* If the event is not for the currently used FCF, do nothing */ 2926 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) 2927 break; 2928 /* 2929 * Currently, the driver supports only one FCF - so treat this 2930 * as a link down. 2931 */ 2932 lpfc_linkdown(phba); 2933 /* Unregister FCF if no devices connected to it */ 2934 lpfc_unregister_unused_fcf(phba); 2935 break; 2936 2937 default: 2938 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2939 "0288 Unknown FCoE event type 0x%x event tag " 2940 "0x%x\n", event_type, acqe_fcoe->event_tag); 2941 break; 2942 } 2943 } 2944 2945 /** 2946 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 2947 * @phba: pointer to lpfc hba data structure. 2948 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 2949 * 2950 * This routine is to handle the SLI4 asynchronous dcbx event. 2951 **/ 2952 static void 2953 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 2954 struct lpfc_acqe_dcbx *acqe_dcbx) 2955 { 2956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2957 "0290 The SLI4 DCBX asynchronous event is not " 2958 "handled yet\n"); 2959 } 2960 2961 /** 2962 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events 2963 * @phba: pointer to lpfc hba data structure. 2964 * 2965 * This routine is invoked by the worker thread to process all the pending 2966 * SLI4 asynchronous events.
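 *
 * The routine first clears the ASYNC_EVENT flag, then drains the
 * sp_asynce_work_queue under the hba lock and dispatches each queued
 * lpfc_cq_event by its trailer code (link, FCoE or DCBX); every event is
 * released back to the CQ event pool once it has been handled.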
2967 **/ 2968void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 2969{ 2970 struct lpfc_cq_event *cq_event; 2971 2972 /* First, declare the async event has been handled */ 2973 spin_lock_irq(&phba->hbalock); 2974 phba->hba_flag &= ~ASYNC_EVENT; 2975 spin_unlock_irq(&phba->hbalock); 2976 /* Now, handle all the async events */ 2977 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 2978 /* Get the first event from the head of the event queue */ 2979 spin_lock_irq(&phba->hbalock); 2980 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 2981 cq_event, struct lpfc_cq_event, list); 2982 spin_unlock_irq(&phba->hbalock); 2983 /* Process the asynchronous event */ 2984 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 2985 case LPFC_TRAILER_CODE_LINK: 2986 lpfc_sli4_async_link_evt(phba, 2987 &cq_event->cqe.acqe_link); 2988 break; 2989 case LPFC_TRAILER_CODE_FCOE: 2990 lpfc_sli4_async_fcoe_evt(phba, 2991 &cq_event->cqe.acqe_fcoe); 2992 break; 2993 case LPFC_TRAILER_CODE_DCBX: 2994 lpfc_sli4_async_dcbx_evt(phba, 2995 &cq_event->cqe.acqe_dcbx); 2996 break; 2997 default: 2998 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2999 "1804 Invalid asynchrous event code: " 3000 "x%x\n", bf_get(lpfc_trailer_code, 3001 &cq_event->cqe.mcqe_cmpl)); 3002 break; 3003 } 3004 /* Free the completion event processed to the free pool */ 3005 lpfc_sli4_cq_event_release(phba, cq_event); 3006 } 3007} 3008 3009/** 3010 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3011 * @phba: pointer to lpfc hba data structure. 3012 * @dev_grp: The HBA PCI-Device group number. 3013 * 3014 * This routine is invoked to set up the per HBA PCI-Device group function 3015 * API jump table entries. 3016 * 3017 * Return: 0 if success, otherwise -ENODEV 3018 **/ 3019int 3020lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3021{ 3022 int rc; 3023 3024 /* Set up lpfc PCI-device group */ 3025 phba->pci_dev_grp = dev_grp; 3026 3027 /* The LPFC_PCI_DEV_OC uses SLI4 */ 3028 if (dev_grp == LPFC_PCI_DEV_OC) 3029 phba->sli_rev = LPFC_SLI_REV4; 3030 3031 /* Set up device INIT API function jump table */ 3032 rc = lpfc_init_api_table_setup(phba, dev_grp); 3033 if (rc) 3034 return -ENODEV; 3035 /* Set up SCSI API function jump table */ 3036 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 3037 if (rc) 3038 return -ENODEV; 3039 /* Set up SLI API function jump table */ 3040 rc = lpfc_sli_api_table_setup(phba, dev_grp); 3041 if (rc) 3042 return -ENODEV; 3043 /* Set up MBOX API function jump table */ 3044 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 3045 if (rc) 3046 return -ENODEV; 3047 3048 return 0; 3049} 3050 3051/** 3052 * lpfc_log_intr_mode - Log the active interrupt mode 3053 * @phba: pointer to lpfc hba data structure. 3054 * @intr_mode: active interrupt mode adopted. 3055 * 3056 * This routine it invoked to log the currently used active interrupt mode 3057 * to the device. 
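 *
 * The @intr_mode values logged are 0 for INTx, 1 for MSI and 2 for MSI-X;
 * any other value is reported as an illegal interrupt mode.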
3058 **/ 3059static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3060{ 3061 switch (intr_mode) { 3062 case 0: 3063 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3064 "0470 Enable INTx interrupt mode.\n"); 3065 break; 3066 case 1: 3067 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3068 "0481 Enabled MSI interrupt mode.\n"); 3069 break; 3070 case 2: 3071 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3072 "0480 Enabled MSI-X interrupt mode.\n"); 3073 break; 3074 default: 3075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3076 "0482 Illegal interrupt mode.\n"); 3077 break; 3078 } 3079 return; 3080} 3081 3082/** 3083 * lpfc_enable_pci_dev - Enable a generic PCI device. 3084 * @phba: pointer to lpfc hba data structure. 3085 * 3086 * This routine is invoked to enable the PCI device that is common to all 3087 * PCI devices. 3088 * 3089 * Return codes 3090 * 0 - sucessful 3091 * other values - error 3092 **/ 3093static int 3094lpfc_enable_pci_dev(struct lpfc_hba *phba) 3095{ 3096 struct pci_dev *pdev; 3097 int bars; 3098 3099 /* Obtain PCI device reference */ 3100 if (!phba->pcidev) 3101 goto out_error; 3102 else 3103 pdev = phba->pcidev; 3104 /* Select PCI BARs */ 3105 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3106 /* Enable PCI device */ 3107 if (pci_enable_device_mem(pdev)) 3108 goto out_error; 3109 /* Request PCI resource for the device */ 3110 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3111 goto out_disable_device; 3112 /* Set up device as PCI master and save state for EEH */ 3113 pci_set_master(pdev); 3114 pci_try_set_mwi(pdev); 3115 pci_save_state(pdev); 3116 3117 return 0; 3118 3119out_disable_device: 3120 pci_disable_device(pdev); 3121out_error: 3122 return -ENODEV; 3123} 3124 3125/** 3126 * lpfc_disable_pci_dev - Disable a generic PCI device. 3127 * @phba: pointer to lpfc hba data structure. 3128 * 3129 * This routine is invoked to disable the PCI device that is common to all 3130 * PCI devices. 3131 **/ 3132static void 3133lpfc_disable_pci_dev(struct lpfc_hba *phba) 3134{ 3135 struct pci_dev *pdev; 3136 int bars; 3137 3138 /* Obtain PCI device reference */ 3139 if (!phba->pcidev) 3140 return; 3141 else 3142 pdev = phba->pcidev; 3143 /* Select PCI BARs */ 3144 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3145 /* Release PCI resource and disable PCI device */ 3146 pci_release_selected_regions(pdev, bars); 3147 pci_disable_device(pdev); 3148 /* Null out PCI private reference to driver */ 3149 pci_set_drvdata(pdev, NULL); 3150 3151 return; 3152} 3153 3154/** 3155 * lpfc_reset_hba - Reset a hba 3156 * @phba: pointer to lpfc hba data structure. 3157 * 3158 * This routine is invoked to reset a hba device. It brings the HBA 3159 * offline, performs a board restart, and then brings the board back 3160 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 3161 * on outstanding mailbox commands. 3162 **/ 3163void 3164lpfc_reset_hba(struct lpfc_hba *phba) 3165{ 3166 /* If resets are disabled then set error state and return. */ 3167 if (!phba->cfg_enable_hba_reset) { 3168 phba->link_state = LPFC_HBA_ERROR; 3169 return; 3170 } 3171 lpfc_offline_prep(phba); 3172 lpfc_offline(phba); 3173 lpfc_sli_brdrestart(phba); 3174 lpfc_online(phba); 3175 lpfc_unblock_mgmt_io(phba); 3176} 3177 3178/** 3179 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3180 * @phba: pointer to lpfc hba data structure. 
3181 * 3182 * This routine is invoked to set up the driver internal resources specific to 3183 * support the SLI-3 HBA device it attached to. 3184 * 3185 * Return codes 3186 * 0 - sucessful 3187 * other values - error 3188 **/ 3189static int 3190lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3191{ 3192 struct lpfc_sli *psli; 3193 3194 /* 3195 * Initialize timers used by driver 3196 */ 3197 3198 /* Heartbeat timer */ 3199 init_timer(&phba->hb_tmofunc); 3200 phba->hb_tmofunc.function = lpfc_hb_timeout; 3201 phba->hb_tmofunc.data = (unsigned long)phba; 3202 3203 psli = &phba->sli; 3204 /* MBOX heartbeat timer */ 3205 init_timer(&psli->mbox_tmo); 3206 psli->mbox_tmo.function = lpfc_mbox_timeout; 3207 psli->mbox_tmo.data = (unsigned long) phba; 3208 /* FCP polling mode timer */ 3209 init_timer(&phba->fcp_poll_timer); 3210 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3211 phba->fcp_poll_timer.data = (unsigned long) phba; 3212 /* Fabric block timer */ 3213 init_timer(&phba->fabric_block_timer); 3214 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3215 phba->fabric_block_timer.data = (unsigned long) phba; 3216 /* EA polling mode timer */ 3217 init_timer(&phba->eratt_poll); 3218 phba->eratt_poll.function = lpfc_poll_eratt; 3219 phba->eratt_poll.data = (unsigned long) phba; 3220 3221 /* Host attention work mask setup */ 3222 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3223 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3224 3225 /* Get all the module params for configuring this host */ 3226 lpfc_get_cfgparam(phba); 3227 /* 3228 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3229 * used to create the sg_dma_buf_pool must be dynamically calculated. 3230 * 2 segments are added since the IOCB needs a command and response bde. 3231 */ 3232 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 3233 sizeof(struct fcp_rsp) + 3234 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 3235 3236 if (phba->cfg_enable_bg) { 3237 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 3238 phba->cfg_sg_dma_buf_size += 3239 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 3240 } 3241 3242 /* Also reinitialize the host templates with new values. */ 3243 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3244 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3245 3246 phba->max_vpi = LPFC_MAX_VPI; 3247 /* This will be set to correct value after config_port mbox */ 3248 phba->max_vports = 0; 3249 3250 /* 3251 * Initialize the SLI Layer to run with lpfc HBAs. 3252 */ 3253 lpfc_sli_setup(phba); 3254 lpfc_sli_queue_setup(phba); 3255 3256 /* Allocate device driver memory */ 3257 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 3258 return -ENOMEM; 3259 3260 return 0; 3261} 3262 3263/** 3264 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 3265 * @phba: pointer to lpfc hba data structure. 3266 * 3267 * This routine is invoked to unset the driver internal resources set up 3268 * specific for supporting the SLI-3 HBA device it attached to. 3269 **/ 3270static void 3271lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 3272{ 3273 /* Free device driver memory allocated */ 3274 lpfc_mem_free_all(phba); 3275 3276 return; 3277} 3278 3279/** 3280 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 3281 * @phba: pointer to lpfc hba data structure. 3282 * 3283 * This routine is invoked to set up the driver internal resources specific to 3284 * support the SLI-4 HBA device it attached to. 
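 *
 * In outline, the setup order is: wait for POST completion, initialize the
 * driver timers, read the module configuration parameters, size the scatter
 * gather DMA buffers, initialize the internal lists and SLI layer, allocate
 * driver memory, create the bootstrap mailbox, set the endian order, read
 * the port configuration, reset the PCI function, create the SLI4 queues and
 * CQ event pool, set up the sgl lists and rpi headers, and finally allocate
 * the fast-path EQ handle and MSI-X entry arrays. On failure the steps
 * already completed are unwound in reverse order.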
3285 * 3286 * Return codes 3287 * 0 - sucessful 3288 * other values - error 3289 **/ 3290static int 3291lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 3292{ 3293 struct lpfc_sli *psli; 3294 int rc; 3295 int i, hbq_count; 3296 3297 /* Before proceed, wait for POST done and device ready */ 3298 rc = lpfc_sli4_post_status_check(phba); 3299 if (rc) 3300 return -ENODEV; 3301 3302 /* 3303 * Initialize timers used by driver 3304 */ 3305 3306 /* Heartbeat timer */ 3307 init_timer(&phba->hb_tmofunc); 3308 phba->hb_tmofunc.function = lpfc_hb_timeout; 3309 phba->hb_tmofunc.data = (unsigned long)phba; 3310 3311 psli = &phba->sli; 3312 /* MBOX heartbeat timer */ 3313 init_timer(&psli->mbox_tmo); 3314 psli->mbox_tmo.function = lpfc_mbox_timeout; 3315 psli->mbox_tmo.data = (unsigned long) phba; 3316 /* Fabric block timer */ 3317 init_timer(&phba->fabric_block_timer); 3318 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3319 phba->fabric_block_timer.data = (unsigned long) phba; 3320 /* EA polling mode timer */ 3321 init_timer(&phba->eratt_poll); 3322 phba->eratt_poll.function = lpfc_poll_eratt; 3323 phba->eratt_poll.data = (unsigned long) phba; 3324 /* 3325 * We need to do a READ_CONFIG mailbox command here before 3326 * calling lpfc_get_cfgparam. For VFs this will report the 3327 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 3328 * All of the resources allocated 3329 * for this Port are tied to these values. 3330 */ 3331 /* Get all the module params for configuring this host */ 3332 lpfc_get_cfgparam(phba); 3333 phba->max_vpi = LPFC_MAX_VPI; 3334 /* This will be set to correct value after the read_config mbox */ 3335 phba->max_vports = 0; 3336 3337 /* Program the default value of vlan_id and fc_map */ 3338 phba->valid_vlan = 0; 3339 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 3340 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 3341 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 3342 3343 /* 3344 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3345 * used to create the sg_dma_buf_pool must be dynamically calculated. 3346 * 2 segments are added since the IOCB needs a command and response bde. 3347 * To insure that the scsi sgl does not cross a 4k page boundary only 3348 * sgl sizes of 1k, 2k, 4k, and 8k are supported. 
3349 * Table of sgl sizes and seg_cnt: 3350 * sgl size, sg_seg_cnt total seg 3351 * 1k 50 52 3352 * 2k 114 116 3353 * 4k 242 244 3354 * 8k 498 500 3355 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024 3356 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048 3357 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096 3358 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192 3359 */ 3360 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) 3361 phba->cfg_sg_seg_cnt = 50; 3362 else if (phba->cfg_sg_seg_cnt <= 114) 3363 phba->cfg_sg_seg_cnt = 114; 3364 else if (phba->cfg_sg_seg_cnt <= 242) 3365 phba->cfg_sg_seg_cnt = 242; 3366 else 3367 phba->cfg_sg_seg_cnt = 498; 3368 3369 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) 3370 + sizeof(struct fcp_rsp); 3371 phba->cfg_sg_dma_buf_size += 3372 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); 3373 3374 /* Initialize buffer queue management fields */ 3375 hbq_count = lpfc_sli_hbq_count(); 3376 for (i = 0; i < hbq_count; ++i) 3377 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 3378 INIT_LIST_HEAD(&phba->rb_pend_list); 3379 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 3380 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 3381 3382 /* 3383 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 3384 */ 3385 /* Initialize the Abort scsi buffer list used by driver */ 3386 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 3387 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 3388 /* This abort list used by worker thread */ 3389 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 3390 3391 /* 3392 * Initialize dirver internal slow-path work queues 3393 */ 3394 3395 /* Driver internel slow-path CQ Event pool */ 3396 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 3397 /* Response IOCB work queue list */ 3398 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); 3399 /* Asynchronous event CQ Event work queue list */ 3400 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 3401 /* Fast-path XRI aborted CQ Event work queue list */ 3402 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 3403 /* Slow-path XRI aborted CQ Event work queue list */ 3404 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 3405 /* Receive queue CQ Event work queue list */ 3406 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 3407 3408 /* Initialize the driver internal SLI layer lists. */ 3409 lpfc_sli_setup(phba); 3410 lpfc_sli_queue_setup(phba); 3411 3412 /* Allocate device driver memory */ 3413 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 3414 if (rc) 3415 return -ENOMEM; 3416 3417 /* Create the bootstrap mailbox command */ 3418 rc = lpfc_create_bootstrap_mbox(phba); 3419 if (unlikely(rc)) 3420 goto out_free_mem; 3421 3422 /* Set up the host's endian order with the device. */ 3423 rc = lpfc_setup_endian_order(phba); 3424 if (unlikely(rc)) 3425 goto out_free_bsmbx; 3426 3427 /* Set up the hba's configuration parameters. 
*/ 3428 rc = lpfc_sli4_read_config(phba); 3429 if (unlikely(rc)) 3430 goto out_free_bsmbx; 3431 3432 /* Perform a function reset */ 3433 rc = lpfc_pci_function_reset(phba); 3434 if (unlikely(rc)) 3435 goto out_free_bsmbx; 3436 3437 /* Create all the SLI4 queues */ 3438 rc = lpfc_sli4_queue_create(phba); 3439 if (rc) 3440 goto out_free_bsmbx; 3441 3442 /* Create driver internal CQE event pool */ 3443 rc = lpfc_sli4_cq_event_pool_create(phba); 3444 if (rc) 3445 goto out_destroy_queue; 3446 3447 /* Initialize and populate the iocb list per host */ 3448 rc = lpfc_init_sgl_list(phba); 3449 if (rc) { 3450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3451 "1400 Failed to initialize sgl list.\n"); 3452 goto out_destroy_cq_event_pool; 3453 } 3454 rc = lpfc_init_active_sgl_array(phba); 3455 if (rc) { 3456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3457 "1430 Failed to initialize sgl list.\n"); 3458 goto out_free_sgl_list; 3459 } 3460 3461 rc = lpfc_sli4_init_rpi_hdrs(phba); 3462 if (rc) { 3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3464 "1432 Failed to initialize rpi headers.\n"); 3465 goto out_free_active_sgl; 3466 } 3467 3468 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 3469 phba->cfg_fcp_eq_count), GFP_KERNEL); 3470 if (!phba->sli4_hba.fcp_eq_hdl) { 3471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3472 "2572 Failed allocate memory for fast-path " 3473 "per-EQ handle array\n"); 3474 goto out_remove_rpi_hdrs; 3475 } 3476 3477 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 3478 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 3479 if (!phba->sli4_hba.msix_entries) { 3480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3481 "2573 Failed allocate memory for msi-x " 3482 "interrupt vector entries\n"); 3483 goto out_free_fcp_eq_hdl; 3484 } 3485 3486 return rc; 3487 3488out_free_fcp_eq_hdl: 3489 kfree(phba->sli4_hba.fcp_eq_hdl); 3490out_remove_rpi_hdrs: 3491 lpfc_sli4_remove_rpi_hdrs(phba); 3492out_free_active_sgl: 3493 lpfc_free_active_sgl(phba); 3494out_free_sgl_list: 3495 lpfc_free_sgl_list(phba); 3496out_destroy_cq_event_pool: 3497 lpfc_sli4_cq_event_pool_destroy(phba); 3498out_destroy_queue: 3499 lpfc_sli4_queue_destroy(phba); 3500out_free_bsmbx: 3501 lpfc_destroy_bootstrap_mbox(phba); 3502out_free_mem: 3503 lpfc_mem_free(phba); 3504 return rc; 3505} 3506 3507/** 3508 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 3509 * @phba: pointer to lpfc hba data structure. 3510 * 3511 * This routine is invoked to unset the driver internal resources set up 3512 * specific for supporting the SLI-4 HBA device it attached to. 3513 **/ 3514static void 3515lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 3516{ 3517 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 3518 3519 /* unregister default FCFI from the HBA */ 3520 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); 3521 3522 /* Free the default FCR table */ 3523 lpfc_sli_remove_dflt_fcf(phba); 3524 3525 /* Free memory allocated for msi-x interrupt vector entries */ 3526 kfree(phba->sli4_hba.msix_entries); 3527 3528 /* Free memory allocated for fast-path work queue handles */ 3529 kfree(phba->sli4_hba.fcp_eq_hdl); 3530 3531 /* Free the allocated rpi headers. 
*/ 3532 lpfc_sli4_remove_rpi_hdrs(phba); 3533 3534 /* Free the ELS sgl list */ 3535 lpfc_free_active_sgl(phba); 3536 lpfc_free_sgl_list(phba); 3537 3538 /* Free the SCSI sgl management array */ 3539 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 3540 3541 /* Free the SLI4 queues */ 3542 lpfc_sli4_queue_destroy(phba); 3543 3544 /* Free the completion queue EQ event pool */ 3545 lpfc_sli4_cq_event_release_all(phba); 3546 lpfc_sli4_cq_event_pool_destroy(phba); 3547 3548 /* Reset SLI4 HBA FCoE function */ 3549 lpfc_pci_function_reset(phba); 3550 3551 /* Free the bsmbx region. */ 3552 lpfc_destroy_bootstrap_mbox(phba); 3553 3554 /* Free the SLI Layer memory with SLI4 HBAs */ 3555 lpfc_mem_free_all(phba); 3556 3557 /* Free the current connect table */ 3558 list_for_each_entry_safe(conn_entry, next_conn_entry, 3559 &phba->fcf_conn_rec_list, list) 3560 kfree(conn_entry); 3561 3562 return; 3563 } 3564 3565 /** 3566 * lpfc_init_api_table_setup - Set up init api function jump table 3567 * @phba: The hba struct for which this call is being executed. 3568 * @dev_grp: The HBA PCI-Device group number. 3569 * 3570 * This routine sets up the device INIT interface API function jump table 3571 * in the @phba struct. 3572 * 3573 * Returns: 0 - success, -ENODEV - failure. 3574 **/ 3575 int 3576 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3577 { 3578 switch (dev_grp) { 3579 case LPFC_PCI_DEV_LP: 3580 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 3581 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 3582 phba->lpfc_stop_port = lpfc_stop_port_s3; 3583 break; 3584 case LPFC_PCI_DEV_OC: 3585 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 3586 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 3587 phba->lpfc_stop_port = lpfc_stop_port_s4; 3588 break; 3589 default: 3590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3591 "1431 Invalid HBA PCI-device group: 0x%x\n", 3592 dev_grp); 3593 return -ENODEV; 3594 break; 3595 } 3596 return 0; 3597 } 3598 3599 /** 3600 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 3601 * @phba: pointer to lpfc hba data structure. 3602 * 3603 * This routine is invoked to set up the driver internal resources before the 3604 * device specific resource setup to support the HBA device it is attached to. 3605 * 3606 * Return codes 3607 * 0 - successful 3608 * other values - error 3609 **/ 3610 static int 3611 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 3612 { 3613 /* 3614 * Driver resources common to all SLI revisions 3615 */ 3616 atomic_set(&phba->fast_event_count, 0); 3617 spin_lock_init(&phba->hbalock); 3618 3619 /* Initialize ndlp management spinlock */ 3620 spin_lock_init(&phba->ndlp_lock); 3621 3622 INIT_LIST_HEAD(&phba->port_list); 3623 INIT_LIST_HEAD(&phba->work_list); 3624 init_waitqueue_head(&phba->wait_4_mlo_m_q); 3625 3626 /* Initialize the wait queue head for the kernel thread */ 3627 init_waitqueue_head(&phba->work_waitq); 3628 3629 /* Initialize the scsi buffer list used by driver for scsi IO */ 3630 spin_lock_init(&phba->scsi_buf_list_lock); 3631 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 3632 3633 /* Initialize the fabric iocb list */ 3634 INIT_LIST_HEAD(&phba->fabric_iocb_list); 3635 3636 /* Initialize list to save ELS buffers */ 3637 INIT_LIST_HEAD(&phba->elsbuf); 3638 3639 /* Initialize FCF connection rec list */ 3640 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 3641 3642 return 0; 3643 } 3644 3645 /** 3646 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 3647 * @phba: pointer to lpfc hba data structure.
3648 * 3649 * This routine is invoked to set up the driver internal resources after the 3650 * device specific resource setup to support the HBA device it attached to. 3651 * 3652 * Return codes 3653 * 0 - sucessful 3654 * other values - error 3655 **/ 3656static int 3657lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 3658{ 3659 int error; 3660 3661 /* Startup the kernel thread for this host adapter. */ 3662 phba->worker_thread = kthread_run(lpfc_do_work, phba, 3663 "lpfc_worker_%d", phba->brd_no); 3664 if (IS_ERR(phba->worker_thread)) { 3665 error = PTR_ERR(phba->worker_thread); 3666 return error; 3667 } 3668 3669 return 0; 3670} 3671 3672/** 3673 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 3674 * @phba: pointer to lpfc hba data structure. 3675 * 3676 * This routine is invoked to unset the driver internal resources set up after 3677 * the device specific resource setup for supporting the HBA device it 3678 * attached to. 3679 **/ 3680static void 3681lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 3682{ 3683 /* Stop kernel worker thread */ 3684 kthread_stop(phba->worker_thread); 3685} 3686 3687/** 3688 * lpfc_free_iocb_list - Free iocb list. 3689 * @phba: pointer to lpfc hba data structure. 3690 * 3691 * This routine is invoked to free the driver's IOCB list and memory. 3692 **/ 3693static void 3694lpfc_free_iocb_list(struct lpfc_hba *phba) 3695{ 3696 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 3697 3698 spin_lock_irq(&phba->hbalock); 3699 list_for_each_entry_safe(iocbq_entry, iocbq_next, 3700 &phba->lpfc_iocb_list, list) { 3701 list_del(&iocbq_entry->list); 3702 kfree(iocbq_entry); 3703 phba->total_iocbq_bufs--; 3704 } 3705 spin_unlock_irq(&phba->hbalock); 3706 3707 return; 3708} 3709 3710/** 3711 * lpfc_init_iocb_list - Allocate and initialize iocb list. 3712 * @phba: pointer to lpfc hba data structure. 3713 * 3714 * This routine is invoked to allocate and initizlize the driver's IOCB 3715 * list and set up the IOCB tag array accordingly. 3716 * 3717 * Return codes 3718 * 0 - sucessful 3719 * other values - error 3720 **/ 3721static int 3722lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 3723{ 3724 struct lpfc_iocbq *iocbq_entry = NULL; 3725 uint16_t iotag; 3726 int i; 3727 3728 /* Initialize and populate the iocb list per host. */ 3729 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 3730 for (i = 0; i < iocb_count; i++) { 3731 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 3732 if (iocbq_entry == NULL) { 3733 printk(KERN_ERR "%s: only allocated %d iocbs of " 3734 "expected %d count. Unloading driver.\n", 3735 __func__, i, LPFC_IOCB_LIST_CNT); 3736 goto out_free_iocbq; 3737 } 3738 3739 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 3740 if (iotag == 0) { 3741 kfree(iocbq_entry); 3742 printk(KERN_ERR "%s: failed to allocate IOTAG. " 3743 "Unloading driver.\n", __func__); 3744 goto out_free_iocbq; 3745 } 3746 iocbq_entry->sli4_xritag = NO_XRI; 3747 3748 spin_lock_irq(&phba->hbalock); 3749 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 3750 phba->total_iocbq_bufs++; 3751 spin_unlock_irq(&phba->hbalock); 3752 } 3753 3754 return 0; 3755 3756out_free_iocbq: 3757 lpfc_free_iocb_list(phba); 3758 3759 return -ENOMEM; 3760} 3761 3762/** 3763 * lpfc_free_sgl_list - Free sgl list. 3764 * @phba: pointer to lpfc hba data structure. 3765 * 3766 * This routine is invoked to free the driver's sgl list and memory. 
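 * The sglq entries are spliced off the sgl list under hbalock and then
 * released (both the mbuf and the sglq structure) with the lock dropped;
 * the ELS XRI management array set up in lpfc_init_sgl_list() is freed last.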
3767 **/ 3768static void 3769lpfc_free_sgl_list(struct lpfc_hba *phba) 3770{ 3771 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 3772 LIST_HEAD(sglq_list); 3773 int rc = 0; 3774 3775 spin_lock_irq(&phba->hbalock); 3776 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 3777 spin_unlock_irq(&phba->hbalock); 3778 3779 list_for_each_entry_safe(sglq_entry, sglq_next, 3780 &sglq_list, list) { 3781 list_del(&sglq_entry->list); 3782 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 3783 kfree(sglq_entry); 3784 phba->sli4_hba.total_sglq_bufs--; 3785 } 3786 rc = lpfc_sli4_remove_all_sgl_pages(phba); 3787 if (rc) { 3788 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3789 "2005 Unable to deregister pages from HBA: %x", rc); 3790 } 3791 kfree(phba->sli4_hba.lpfc_els_sgl_array); 3792} 3793 3794/** 3795 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 3796 * @phba: pointer to lpfc hba data structure. 3797 * 3798 * This routine is invoked to allocate the driver's active sgl memory. 3799 * This array will hold the sglq_entry's for active IOs. 3800 **/ 3801static int 3802lpfc_init_active_sgl_array(struct lpfc_hba *phba) 3803{ 3804 int size; 3805 size = sizeof(struct lpfc_sglq *); 3806 size *= phba->sli4_hba.max_cfg_param.max_xri; 3807 3808 phba->sli4_hba.lpfc_sglq_active_list = 3809 kzalloc(size, GFP_KERNEL); 3810 if (!phba->sli4_hba.lpfc_sglq_active_list) 3811 return -ENOMEM; 3812 return 0; 3813} 3814 3815/** 3816 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 3817 * @phba: pointer to lpfc hba data structure. 3818 * 3819 * This routine is invoked to walk through the array of active sglq entries 3820 * and free all of the resources. 3821 * This is just a place holder for now. 3822 **/ 3823static void 3824lpfc_free_active_sgl(struct lpfc_hba *phba) 3825{ 3826 kfree(phba->sli4_hba.lpfc_sglq_active_list); 3827} 3828 3829/** 3830 * lpfc_init_sgl_list - Allocate and initialize sgl list. 3831 * @phba: pointer to lpfc hba data structure. 3832 * 3833 * This routine is invoked to allocate and initizlize the driver's sgl 3834 * list and set up the sgl xritag tag array accordingly. 3835 * 3836 * Return codes 3837 * 0 - sucessful 3838 * other values - error 3839 **/ 3840static int 3841lpfc_init_sgl_list(struct lpfc_hba *phba) 3842{ 3843 struct lpfc_sglq *sglq_entry = NULL; 3844 int i; 3845 int els_xri_cnt; 3846 3847 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3848 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3849 "2400 lpfc_init_sgl_list els %d.\n", 3850 els_xri_cnt); 3851 /* Initialize and populate the sglq list per host/VF. 
*/ 3852 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 3853 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 3854 3855 /* Sanity check on XRI management */ 3856 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 3857 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3858 "2562 No room left for SCSI XRI allocation: " 3859 "max_xri=%d, els_xri=%d\n", 3860 phba->sli4_hba.max_cfg_param.max_xri, 3861 els_xri_cnt); 3862 return -ENOMEM; 3863 } 3864 3865 /* Allocate memory for the ELS XRI management array */ 3866 phba->sli4_hba.lpfc_els_sgl_array = 3867 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), 3868 GFP_KERNEL); 3869 3870 if (!phba->sli4_hba.lpfc_els_sgl_array) { 3871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3872 "2401 Failed to allocate memory for ELS " 3873 "XRI management array of size %d.\n", 3874 els_xri_cnt); 3875 return -ENOMEM; 3876 } 3877 3878 /* Keep the SCSI XRI into the XRI management array */ 3879 phba->sli4_hba.scsi_xri_max = 3880 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3881 phba->sli4_hba.scsi_xri_cnt = 0; 3882 3883 phba->sli4_hba.lpfc_scsi_psb_array = 3884 kzalloc((sizeof(struct lpfc_scsi_buf *) * 3885 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 3886 3887 if (!phba->sli4_hba.lpfc_scsi_psb_array) { 3888 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3889 "2563 Failed to allocate memory for SCSI " 3890 "XRI management array of size %d.\n", 3891 phba->sli4_hba.scsi_xri_max); 3892 kfree(phba->sli4_hba.lpfc_els_sgl_array); 3893 return -ENOMEM; 3894 } 3895 3896 for (i = 0; i < els_xri_cnt; i++) { 3897 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); 3898 if (sglq_entry == NULL) { 3899 printk(KERN_ERR "%s: only allocated %d sgls of " 3900 "expected %d count. Unloading driver.\n", 3901 __func__, i, els_xri_cnt); 3902 goto out_free_mem; 3903 } 3904 3905 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); 3906 if (sglq_entry->sli4_xritag == NO_XRI) { 3907 kfree(sglq_entry); 3908 printk(KERN_ERR "%s: failed to allocate XRI.\n" 3909 "Unloading driver.\n", __func__); 3910 goto out_free_mem; 3911 } 3912 sglq_entry->buff_type = GEN_BUFF_TYPE; 3913 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 3914 if (sglq_entry->virt == NULL) { 3915 kfree(sglq_entry); 3916 printk(KERN_ERR "%s: failed to allocate mbuf.\n" 3917 "Unloading driver.\n", __func__); 3918 goto out_free_mem; 3919 } 3920 sglq_entry->sgl = sglq_entry->virt; 3921 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3922 3923 /* The list order is used by later block SGL registraton */ 3924 spin_lock_irq(&phba->hbalock); 3925 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 3926 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 3927 phba->sli4_hba.total_sglq_bufs++; 3928 spin_unlock_irq(&phba->hbalock); 3929 } 3930 return 0; 3931 3932out_free_mem: 3933 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 3934 lpfc_free_sgl_list(phba); 3935 return -ENOMEM; 3936} 3937 3938/** 3939 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 3940 * @phba: pointer to lpfc hba data structure. 3941 * 3942 * This routine is invoked to post rpi header templates to the 3943 * HBA consistent with the SLI-4 interface spec. This routine 3944 * posts a PAGE_SIZE memory region to the port to hold up to 3945 * PAGE_SIZE modulo 64 rpi context headers. 3946 * No locks are held here because this is an initialization routine 3947 * called only from probe or lpfc_online when interrupts are not 3948 * enabled and the driver is reinitializing the device. 
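 * As a rough sizing sketch (assuming LPFC_RPI_HDR_COUNT is 64, matching the
 * 4KB/64-rpi region described for lpfc_sli4_create_rpi_hdr() below): a port
 * reporting 256 rpis would need four such regions in total, but only the
 * first one is posted here; the next_rpi bookkeeping below tracks where any
 * subsequent region postings start.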
3949 * 3950 * Return codes 3951 * 0 - sucessful 3952 * ENOMEM - No availble memory 3953 * EIO - The mailbox failed to complete successfully. 3954 **/ 3955int 3956lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 3957{ 3958 int rc = 0; 3959 int longs; 3960 uint16_t rpi_count; 3961 struct lpfc_rpi_hdr *rpi_hdr; 3962 3963 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 3964 3965 /* 3966 * Provision an rpi bitmask range for discovery. The total count 3967 * is the difference between max and base + 1. 3968 */ 3969 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 3970 phba->sli4_hba.max_cfg_param.max_rpi - 1; 3971 3972 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 3973 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 3974 GFP_KERNEL); 3975 if (!phba->sli4_hba.rpi_bmask) 3976 return -ENOMEM; 3977 3978 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 3979 if (!rpi_hdr) { 3980 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 3981 "0391 Error during rpi post operation\n"); 3982 lpfc_sli4_remove_rpis(phba); 3983 rc = -ENODEV; 3984 } 3985 3986 return rc; 3987} 3988 3989/** 3990 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 3991 * @phba: pointer to lpfc hba data structure. 3992 * 3993 * This routine is invoked to allocate a single 4KB memory region to 3994 * support rpis and stores them in the phba. This single region 3995 * provides support for up to 64 rpis. The region is used globally 3996 * by the device. 3997 * 3998 * Returns: 3999 * A valid rpi hdr on success. 4000 * A NULL pointer on any failure. 4001 **/ 4002struct lpfc_rpi_hdr * 4003lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 4004{ 4005 uint16_t rpi_limit, curr_rpi_range; 4006 struct lpfc_dmabuf *dmabuf; 4007 struct lpfc_rpi_hdr *rpi_hdr; 4008 4009 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4010 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4011 4012 spin_lock_irq(&phba->hbalock); 4013 curr_rpi_range = phba->sli4_hba.next_rpi; 4014 spin_unlock_irq(&phba->hbalock); 4015 4016 /* 4017 * The port has a limited number of rpis. The increment here 4018 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 4019 * and to allow the full max_rpi range per port. 4020 */ 4021 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4022 return NULL; 4023 4024 /* 4025 * First allocate the protocol header region for the port. The 4026 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 4027 */ 4028 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4029 if (!dmabuf) 4030 return NULL; 4031 4032 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4033 LPFC_HDR_TEMPLATE_SIZE, 4034 &dmabuf->phys, 4035 GFP_KERNEL); 4036 if (!dmabuf->virt) { 4037 rpi_hdr = NULL; 4038 goto err_free_dmabuf; 4039 } 4040 4041 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 4042 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 4043 rpi_hdr = NULL; 4044 goto err_free_coherent; 4045 } 4046 4047 /* Save the rpi header data for cleanup later. */ 4048 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4049 if (!rpi_hdr) 4050 goto err_free_coherent; 4051 4052 rpi_hdr->dmabuf = dmabuf; 4053 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4054 rpi_hdr->page_count = 1; 4055 spin_lock_irq(&phba->hbalock); 4056 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4057 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4058 4059 /* 4060 * The next_rpi stores the next module-64 rpi value to post 4061 * in any subsequent rpi memory region postings. 
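	 * For example, assuming rpi_base is 0 and LPFC_RPI_HDR_COUNT is 64,
	 * the region created here covers rpis 0-63 and next_rpi advances to
	 * 64, which is where the next posted region would begin.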
4062 */ 4063 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4064 spin_unlock_irq(&phba->hbalock); 4065 return rpi_hdr; 4066 4067 err_free_coherent: 4068 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4069 dmabuf->virt, dmabuf->phys); 4070 err_free_dmabuf: 4071 kfree(dmabuf); 4072 return NULL; 4073} 4074 4075/** 4076 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4077 * @phba: pointer to lpfc hba data structure. 4078 * 4079 * This routine is invoked to remove all memory resources allocated 4080 * to support rpis. This routine presumes the caller has released all 4081 * rpis consumed by fabric or port logins and is prepared to have 4082 * the header pages removed. 4083 **/ 4084void 4085lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4086{ 4087 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4088 4089 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4090 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4091 list_del(&rpi_hdr->list); 4092 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4093 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4094 kfree(rpi_hdr->dmabuf); 4095 kfree(rpi_hdr); 4096 } 4097 4098 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4099 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4100} 4101 4102/** 4103 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4104 * @pdev: pointer to pci device data structure. 4105 * 4106 * This routine is invoked to allocate the driver hba data structure for an 4107 * HBA device. If the allocation is successful, the phba reference to the 4108 * PCI device data structure is set. 4109 * 4110 * Return codes 4111 * pointer to @phba - sucessful 4112 * NULL - error 4113 **/ 4114static struct lpfc_hba * 4115lpfc_hba_alloc(struct pci_dev *pdev) 4116{ 4117 struct lpfc_hba *phba; 4118 4119 /* Allocate memory for HBA structure */ 4120 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4121 if (!phba) { 4122 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4123 "1417 Failed to allocate hba struct.\n"); 4124 return NULL; 4125 } 4126 4127 /* Set reference to PCI device in HBA structure */ 4128 phba->pcidev = pdev; 4129 4130 /* Assign an unused board number */ 4131 phba->brd_no = lpfc_get_instance(); 4132 if (phba->brd_no < 0) { 4133 kfree(phba); 4134 return NULL; 4135 } 4136 4137 return phba; 4138} 4139 4140/** 4141 * lpfc_hba_free - Free driver hba data structure with a device. 4142 * @phba: pointer to lpfc hba data structure. 4143 * 4144 * This routine is invoked to free the driver hba data structure with an 4145 * HBA device. 4146 **/ 4147static void 4148lpfc_hba_free(struct lpfc_hba *phba) 4149{ 4150 /* Release the driver assigned board number */ 4151 idr_remove(&lpfc_hba_index, phba->brd_no); 4152 4153 kfree(phba); 4154 return; 4155} 4156 4157/** 4158 * lpfc_create_shost - Create hba physical port with associated scsi host. 4159 * @phba: pointer to lpfc hba data structure. 4160 * 4161 * This routine is invoked to create HBA physical port and associate a SCSI 4162 * host with it. 
4163 * 4164 * Return codes 4165 * 0 - sucessful 4166 * other values - error 4167 **/ 4168static int 4169lpfc_create_shost(struct lpfc_hba *phba) 4170{ 4171 struct lpfc_vport *vport; 4172 struct Scsi_Host *shost; 4173 4174 /* Initialize HBA FC structure */ 4175 phba->fc_edtov = FF_DEF_EDTOV; 4176 phba->fc_ratov = FF_DEF_RATOV; 4177 phba->fc_altov = FF_DEF_ALTOV; 4178 phba->fc_arbtov = FF_DEF_ARBTOV; 4179 4180 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4181 if (!vport) 4182 return -ENODEV; 4183 4184 shost = lpfc_shost_from_vport(vport); 4185 phba->pport = vport; 4186 lpfc_debugfs_initialize(vport); 4187 /* Put reference to SCSI host to driver's device private data */ 4188 pci_set_drvdata(phba->pcidev, shost); 4189 4190 return 0; 4191} 4192 4193/** 4194 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 4195 * @phba: pointer to lpfc hba data structure. 4196 * 4197 * This routine is invoked to destroy HBA physical port and the associated 4198 * SCSI host. 4199 **/ 4200static void 4201lpfc_destroy_shost(struct lpfc_hba *phba) 4202{ 4203 struct lpfc_vport *vport = phba->pport; 4204 4205 /* Destroy physical port that associated with the SCSI host */ 4206 destroy_port(vport); 4207 4208 return; 4209} 4210 4211/** 4212 * lpfc_setup_bg - Setup Block guard structures and debug areas. 4213 * @phba: pointer to lpfc hba data structure. 4214 * @shost: the shost to be used to detect Block guard settings. 4215 * 4216 * This routine sets up the local Block guard protocol settings for @shost. 4217 * This routine also allocates memory for debugging bg buffers. 4218 **/ 4219static void 4220lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 4221{ 4222 int pagecnt = 10; 4223 if (lpfc_prot_mask && lpfc_prot_guard) { 4224 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4225 "1478 Registering BlockGuard with the " 4226 "SCSI layer\n"); 4227 scsi_host_set_prot(shost, lpfc_prot_mask); 4228 scsi_host_set_guard(shost, lpfc_prot_guard); 4229 } 4230 if (!_dump_buf_data) { 4231 while (pagecnt) { 4232 spin_lock_init(&_dump_buf_lock); 4233 _dump_buf_data = 4234 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4235 if (_dump_buf_data) { 4236 printk(KERN_ERR "BLKGRD allocated %d pages for " 4237 "_dump_buf_data at 0x%p\n", 4238 (1 << pagecnt), _dump_buf_data); 4239 _dump_buf_data_order = pagecnt; 4240 memset(_dump_buf_data, 0, 4241 ((1 << PAGE_SHIFT) << pagecnt)); 4242 break; 4243 } else 4244 --pagecnt; 4245 } 4246 if (!_dump_buf_data_order) 4247 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4248 "memory for hexdump\n"); 4249 } else 4250 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" 4251 "\n", _dump_buf_data); 4252 if (!_dump_buf_dif) { 4253 while (pagecnt) { 4254 _dump_buf_dif = 4255 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4256 if (_dump_buf_dif) { 4257 printk(KERN_ERR "BLKGRD allocated %d pages for " 4258 "_dump_buf_dif at 0x%p\n", 4259 (1 << pagecnt), _dump_buf_dif); 4260 _dump_buf_dif_order = pagecnt; 4261 memset(_dump_buf_dif, 0, 4262 ((1 << PAGE_SHIFT) << pagecnt)); 4263 break; 4264 } else 4265 --pagecnt; 4266 } 4267 if (!_dump_buf_dif_order) 4268 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4269 "memory for hexdump\n"); 4270 } else 4271 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", 4272 _dump_buf_dif); 4273} 4274 4275/** 4276 * lpfc_post_init_setup - Perform necessary device post initialization setup. 4277 * @phba: pointer to lpfc hba data structure. 
4278 * 4279 * This routine is invoked to perform all the necessary post initialization 4280 * setup for the device. 4281 **/ 4282static void 4283lpfc_post_init_setup(struct lpfc_hba *phba) 4284{ 4285 struct Scsi_Host *shost; 4286 struct lpfc_adapter_event_header adapter_event; 4287 4288 /* Get the default values for Model Name and Description */ 4289 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 4290 4291 /* 4292 * hba setup may have changed the hba_queue_depth so we need to 4293 * adjust the value of can_queue. 4294 */ 4295 shost = pci_get_drvdata(phba->pcidev); 4296 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4297 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4298 lpfc_setup_bg(phba, shost); 4299 4300 lpfc_host_attrib_init(shost); 4301 4302 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 4303 spin_lock_irq(shost->host_lock); 4304 lpfc_poll_start_timer(phba); 4305 spin_unlock_irq(shost->host_lock); 4306 } 4307 4308 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4309 "0428 Perform SCSI scan\n"); 4310 /* Send board arrival event to upper layer */ 4311 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 4312 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 4313 fc_host_post_vendor_event(shost, fc_get_event_number(), 4314 sizeof(adapter_event), 4315 (char *) &adapter_event, 4316 LPFC_NL_VENDOR_ID); 4317 return; 4318} 4319 4320/** 4321 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 4322 * @phba: pointer to lpfc hba data structure. 4323 * 4324 * This routine is invoked to set up the PCI device memory space for device 4325 * with SLI-3 interface spec. 4326 * 4327 * Return codes 4328 * 0 - sucessful 4329 * other values - error 4330 **/ 4331static int 4332lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 4333{ 4334 struct pci_dev *pdev; 4335 unsigned long bar0map_len, bar2map_len; 4336 int i, hbq_count; 4337 void *ptr; 4338 int error = -ENODEV; 4339 4340 /* Obtain PCI device reference */ 4341 if (!phba->pcidev) 4342 return error; 4343 else 4344 pdev = phba->pcidev; 4345 4346 /* Set the device DMA mask size */ 4347 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 4348 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 4349 return error; 4350 4351 /* Get the bus address of Bar0 and Bar2 and the number of bytes 4352 * required by each mapping. 4353 */ 4354 phba->pci_bar0_map = pci_resource_start(pdev, 0); 4355 bar0map_len = pci_resource_len(pdev, 0); 4356 4357 phba->pci_bar2_map = pci_resource_start(pdev, 2); 4358 bar2map_len = pci_resource_len(pdev, 2); 4359 4360 /* Map HBA SLIM to a kernel virtual address. */ 4361 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 4362 if (!phba->slim_memmap_p) { 4363 dev_printk(KERN_ERR, &pdev->dev, 4364 "ioremap failed for SLIM memory.\n"); 4365 goto out; 4366 } 4367 4368 /* Map HBA Control Registers to a kernel virtual address. 
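	 * (This is BAR2 of the function; BAR0, mapped just above, holds the
	 * SLIM region.)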
*/ 4369 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 4370 if (!phba->ctrl_regs_memmap_p) { 4371 dev_printk(KERN_ERR, &pdev->dev, 4372 "ioremap failed for HBA control registers.\n"); 4373 goto out_iounmap_slim; 4374 } 4375 4376 /* Allocate memory for SLI-2 structures */ 4377 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 4378 SLI2_SLIM_SIZE, 4379 &phba->slim2p.phys, 4380 GFP_KERNEL); 4381 if (!phba->slim2p.virt) 4382 goto out_iounmap; 4383 4384 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 4385 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 4386 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 4387 phba->IOCBs = (phba->slim2p.virt + 4388 offsetof(struct lpfc_sli2_slim, IOCBs)); 4389 4390 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 4391 lpfc_sli_hbq_size(), 4392 &phba->hbqslimp.phys, 4393 GFP_KERNEL); 4394 if (!phba->hbqslimp.virt) 4395 goto out_free_slim; 4396 4397 hbq_count = lpfc_sli_hbq_count(); 4398 ptr = phba->hbqslimp.virt; 4399 for (i = 0; i < hbq_count; ++i) { 4400 phba->hbqs[i].hbq_virt = ptr; 4401 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4402 ptr += (lpfc_hbq_defs[i]->entry_count * 4403 sizeof(struct lpfc_hbq_entry)); 4404 } 4405 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 4406 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 4407 4408 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 4409 4410 INIT_LIST_HEAD(&phba->rb_pend_list); 4411 4412 phba->MBslimaddr = phba->slim_memmap_p; 4413 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 4414 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 4415 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 4416 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 4417 4418 return 0; 4419 4420out_free_slim: 4421 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 4422 phba->slim2p.virt, phba->slim2p.phys); 4423out_iounmap: 4424 iounmap(phba->ctrl_regs_memmap_p); 4425out_iounmap_slim: 4426 iounmap(phba->slim_memmap_p); 4427out: 4428 return error; 4429} 4430 4431/** 4432 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 4433 * @phba: pointer to lpfc hba data structure. 4434 * 4435 * This routine is invoked to unset the PCI device memory space for device 4436 * with SLI-3 interface spec. 4437 **/ 4438static void 4439lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 4440{ 4441 struct pci_dev *pdev; 4442 4443 /* Obtain PCI device reference */ 4444 if (!phba->pcidev) 4445 return; 4446 else 4447 pdev = phba->pcidev; 4448 4449 /* Free coherent DMA memory allocated */ 4450 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 4451 phba->hbqslimp.virt, phba->hbqslimp.phys); 4452 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 4453 phba->slim2p.virt, phba->slim2p.phys); 4454 4455 /* I/O memory unmap */ 4456 iounmap(phba->ctrl_regs_memmap_p); 4457 iounmap(phba->slim_memmap_p); 4458 4459 return; 4460} 4461 4462/** 4463 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 4464 * @phba: pointer to lpfc hba data structure. 4465 * 4466 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 4467 * done and check status. 4468 * 4469 * Return 0 if successful, otherwise -ENODEV. 
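 * The port status register is polled in 10 ms steps for up to 3000
 * iterations, i.e. roughly the 30 second POST window noted in the code.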
4470 **/ 4471int 4472lpfc_sli4_post_status_check(struct lpfc_hba *phba) 4473{ 4474 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; 4475 uint32_t onlnreg0, onlnreg1; 4476 int i, port_error = -ENODEV; 4477 4478 if (!phba->sli4_hba.STAregaddr) 4479 return -ENODEV; 4480 4481 /* With uncoverable error, log the error message and return error */ 4482 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 4483 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 4484 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 4485 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 4486 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); 4487 if (uerrlo_reg.word0 || uerrhi_reg.word0) { 4488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4489 "1422 HBA Unrecoverable error: " 4490 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 4491 "online0_reg=0x%x, online1_reg=0x%x\n", 4492 uerrlo_reg.word0, uerrhi_reg.word0, 4493 onlnreg0, onlnreg1); 4494 } 4495 return -ENODEV; 4496 } 4497 4498 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 4499 for (i = 0; i < 3000; i++) { 4500 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); 4501 /* Encounter fatal POST error, break out */ 4502 if (bf_get(lpfc_hst_state_perr, &sta_reg)) { 4503 port_error = -ENODEV; 4504 break; 4505 } 4506 if (LPFC_POST_STAGE_ARMFW_READY == 4507 bf_get(lpfc_hst_state_port_status, &sta_reg)) { 4508 port_error = 0; 4509 break; 4510 } 4511 msleep(10); 4512 } 4513 4514 if (port_error) 4515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4516 "1408 Failure HBA POST Status: sta_reg=0x%x, " 4517 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " 4518 "dl=x%x, pstatus=x%x\n", sta_reg.word0, 4519 bf_get(lpfc_hst_state_perr, &sta_reg), 4520 bf_get(lpfc_hst_state_sfi, &sta_reg), 4521 bf_get(lpfc_hst_state_nip, &sta_reg), 4522 bf_get(lpfc_hst_state_ipc, &sta_reg), 4523 bf_get(lpfc_hst_state_xrom, &sta_reg), 4524 bf_get(lpfc_hst_state_dl, &sta_reg), 4525 bf_get(lpfc_hst_state_port_status, &sta_reg)); 4526 4527 /* Log device information */ 4528 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); 4529 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4530 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 4531 "FeatureL1=0x%x, FeatureL2=0x%x\n", 4532 bf_get(lpfc_scratchpad_chiptype, &scratchpad), 4533 bf_get(lpfc_scratchpad_slirev, &scratchpad), 4534 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 4535 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 4536 4537 return port_error; 4538} 4539 4540/** 4541 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 4542 * @phba: pointer to lpfc hba data structure. 4543 * 4544 * This routine is invoked to set up SLI4 BAR0 PCI config space register 4545 * memory map. 4546 **/ 4547static void 4548lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) 4549{ 4550 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 4551 LPFC_UERR_STATUS_LO; 4552 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 4553 LPFC_UERR_STATUS_HI; 4554 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + 4555 LPFC_ONLINE0; 4556 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + 4557 LPFC_ONLINE1; 4558 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + 4559 LPFC_SCRATCHPAD; 4560} 4561 4562/** 4563 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 4564 * @phba: pointer to lpfc hba data structure. 
4565 * 4566 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 4567 * memory map. 4568 **/ 4569static void 4570lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 4571{ 4572 4573 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 4574 LPFC_HST_STATE; 4575 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 4576 LPFC_HST_ISR0; 4577 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 4578 LPFC_HST_IMR0; 4579 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 4580 LPFC_HST_ISCR0; 4581 return; 4582} 4583 4584/** 4585 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 4586 * @phba: pointer to lpfc hba data structure. 4587 * @vf: virtual function number 4588 * 4589 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 4590 * based on the given viftual function number, @vf. 4591 * 4592 * Return 0 if successful, otherwise -ENODEV. 4593 **/ 4594static int 4595lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 4596{ 4597 if (vf > LPFC_VIR_FUNC_MAX) 4598 return -ENODEV; 4599 4600 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 4601 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 4602 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 4603 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 4604 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 4605 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 4606 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 4607 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 4608 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 4609 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 4610 return 0; 4611} 4612 4613/** 4614 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 4615 * @phba: pointer to lpfc hba data structure. 4616 * 4617 * This routine is invoked to create the bootstrap mailbox 4618 * region consistent with the SLI-4 interface spec. This 4619 * routine allocates all memory necessary to communicate 4620 * mailbox commands to the port and sets up all alignment 4621 * needs. No locks are expected to be held when calling 4622 * this routine. 4623 * 4624 * Return codes 4625 * 0 - sucessful 4626 * ENOMEM - could not allocated memory. 4627 **/ 4628static int 4629lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 4630{ 4631 uint32_t bmbx_size; 4632 struct lpfc_dmabuf *dmabuf; 4633 struct dma_address *dma_address; 4634 uint32_t pa_addr; 4635 uint64_t phys_addr; 4636 4637 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4638 if (!dmabuf) 4639 return -ENOMEM; 4640 4641 /* 4642 * The bootstrap mailbox region is comprised of 2 parts 4643 * plus an alignment restriction of 16 bytes. 4644 */ 4645 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 4646 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4647 bmbx_size, 4648 &dmabuf->phys, 4649 GFP_KERNEL); 4650 if (!dmabuf->virt) { 4651 kfree(dmabuf); 4652 return -ENOMEM; 4653 } 4654 memset(dmabuf->virt, 0, bmbx_size); 4655 4656 /* 4657 * Initialize the bootstrap mailbox pointers now so that the register 4658 * operations are simple later. The mailbox dma address is required 4659 * to be 16-byte aligned. Also align the virtual memory as each 4660 * maibox is copied into the bmbx mailbox region before issuing the 4661 * command to the port. 
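	 * As a sketch of the alignment step: if dma_alloc_coherent() returned
	 * a physical address that is not 16-byte aligned, ALIGN() rounds it up
	 * to the next 16-byte boundary and PTR_ALIGN() does the same for the
	 * virtual address, which is why the allocation above is padded by
	 * LPFC_ALIGN_16_BYTE - 1 bytes.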
4662 */ 4663 phba->sli4_hba.bmbx.dmabuf = dmabuf; 4664 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 4665 4666 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 4667 LPFC_ALIGN_16_BYTE); 4668 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 4669 LPFC_ALIGN_16_BYTE); 4670 4671 /* 4672 * Set the high and low physical addresses now. The SLI4 alignment 4673 * requirement is 16 bytes and the mailbox is posted to the port 4674 * as two 30-bit addresses. The other data is a bit marking whether 4675 * the 30-bit address is the high or low address. 4676 * Upcast bmbx aphys to 64bits so shift instruction compiles 4677 * clean on 32 bit machines. 4678 */ 4679 dma_address = &phba->sli4_hba.bmbx.dma_address; 4680 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 4681 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 4682 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 4683 LPFC_BMBX_BIT1_ADDR_HI); 4684 4685 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 4686 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 4687 LPFC_BMBX_BIT1_ADDR_LO); 4688 return 0; 4689} 4690 4691/** 4692 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 4693 * @phba: pointer to lpfc hba data structure. 4694 * 4695 * This routine is invoked to teardown the bootstrap mailbox 4696 * region and release all host resources. This routine requires 4697 * the caller to ensure all mailbox commands recovered, no 4698 * additional mailbox comands are sent, and interrupts are disabled 4699 * before calling this routine. 4700 * 4701 **/ 4702static void 4703lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 4704{ 4705 dma_free_coherent(&phba->pcidev->dev, 4706 phba->sli4_hba.bmbx.bmbx_size, 4707 phba->sli4_hba.bmbx.dmabuf->virt, 4708 phba->sli4_hba.bmbx.dmabuf->phys); 4709 4710 kfree(phba->sli4_hba.bmbx.dmabuf); 4711 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 4712} 4713 4714/** 4715 * lpfc_sli4_read_config - Get the config parameters. 4716 * @phba: pointer to lpfc hba data structure. 4717 * 4718 * This routine is invoked to read the configuration parameters from the HBA. 4719 * The configuration parameters are used to set the base and maximum values 4720 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 4721 * allocation for the port. 4722 * 4723 * Return codes 4724 * 0 - sucessful 4725 * ENOMEM - No availble memory 4726 * EIO - The mailbox failed to complete successfully. 
4727 **/ 4728static int 4729lpfc_sli4_read_config(struct lpfc_hba *phba) 4730{ 4731 LPFC_MBOXQ_t *pmb; 4732 struct lpfc_mbx_read_config *rd_config; 4733 uint32_t rc = 0; 4734 4735 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4736 if (!pmb) { 4737 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4738 "2011 Unable to allocate memory for issuing " 4739 "SLI_CONFIG_SPECIAL mailbox command\n"); 4740 return -ENOMEM; 4741 } 4742 4743 lpfc_read_config(phba, pmb); 4744 4745 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4746 if (rc != MBX_SUCCESS) { 4747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4748 "2012 Mailbox failed , mbxCmd x%x " 4749 "READ_CONFIG, mbxStatus x%x\n", 4750 bf_get(lpfc_mqe_command, &pmb->u.mqe), 4751 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 4752 rc = -EIO; 4753 } else { 4754 rd_config = &pmb->u.mqe.un.rd_config; 4755 phba->sli4_hba.max_cfg_param.max_xri = 4756 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 4757 phba->sli4_hba.max_cfg_param.xri_base = 4758 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 4759 phba->sli4_hba.max_cfg_param.max_vpi = 4760 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 4761 phba->sli4_hba.max_cfg_param.vpi_base = 4762 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 4763 phba->sli4_hba.max_cfg_param.max_rpi = 4764 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 4765 phba->sli4_hba.max_cfg_param.rpi_base = 4766 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 4767 phba->sli4_hba.max_cfg_param.max_vfi = 4768 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 4769 phba->sli4_hba.max_cfg_param.vfi_base = 4770 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 4771 phba->sli4_hba.max_cfg_param.max_fcfi = 4772 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 4773 phba->sli4_hba.max_cfg_param.fcfi_base = 4774 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); 4775 phba->sli4_hba.max_cfg_param.max_eq = 4776 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 4777 phba->sli4_hba.max_cfg_param.max_rq = 4778 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 4779 phba->sli4_hba.max_cfg_param.max_wq = 4780 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 4781 phba->sli4_hba.max_cfg_param.max_cq = 4782 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 4783 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 4784 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 4785 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 4786 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 4787 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4788 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; 4789 phba->max_vports = phba->max_vpi; 4790 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4791 "2003 cfg params XRI(B:%d M:%d), " 4792 "VPI(B:%d M:%d) " 4793 "VFI(B:%d M:%d) " 4794 "RPI(B:%d M:%d) " 4795 "FCFI(B:%d M:%d)\n", 4796 phba->sli4_hba.max_cfg_param.xri_base, 4797 phba->sli4_hba.max_cfg_param.max_xri, 4798 phba->sli4_hba.max_cfg_param.vpi_base, 4799 phba->sli4_hba.max_cfg_param.max_vpi, 4800 phba->sli4_hba.max_cfg_param.vfi_base, 4801 phba->sli4_hba.max_cfg_param.max_vfi, 4802 phba->sli4_hba.max_cfg_param.rpi_base, 4803 phba->sli4_hba.max_cfg_param.max_rpi, 4804 phba->sli4_hba.max_cfg_param.fcfi_base, 4805 phba->sli4_hba.max_cfg_param.max_fcfi); 4806 } 4807 mempool_free(pmb, phba->mbox_mem_pool); 4808 4809 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 4810 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) 4811 phba->cfg_hba_queue_depth = 4812 phba->sli4_hba.max_cfg_param.max_xri; 4813 return rc; 4814} 4815 4816/** 4817 * 
lpfc_setup_endian_order - Notify the port of the host's endian order.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the host-side endian order with the
 * HBA, consistent with the SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0492 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/*
	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
	 * words to contain special data values and no other data.
	 */
	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
				"status x%x\n",
				rc);
		rc = -EIO;
	}

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as a placeholder.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
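 * The requested fast-path WQ and EQ counts (cfg_fcp_wq_count and
 * cfg_fcp_eq_count) are first clamped against the max_wq/max_eq limits
 * reported by READ_CONFIG, and the EQ count is never allowed to exceed the
 * WQ count, before any queue memory is allocated. The WQs themselves are
 * later bound to the FCP CQs in round-robin order by lpfc_sli4_queue_setup().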
4876 **/ 4877static int 4878lpfc_sli4_queue_create(struct lpfc_hba *phba) 4879{ 4880 struct lpfc_queue *qdesc; 4881 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 4882 int cfg_fcp_wq_count; 4883 int cfg_fcp_eq_count; 4884 4885 /* 4886 * Sanity check for confiugred queue parameters against the run-time 4887 * device parameters 4888 */ 4889 4890 /* Sanity check on FCP fast-path WQ parameters */ 4891 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 4892 if (cfg_fcp_wq_count > 4893 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 4894 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 4895 LPFC_SP_WQN_DEF; 4896 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 4897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4898 "2581 Not enough WQs (%d) from " 4899 "the pci function for supporting " 4900 "FCP WQs (%d)\n", 4901 phba->sli4_hba.max_cfg_param.max_wq, 4902 phba->cfg_fcp_wq_count); 4903 goto out_error; 4904 } 4905 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4906 "2582 Not enough WQs (%d) from the pci " 4907 "function for supporting the requested " 4908 "FCP WQs (%d), the actual FCP WQs can " 4909 "be supported: %d\n", 4910 phba->sli4_hba.max_cfg_param.max_wq, 4911 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 4912 } 4913 /* The actual number of FCP work queues adopted */ 4914 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 4915 4916 /* Sanity check on FCP fast-path EQ parameters */ 4917 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 4918 if (cfg_fcp_eq_count > 4919 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 4920 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 4921 LPFC_SP_EQN_DEF; 4922 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 4923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4924 "2574 Not enough EQs (%d) from the " 4925 "pci function for supporting FCP " 4926 "EQs (%d)\n", 4927 phba->sli4_hba.max_cfg_param.max_eq, 4928 phba->cfg_fcp_eq_count); 4929 goto out_error; 4930 } 4931 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4932 "2575 Not enough EQs (%d) from the pci " 4933 "function for supporting the requested " 4934 "FCP EQs (%d), the actual FCP EQs can " 4935 "be supported: %d\n", 4936 phba->sli4_hba.max_cfg_param.max_eq, 4937 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 4938 } 4939 /* It does not make sense to have more EQs than WQs */ 4940 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 4941 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4942 "2593 The number of FCP EQs (%d) is more " 4943 "than the number of FCP WQs (%d), take " 4944 "the number of FCP EQs same as than of " 4945 "WQs (%d)\n", cfg_fcp_eq_count, 4946 phba->cfg_fcp_wq_count, 4947 phba->cfg_fcp_wq_count); 4948 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 4949 } 4950 /* The actual number of FCP event queues adopted */ 4951 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 4952 /* The overall number of event queues used */ 4953 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 4954 4955 /* 4956 * Create Event Queues (EQs) 4957 */ 4958 4959 /* Get EQ depth from module parameter, fake the default for now */ 4960 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 4961 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 4962 4963 /* Create slow path event queue */ 4964 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 4965 phba->sli4_hba.eq_ecount); 4966 if (!qdesc) { 4967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4968 "0496 Failed allocate slow-path EQ\n"); 4969 goto out_error; 4970 } 4971 phba->sli4_hba.sp_eq = qdesc; 4972 4973 /* Create fast-path FCP Event Queue(s) */ 4974 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct 
lpfc_queue *) * 4975 phba->cfg_fcp_eq_count), GFP_KERNEL); 4976 if (!phba->sli4_hba.fp_eq) { 4977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4978 "2576 Failed allocate memory for fast-path " 4979 "EQ record array\n"); 4980 goto out_free_sp_eq; 4981 } 4982 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 4983 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 4984 phba->sli4_hba.eq_ecount); 4985 if (!qdesc) { 4986 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4987 "0497 Failed allocate fast-path EQ\n"); 4988 goto out_free_fp_eq; 4989 } 4990 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 4991 } 4992 4993 /* 4994 * Create Complete Queues (CQs) 4995 */ 4996 4997 /* Get CQ depth from module parameter, fake the default for now */ 4998 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 4999 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 5000 5001 /* Create slow-path Mailbox Command Complete Queue */ 5002 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5003 phba->sli4_hba.cq_ecount); 5004 if (!qdesc) { 5005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5006 "0500 Failed allocate slow-path mailbox CQ\n"); 5007 goto out_free_fp_eq; 5008 } 5009 phba->sli4_hba.mbx_cq = qdesc; 5010 5011 /* Create slow-path ELS Complete Queue */ 5012 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5013 phba->sli4_hba.cq_ecount); 5014 if (!qdesc) { 5015 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5016 "0501 Failed allocate slow-path ELS CQ\n"); 5017 goto out_free_mbx_cq; 5018 } 5019 phba->sli4_hba.els_cq = qdesc; 5020 5021 /* Create slow-path Unsolicited Receive Complete Queue */ 5022 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5023 phba->sli4_hba.cq_ecount); 5024 if (!qdesc) { 5025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5026 "0502 Failed allocate slow-path USOL RX CQ\n"); 5027 goto out_free_els_cq; 5028 } 5029 phba->sli4_hba.rxq_cq = qdesc; 5030 5031 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5032 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5033 phba->cfg_fcp_eq_count), GFP_KERNEL); 5034 if (!phba->sli4_hba.fcp_cq) { 5035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5036 "2577 Failed allocate memory for fast-path " 5037 "CQ record array\n"); 5038 goto out_free_rxq_cq; 5039 } 5040 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5041 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5042 phba->sli4_hba.cq_ecount); 5043 if (!qdesc) { 5044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5045 "0499 Failed allocate fast-path FCP " 5046 "CQ (%d)\n", fcp_cqidx); 5047 goto out_free_fcp_cq; 5048 } 5049 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 5050 } 5051 5052 /* Create Mailbox Command Queue */ 5053 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 5054 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 5055 5056 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 5057 phba->sli4_hba.mq_ecount); 5058 if (!qdesc) { 5059 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5060 "0505 Failed allocate slow-path MQ\n"); 5061 goto out_free_fcp_cq; 5062 } 5063 phba->sli4_hba.mbx_wq = qdesc; 5064 5065 /* 5066 * Create all the Work Queues (WQs) 5067 */ 5068 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 5069 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 5070 5071 /* Create slow-path ELS Work Queue */ 5072 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5073 phba->sli4_hba.wq_ecount); 5074 if (!qdesc) { 5075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5076 "0504 Failed allocate slow-path ELS WQ\n"); 5077 goto out_free_mbx_wq; 5078 
} 5079 phba->sli4_hba.els_wq = qdesc; 5080 5081 /* Create fast-path FCP Work Queue(s) */ 5082 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 5083 phba->cfg_fcp_wq_count), GFP_KERNEL); 5084 if (!phba->sli4_hba.fcp_wq) { 5085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5086 "2578 Failed allocate memory for fast-path " 5087 "WQ record array\n"); 5088 goto out_free_els_wq; 5089 } 5090 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5091 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5092 phba->sli4_hba.wq_ecount); 5093 if (!qdesc) { 5094 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5095 "0503 Failed allocate fast-path FCP " 5096 "WQ (%d)\n", fcp_wqidx); 5097 goto out_free_fcp_wq; 5098 } 5099 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 5100 } 5101 5102 /* 5103 * Create Receive Queue (RQ) 5104 */ 5105 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 5106 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 5107 5108 /* Create Receive Queue for header */ 5109 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5110 phba->sli4_hba.rq_ecount); 5111 if (!qdesc) { 5112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5113 "0506 Failed allocate receive HRQ\n"); 5114 goto out_free_fcp_wq; 5115 } 5116 phba->sli4_hba.hdr_rq = qdesc; 5117 5118 /* Create Receive Queue for data */ 5119 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5120 phba->sli4_hba.rq_ecount); 5121 if (!qdesc) { 5122 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5123 "0507 Failed allocate receive DRQ\n"); 5124 goto out_free_hdr_rq; 5125 } 5126 phba->sli4_hba.dat_rq = qdesc; 5127 5128 return 0; 5129 5130out_free_hdr_rq: 5131 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5132 phba->sli4_hba.hdr_rq = NULL; 5133out_free_fcp_wq: 5134 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 5135 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 5136 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 5137 } 5138 kfree(phba->sli4_hba.fcp_wq); 5139out_free_els_wq: 5140 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5141 phba->sli4_hba.els_wq = NULL; 5142out_free_mbx_wq: 5143 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5144 phba->sli4_hba.mbx_wq = NULL; 5145out_free_fcp_cq: 5146 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 5147 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 5148 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5149 } 5150 kfree(phba->sli4_hba.fcp_cq); 5151out_free_rxq_cq: 5152 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); 5153 phba->sli4_hba.rxq_cq = NULL; 5154out_free_els_cq: 5155 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5156 phba->sli4_hba.els_cq = NULL; 5157out_free_mbx_cq: 5158 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5159 phba->sli4_hba.mbx_cq = NULL; 5160out_free_fp_eq: 5161 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 5162 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 5163 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 5164 } 5165 kfree(phba->sli4_hba.fp_eq); 5166out_free_sp_eq: 5167 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5168 phba->sli4_hba.sp_eq = NULL; 5169out_error: 5170 return -ENOMEM; 5171} 5172 5173/** 5174 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 5175 * @phba: pointer to lpfc hba data structure. 5176 * 5177 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 5178 * operation. 5179 * 5180 * Return codes 5181 * 0 - sucessful 5182 * ENOMEM - No availble memory 5183 * EIO - The mailbox failed to complete successfully. 
5184 **/ 5185static void 5186lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 5187{ 5188 int fcp_qidx; 5189 5190 /* Release mailbox command work queue */ 5191 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5192 phba->sli4_hba.mbx_wq = NULL; 5193 5194 /* Release ELS work queue */ 5195 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5196 phba->sli4_hba.els_wq = NULL; 5197 5198 /* Release FCP work queue */ 5199 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5200 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 5201 kfree(phba->sli4_hba.fcp_wq); 5202 phba->sli4_hba.fcp_wq = NULL; 5203 5204 /* Release unsolicited receive queue */ 5205 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5206 phba->sli4_hba.hdr_rq = NULL; 5207 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5208 phba->sli4_hba.dat_rq = NULL; 5209 5210 /* Release unsolicited receive complete queue */ 5211 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); 5212 phba->sli4_hba.rxq_cq = NULL; 5213 5214 /* Release ELS complete queue */ 5215 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5216 phba->sli4_hba.els_cq = NULL; 5217 5218 /* Release mailbox command complete queue */ 5219 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5220 phba->sli4_hba.mbx_cq = NULL; 5221 5222 /* Release FCP response complete queue */ 5223 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5224 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 5225 kfree(phba->sli4_hba.fcp_cq); 5226 phba->sli4_hba.fcp_cq = NULL; 5227 5228 /* Release fast-path event queue */ 5229 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5230 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 5231 kfree(phba->sli4_hba.fp_eq); 5232 phba->sli4_hba.fp_eq = NULL; 5233 5234 /* Release slow-path event queue */ 5235 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5236 phba->sli4_hba.sp_eq = NULL; 5237 5238 return; 5239} 5240 5241/** 5242 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 5243 * @phba: pointer to lpfc hba data structure. 5244 * 5245 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 5246 * operation. 5247 * 5248 * Return codes 5249 * 0 - sucessful 5250 * ENOMEM - No availble memory 5251 * EIO - The mailbox failed to complete successfully. 
5252 **/ 5253int 5254lpfc_sli4_queue_setup(struct lpfc_hba *phba) 5255{ 5256 int rc = -ENOMEM; 5257 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5258 int fcp_cq_index = 0; 5259 5260 /* 5261 * Set up Event Queues (EQs) 5262 */ 5263 5264 /* Set up slow-path event queue */ 5265 if (!phba->sli4_hba.sp_eq) { 5266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5267 "0520 Slow-path EQ not allocated\n"); 5268 goto out_error; 5269 } 5270 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 5271 LPFC_SP_DEF_IMAX); 5272 if (rc) { 5273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5274 "0521 Failed setup of slow-path EQ: " 5275 "rc = 0x%x\n", rc); 5276 goto out_error; 5277 } 5278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5279 "2583 Slow-path EQ setup: queue-id=%d\n", 5280 phba->sli4_hba.sp_eq->queue_id); 5281 5282 /* Set up fast-path event queue */ 5283 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5284 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 5285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5286 "0522 Fast-path EQ (%d) not " 5287 "allocated\n", fcp_eqidx); 5288 goto out_destroy_fp_eq; 5289 } 5290 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 5291 phba->cfg_fcp_imax); 5292 if (rc) { 5293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5294 "0523 Failed setup of fast-path EQ " 5295 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 5296 goto out_destroy_fp_eq; 5297 } 5298 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5299 "2584 Fast-path EQ setup: " 5300 "queue[%d]-id=%d\n", fcp_eqidx, 5301 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 5302 } 5303 5304 /* 5305 * Set up Complete Queues (CQs) 5306 */ 5307 5308 /* Set up slow-path MBOX Complete Queue as the first CQ */ 5309 if (!phba->sli4_hba.mbx_cq) { 5310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5311 "0528 Mailbox CQ not allocated\n"); 5312 goto out_destroy_fp_eq; 5313 } 5314 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 5315 LPFC_MCQ, LPFC_MBOX); 5316 if (rc) { 5317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5318 "0529 Failed setup of slow-path mailbox CQ: " 5319 "rc = 0x%x\n", rc); 5320 goto out_destroy_fp_eq; 5321 } 5322 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5323 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 5324 phba->sli4_hba.mbx_cq->queue_id, 5325 phba->sli4_hba.sp_eq->queue_id); 5326 5327 /* Set up slow-path ELS Complete Queue */ 5328 if (!phba->sli4_hba.els_cq) { 5329 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5330 "0530 ELS CQ not allocated\n"); 5331 goto out_destroy_mbx_cq; 5332 } 5333 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 5334 LPFC_WCQ, LPFC_ELS); 5335 if (rc) { 5336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5337 "0531 Failed setup of slow-path ELS CQ: " 5338 "rc = 0x%x\n", rc); 5339 goto out_destroy_mbx_cq; 5340 } 5341 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5342 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 5343 phba->sli4_hba.els_cq->queue_id, 5344 phba->sli4_hba.sp_eq->queue_id); 5345 5346 /* Set up slow-path Unsolicited Receive Complete Queue */ 5347 if (!phba->sli4_hba.rxq_cq) { 5348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5349 "0532 USOL RX CQ not allocated\n"); 5350 goto out_destroy_els_cq; 5351 } 5352 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, 5353 LPFC_RCQ, LPFC_USOL); 5354 if (rc) { 5355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5356 "0533 Failed setup of slow-path USOL RX CQ: " 5357 "rc = 0x%x\n", rc); 5358 goto out_destroy_els_cq; 5359 } 5360 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5361 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", 
5362 phba->sli4_hba.rxq_cq->queue_id, 5363 phba->sli4_hba.sp_eq->queue_id); 5364 5365 /* Set up fast-path FCP Response Complete Queue */ 5366 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5367 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 5368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5369 "0526 Fast-path FCP CQ (%d) not " 5370 "allocated\n", fcp_cqidx); 5371 goto out_destroy_fcp_cq; 5372 } 5373 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 5374 phba->sli4_hba.fp_eq[fcp_cqidx], 5375 LPFC_WCQ, LPFC_FCP); 5376 if (rc) { 5377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5378 "0527 Failed setup of fast-path FCP " 5379 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 5380 goto out_destroy_fcp_cq; 5381 } 5382 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5383 "2588 FCP CQ setup: cq[%d]-id=%d, " 5384 "parent eq[%d]-id=%d\n", 5385 fcp_cqidx, 5386 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 5387 fcp_cqidx, 5388 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 5389 } 5390 5391 /* 5392 * Set up all the Work Queues (WQs) 5393 */ 5394 5395 /* Set up Mailbox Command Queue */ 5396 if (!phba->sli4_hba.mbx_wq) { 5397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5398 "0538 Slow-path MQ not allocated\n"); 5399 goto out_destroy_fcp_cq; 5400 } 5401 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 5402 phba->sli4_hba.mbx_cq, LPFC_MBOX); 5403 if (rc) { 5404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5405 "0539 Failed setup of slow-path MQ: " 5406 "rc = 0x%x\n", rc); 5407 goto out_destroy_fcp_cq; 5408 } 5409 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5410 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 5411 phba->sli4_hba.mbx_wq->queue_id, 5412 phba->sli4_hba.mbx_cq->queue_id); 5413 5414 /* Set up slow-path ELS Work Queue */ 5415 if (!phba->sli4_hba.els_wq) { 5416 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5417 "0536 Slow-path ELS WQ not allocated\n"); 5418 goto out_destroy_mbx_wq; 5419 } 5420 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 5421 phba->sli4_hba.els_cq, LPFC_ELS); 5422 if (rc) { 5423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5424 "0537 Failed setup of slow-path ELS WQ: " 5425 "rc = 0x%x\n", rc); 5426 goto out_destroy_mbx_wq; 5427 } 5428 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5429 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 5430 phba->sli4_hba.els_wq->queue_id, 5431 phba->sli4_hba.els_cq->queue_id); 5432 5433 /* Set up fast-path FCP Work Queue */ 5434 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5435 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5437 "0534 Fast-path FCP WQ (%d) not " 5438 "allocated\n", fcp_wqidx); 5439 goto out_destroy_fcp_wq; 5440 } 5441 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 5442 phba->sli4_hba.fcp_cq[fcp_cq_index], 5443 LPFC_FCP); 5444 if (rc) { 5445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5446 "0535 Failed setup of fast-path FCP " 5447 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 5448 goto out_destroy_fcp_wq; 5449 } 5450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5451 "2591 FCP WQ setup: wq[%d]-id=%d, " 5452 "parent cq[%d]-id=%d\n", 5453 fcp_wqidx, 5454 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 5455 fcp_cq_index, 5456 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 5457 /* Round robin FCP Work Queue's Completion Queue assignment */ 5458 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 5459 } 5460 5461 /* 5462 * Create Receive Queue (RQ) 5463 */ 5464 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 5465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5466 
"0540 Receive Queue not allocated\n"); 5467 goto out_destroy_fcp_wq; 5468 } 5469 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 5470 phba->sli4_hba.rxq_cq, LPFC_USOL); 5471 if (rc) { 5472 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5473 "0541 Failed setup of Receive Queue: " 5474 "rc = 0x%x\n", rc); 5475 goto out_destroy_fcp_wq; 5476 } 5477 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5478 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 5479 "parent cq-id=%d\n", 5480 phba->sli4_hba.hdr_rq->queue_id, 5481 phba->sli4_hba.dat_rq->queue_id, 5482 phba->sli4_hba.rxq_cq->queue_id); 5483 return 0; 5484 5485out_destroy_fcp_wq: 5486 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 5487 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 5488 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5489out_destroy_mbx_wq: 5490 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5491out_destroy_fcp_cq: 5492 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 5493 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 5494 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); 5495out_destroy_els_cq: 5496 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5497out_destroy_mbx_cq: 5498 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5499out_destroy_fp_eq: 5500 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 5501 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 5502 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 5503out_error: 5504 return rc; 5505} 5506 5507/** 5508 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 5509 * @phba: pointer to lpfc hba data structure. 5510 * 5511 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 5512 * operation. 5513 * 5514 * Return codes 5515 * 0 - sucessful 5516 * ENOMEM - No availble memory 5517 * EIO - The mailbox failed to complete successfully. 5518 **/ 5519void 5520lpfc_sli4_queue_unset(struct lpfc_hba *phba) 5521{ 5522 int fcp_qidx; 5523 5524 /* Unset mailbox command work queue */ 5525 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5526 /* Unset ELS work queue */ 5527 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5528 /* Unset unsolicited receive queue */ 5529 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 5530 /* Unset FCP work queue */ 5531 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5532 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 5533 /* Unset mailbox command complete queue */ 5534 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5535 /* Unset ELS complete queue */ 5536 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5537 /* Unset unsolicited receive complete queue */ 5538 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); 5539 /* Unset FCP response complete queue */ 5540 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5541 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 5542 /* Unset fast-path event queue */ 5543 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5544 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 5545 /* Unset slow-path event queue */ 5546 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 5547} 5548 5549/** 5550 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 5551 * @phba: pointer to lpfc hba data structure. 5552 * 5553 * This routine is invoked to allocate and set up a pool of completion queue 5554 * events. The body of the completion queue event is a completion queue entry 5555 * CQE. 
For now, this pool is used for the interrupt service routine to queue 5556 * the following HBA completion queue events for the worker thread to process: 5557 * - Mailbox asynchronous events 5558 * - Receive queue completion unsolicited events 5559 * Later, this can be used for all the slow-path events. 5560 * 5561 * Return codes 5562 * 0 - sucessful 5563 * -ENOMEM - No availble memory 5564 **/ 5565static int 5566lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 5567{ 5568 struct lpfc_cq_event *cq_event; 5569 int i; 5570 5571 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 5572 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 5573 if (!cq_event) 5574 goto out_pool_create_fail; 5575 list_add_tail(&cq_event->list, 5576 &phba->sli4_hba.sp_cqe_event_pool); 5577 } 5578 return 0; 5579 5580out_pool_create_fail: 5581 lpfc_sli4_cq_event_pool_destroy(phba); 5582 return -ENOMEM; 5583} 5584 5585/** 5586 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 5587 * @phba: pointer to lpfc hba data structure. 5588 * 5589 * This routine is invoked to free the pool of completion queue events at 5590 * driver unload time. Note that, it is the responsibility of the driver 5591 * cleanup routine to free all the outstanding completion-queue events 5592 * allocated from this pool back into the pool before invoking this routine 5593 * to destroy the pool. 5594 **/ 5595static void 5596lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 5597{ 5598 struct lpfc_cq_event *cq_event, *next_cq_event; 5599 5600 list_for_each_entry_safe(cq_event, next_cq_event, 5601 &phba->sli4_hba.sp_cqe_event_pool, list) { 5602 list_del(&cq_event->list); 5603 kfree(cq_event); 5604 } 5605} 5606 5607/** 5608 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 5609 * @phba: pointer to lpfc hba data structure. 5610 * 5611 * This routine is the lock free version of the API invoked to allocate a 5612 * completion-queue event from the free pool. 5613 * 5614 * Return: Pointer to the newly allocated completion-queue event if successful 5615 * NULL otherwise. 5616 **/ 5617struct lpfc_cq_event * 5618__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 5619{ 5620 struct lpfc_cq_event *cq_event = NULL; 5621 5622 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 5623 struct lpfc_cq_event, list); 5624 return cq_event; 5625} 5626 5627/** 5628 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 5629 * @phba: pointer to lpfc hba data structure. 5630 * 5631 * This routine is the lock version of the API invoked to allocate a 5632 * completion-queue event from the free pool. 5633 * 5634 * Return: Pointer to the newly allocated completion-queue event if successful 5635 * NULL otherwise. 5636 **/ 5637struct lpfc_cq_event * 5638lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 5639{ 5640 struct lpfc_cq_event *cq_event; 5641 unsigned long iflags; 5642 5643 spin_lock_irqsave(&phba->hbalock, iflags); 5644 cq_event = __lpfc_sli4_cq_event_alloc(phba); 5645 spin_unlock_irqrestore(&phba->hbalock, iflags); 5646 return cq_event; 5647} 5648 5649/** 5650 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 5651 * @phba: pointer to lpfc hba data structure. 5652 * @cq_event: pointer to the completion queue event to be freed. 5653 * 5654 * This routine is the lock free version of the API invoked to release a 5655 * completion-queue event back into the free pool. 
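 *
 * A minimal usage sketch (illustrative only, mirroring the locked wrapper
 * lpfc_sli4_cq_event_release() below): callers that manage locking
 * themselves bracket the call with the HBA lock:
 *
 *   spin_lock_irqsave(&phba->hbalock, iflags);
 *   __lpfc_sli4_cq_event_release(phba, cq_event);
 *   spin_unlock_irqrestore(&phba->hbalock, iflags);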
5656 **/
5657void
5658__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5659 struct lpfc_cq_event *cq_event)
5660{
5661 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5662}
5663
5664/**
5665 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5666 * @phba: pointer to lpfc hba data structure.
5667 * @cq_event: pointer to the completion queue event to be freed.
5668 *
5669 * This routine is the lock version of the API invoked to release a
5670 * completion-queue event back into the free pool.
5671 **/
5672void
5673lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5674 struct lpfc_cq_event *cq_event)
5675{
5676 unsigned long iflags;
5677 spin_lock_irqsave(&phba->hbalock, iflags);
5678 __lpfc_sli4_cq_event_release(phba, cq_event);
5679 spin_unlock_irqrestore(&phba->hbalock, iflags);
5680}
5681
5682/**
5683 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5684 * @phba: pointer to lpfc hba data structure.
5685 *
5686 * This routine is invoked to move all pending completion-queue events from
5687 * the pending-event work queues back into the free pool for a device reset.
5688 **/
5689static void
5690lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5691{
5692 LIST_HEAD(cqelist);
5693 struct lpfc_cq_event *cqe;
5694 unsigned long iflags;
5695
5696 /* Retrieve all the pending WCQEs from pending WCQE lists */
5697 spin_lock_irqsave(&phba->hbalock, iflags);
5698 /* Pending FCP XRI abort events */
5699 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5700 &cqelist);
5701 /* Pending ELS XRI abort events */
5702 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5703 &cqelist);
5704 /* Pending async events */
5705 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5706 &cqelist);
5707 spin_unlock_irqrestore(&phba->hbalock, iflags);
5708
5709 while (!list_empty(&cqelist)) {
5710 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5711 lpfc_sli4_cq_event_release(phba, cqe);
5712 }
5713}
5714
5715/**
5716 * lpfc_pci_function_reset - Reset pci function.
5717 * @phba: pointer to lpfc hba data structure.
5718 *
5719 * This routine is invoked to request a PCI function reset. It destroys
5720 * all resources assigned to the PCI function that originates this request.
5721 *
5722 * Return codes
5723 * 0 - successful
5724 * -ENOMEM - No available memory
5725 * -ENXIO - The function reset mailbox command failed to complete successfully.
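 *
 * The reset is issued as an embedded SLI4_CONFIG mailbox command (subsystem
 * COMMON, opcode FUNCTION_RESET) in polling mode, as the body below shows.
 * A caller sketch (illustrative only, not taken from the driver source):
 *
 *   rc = lpfc_pci_function_reset(phba);
 *   if (rc)
 *           return rc;
 *
 * where rc is a hypothetical local variable in the caller.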
5726 **/ 5727int 5728lpfc_pci_function_reset(struct lpfc_hba *phba) 5729{ 5730 LPFC_MBOXQ_t *mboxq; 5731 uint32_t rc = 0; 5732 uint32_t shdr_status, shdr_add_status; 5733 union lpfc_sli4_cfg_shdr *shdr; 5734 5735 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5736 if (!mboxq) { 5737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5738 "0494 Unable to allocate memory for issuing " 5739 "SLI_FUNCTION_RESET mailbox command\n"); 5740 return -ENOMEM; 5741 } 5742 5743 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 5744 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5745 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 5746 LPFC_SLI4_MBX_EMBED); 5747 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5748 shdr = (union lpfc_sli4_cfg_shdr *) 5749 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 5750 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5751 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5752 if (rc != MBX_TIMEOUT) 5753 mempool_free(mboxq, phba->mbox_mem_pool); 5754 if (shdr_status || shdr_add_status || rc) { 5755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5756 "0495 SLI_FUNCTION_RESET mailbox failed with " 5757 "status x%x add_status x%x, mbx status x%x\n", 5758 shdr_status, shdr_add_status, rc); 5759 rc = -ENXIO; 5760 } 5761 return rc; 5762} 5763 5764/** 5765 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 5766 * @phba: pointer to lpfc hba data structure. 5767 * @cnt: number of nop mailbox commands to send. 5768 * 5769 * This routine is invoked to send a number @cnt of NOP mailbox command and 5770 * wait for each command to complete. 5771 * 5772 * Return: the number of NOP mailbox command completed. 5773 **/ 5774static int 5775lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 5776{ 5777 LPFC_MBOXQ_t *mboxq; 5778 int length, cmdsent; 5779 uint32_t mbox_tmo; 5780 uint32_t rc = 0; 5781 uint32_t shdr_status, shdr_add_status; 5782 union lpfc_sli4_cfg_shdr *shdr; 5783 5784 if (cnt == 0) { 5785 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5786 "2518 Requested to send 0 NOP mailbox cmd\n"); 5787 return cnt; 5788 } 5789 5790 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5791 if (!mboxq) { 5792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5793 "2519 Unable to allocate memory for issuing " 5794 "NOP mailbox command\n"); 5795 return 0; 5796 } 5797 5798 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 5799 length = (sizeof(struct lpfc_mbx_nop) - 5800 sizeof(struct lpfc_sli4_cfg_mhdr)); 5801 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5802 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 5803 5804 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5805 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 5806 if (!phba->sli4_hba.intr_enable) 5807 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5808 else 5809 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 5810 if (rc == MBX_TIMEOUT) 5811 break; 5812 /* Check return status */ 5813 shdr = (union lpfc_sli4_cfg_shdr *) 5814 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 5815 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5816 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 5817 &shdr->response); 5818 if (shdr_status || shdr_add_status || rc) { 5819 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5820 "2520 NOP mailbox command failed " 5821 "status x%x add_status x%x mbx " 5822 "status x%x\n", shdr_status, 5823 shdr_add_status, rc); 5824 break; 5825 } 5826 } 5827 5828 if (rc != MBX_TIMEOUT) 5829 
mempool_free(mboxq, phba->mbox_mem_pool); 5830 5831 return cmdsent; 5832} 5833 5834/** 5835 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device 5836 * @phba: pointer to lpfc hba data structure. 5837 * @fcfi: fcf index. 5838 * 5839 * This routine is invoked to unregister a FCFI from device. 5840 **/ 5841void 5842lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) 5843{ 5844 LPFC_MBOXQ_t *mbox; 5845 uint32_t mbox_tmo; 5846 int rc; 5847 unsigned long flags; 5848 5849 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5850 5851 if (!mbox) 5852 return; 5853 5854 lpfc_unreg_fcfi(mbox, fcfi); 5855 5856 if (!phba->sli4_hba.intr_enable) 5857 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5858 else { 5859 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5860 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5861 } 5862 if (rc != MBX_TIMEOUT) 5863 mempool_free(mbox, phba->mbox_mem_pool); 5864 if (rc != MBX_SUCCESS) 5865 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5866 "2517 Unregister FCFI command failed " 5867 "status %d, mbxStatus x%x\n", rc, 5868 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 5869 else { 5870 spin_lock_irqsave(&phba->hbalock, flags); 5871 /* Mark the FCFI is no longer registered */ 5872 phba->fcf.fcf_flag &= 5873 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); 5874 spin_unlock_irqrestore(&phba->hbalock, flags); 5875 } 5876} 5877 5878/** 5879 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 5880 * @phba: pointer to lpfc hba data structure. 5881 * 5882 * This routine is invoked to set up the PCI device memory space for device 5883 * with SLI-4 interface spec. 5884 * 5885 * Return codes 5886 * 0 - sucessful 5887 * other values - error 5888 **/ 5889static int 5890lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 5891{ 5892 struct pci_dev *pdev; 5893 unsigned long bar0map_len, bar1map_len, bar2map_len; 5894 int error = -ENODEV; 5895 5896 /* Obtain PCI device reference */ 5897 if (!phba->pcidev) 5898 return error; 5899 else 5900 pdev = phba->pcidev; 5901 5902 /* Set the device DMA mask size */ 5903 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 5904 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5905 return error; 5906 5907 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 5908 * number of bytes required by each mapping. They are actually 5909 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. 5910 */ 5911 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); 5912 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); 5913 5914 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); 5915 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); 5916 5917 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); 5918 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); 5919 5920 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 5921 phba->sli4_hba.conf_regs_memmap_p = 5922 ioremap(phba->pci_bar0_map, bar0map_len); 5923 if (!phba->sli4_hba.conf_regs_memmap_p) { 5924 dev_printk(KERN_ERR, &pdev->dev, 5925 "ioremap failed for SLI4 PCI config registers.\n"); 5926 goto out; 5927 } 5928 5929 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 5930 phba->sli4_hba.ctrl_regs_memmap_p = 5931 ioremap(phba->pci_bar1_map, bar1map_len); 5932 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 5933 dev_printk(KERN_ERR, &pdev->dev, 5934 "ioremap failed for SLI4 HBA control registers.\n"); 5935 goto out_iounmap_conf; 5936 } 5937 5938 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. 
*/ 5939 phba->sli4_hba.drbl_regs_memmap_p = 5940 ioremap(phba->pci_bar2_map, bar2map_len); 5941 if (!phba->sli4_hba.drbl_regs_memmap_p) { 5942 dev_printk(KERN_ERR, &pdev->dev, 5943 "ioremap failed for SLI4 HBA doorbell registers.\n"); 5944 goto out_iounmap_ctrl; 5945 } 5946 5947 /* Set up BAR0 PCI config space register memory map */ 5948 lpfc_sli4_bar0_register_memmap(phba); 5949 5950 /* Set up BAR1 register memory map */ 5951 lpfc_sli4_bar1_register_memmap(phba); 5952 5953 /* Set up BAR2 register memory map */ 5954 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 5955 if (error) 5956 goto out_iounmap_all; 5957 5958 return 0; 5959 5960out_iounmap_all: 5961 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 5962out_iounmap_ctrl: 5963 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 5964out_iounmap_conf: 5965 iounmap(phba->sli4_hba.conf_regs_memmap_p); 5966out: 5967 return error; 5968} 5969 5970/** 5971 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 5972 * @phba: pointer to lpfc hba data structure. 5973 * 5974 * This routine is invoked to unset the PCI device memory space for device 5975 * with SLI-4 interface spec. 5976 **/ 5977static void 5978lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 5979{ 5980 struct pci_dev *pdev; 5981 5982 /* Obtain PCI device reference */ 5983 if (!phba->pcidev) 5984 return; 5985 else 5986 pdev = phba->pcidev; 5987 5988 /* Free coherent DMA memory allocated */ 5989 5990 /* Unmap I/O memory space */ 5991 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 5992 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 5993 iounmap(phba->sli4_hba.conf_regs_memmap_p); 5994 5995 return; 5996} 5997 5998/** 5999 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6000 * @phba: pointer to lpfc hba data structure. 6001 * 6002 * This routine is invoked to enable the MSI-X interrupt vectors to device 6003 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6004 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6005 * invoked, enables either all or nothing, depending on the current 6006 * availability of PCI vector resources. The device driver is responsible 6007 * for calling the individual request_irq() to register each MSI-X vector 6008 * with a interrupt handler, which is done in this function. Note that 6009 * later when device is unloading, the driver should always call free_irq() 6010 * on all MSI-X vectors it has done request_irq() on before calling 6011 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6012 * will be left with MSI-X enabled and leaks its vectors. 
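 *
 * For the SLI-3 path this function registers exactly two handlers, as the
 * body below shows: vector 0 is bound to lpfc_sli_sp_intr_handler() for the
 * slow path and vector 1 to lpfc_sli_fp_intr_handler() for the fast path,
 * after which a CONFIG_MSI mailbox command maps the HBA attention
 * conditions to the MSI-X messages.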
6013 * 6014 * Return codes 6015 * 0 - sucessful 6016 * other values - error 6017 **/ 6018static int 6019lpfc_sli_enable_msix(struct lpfc_hba *phba) 6020{ 6021 int rc, i; 6022 LPFC_MBOXQ_t *pmb; 6023 6024 /* Set up MSI-X multi-message vectors */ 6025 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6026 phba->msix_entries[i].entry = i; 6027 6028 /* Configure MSI-X capability structure */ 6029 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6030 ARRAY_SIZE(phba->msix_entries)); 6031 if (rc) { 6032 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6033 "0420 PCI enable MSI-X failed (%d)\n", rc); 6034 goto msi_fail_out; 6035 } 6036 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6037 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6038 "0477 MSI-X entry[%d]: vector=x%x " 6039 "message=%d\n", i, 6040 phba->msix_entries[i].vector, 6041 phba->msix_entries[i].entry); 6042 /* 6043 * Assign MSI-X vectors to interrupt handlers 6044 */ 6045 6046 /* vector-0 is associated to slow-path handler */ 6047 rc = request_irq(phba->msix_entries[0].vector, 6048 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6049 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6050 if (rc) { 6051 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6052 "0421 MSI-X slow-path request_irq failed " 6053 "(%d)\n", rc); 6054 goto msi_fail_out; 6055 } 6056 6057 /* vector-1 is associated to fast-path handler */ 6058 rc = request_irq(phba->msix_entries[1].vector, 6059 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6060 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6061 6062 if (rc) { 6063 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6064 "0429 MSI-X fast-path request_irq failed " 6065 "(%d)\n", rc); 6066 goto irq_fail_out; 6067 } 6068 6069 /* 6070 * Configure HBA MSI-X attention conditions to messages 6071 */ 6072 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6073 6074 if (!pmb) { 6075 rc = -ENOMEM; 6076 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6077 "0474 Unable to allocate memory for issuing " 6078 "MBOX_CONFIG_MSI command\n"); 6079 goto mem_fail_out; 6080 } 6081 rc = lpfc_config_msi(phba, pmb); 6082 if (rc) 6083 goto mbx_fail_out; 6084 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6085 if (rc != MBX_SUCCESS) { 6086 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6087 "0351 Config MSI mailbox command failed, " 6088 "mbxCmd x%x, mbxStatus x%x\n", 6089 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6090 goto mbx_fail_out; 6091 } 6092 6093 /* Free memory allocated for mailbox command */ 6094 mempool_free(pmb, phba->mbox_mem_pool); 6095 return rc; 6096 6097mbx_fail_out: 6098 /* Free memory allocated for mailbox command */ 6099 mempool_free(pmb, phba->mbox_mem_pool); 6100 6101mem_fail_out: 6102 /* free the irq already requested */ 6103 free_irq(phba->msix_entries[1].vector, phba); 6104 6105irq_fail_out: 6106 /* free the irq already requested */ 6107 free_irq(phba->msix_entries[0].vector, phba); 6108 6109msi_fail_out: 6110 /* Unconfigure MSI-X capability structure */ 6111 pci_disable_msix(phba->pcidev); 6112 return rc; 6113} 6114 6115/** 6116 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6117 * @phba: pointer to lpfc hba data structure. 6118 * 6119 * This routine is invoked to release the MSI-X vectors and then disable the 6120 * MSI-X interrupt mode to device with SLI-3 interface spec. 
6121 **/
6122static void
6123lpfc_sli_disable_msix(struct lpfc_hba *phba)
6124{
6125 int i;
6126
6127 /* Free up MSI-X multi-message vectors */
6128 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6129 free_irq(phba->msix_entries[i].vector, phba);
6130 /* Disable MSI-X */
6131 pci_disable_msix(phba->pcidev);
6132
6133 return;
6134}
6135
6136/**
6137 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6138 * @phba: pointer to lpfc hba data structure.
6139 *
6140 * This routine is invoked to enable the MSI interrupt mode on a device with
6141 * the SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6142 * enable the MSI vector. The device driver is responsible for calling
6143 * request_irq() to register the MSI vector with an interrupt handler, which
6144 * is done in this function.
6145 *
6146 * Return codes
6147 * 0 - successful
6148 * other values - error
6149 */
6150static int
6151lpfc_sli_enable_msi(struct lpfc_hba *phba)
6152{
6153 int rc;
6154
6155 rc = pci_enable_msi(phba->pcidev);
6156 if (!rc)
6157 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6158 "0462 PCI enable MSI mode success.\n");
6159 else {
6160 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6161 "0471 PCI enable MSI mode failed (%d)\n", rc);
6162 return rc;
6163 }
6164
6165 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6166 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6167 if (rc) {
6168 pci_disable_msi(phba->pcidev);
6169 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6170 "0478 MSI request_irq failed (%d)\n", rc);
6171 }
6172 return rc;
6173}
6174
6175/**
6176 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6177 * @phba: pointer to lpfc hba data structure.
6178 *
6179 * This routine is invoked to disable the MSI interrupt mode on a device with
6180 * the SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
6181 * has done request_irq() on before calling pci_disable_msi(). Failure to do
6182 * so results in a BUG_ON() and leaves the device with MSI enabled, leaking
6183 * its vector.
6184 */
6185static void
6186lpfc_sli_disable_msi(struct lpfc_hba *phba)
6187{
6188 free_irq(phba->pcidev->irq, phba);
6189 pci_disable_msi(phba->pcidev);
6190 return;
6191}
6192
6193/**
6194 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6195 * @phba: pointer to lpfc hba data structure.
6196 *
6197 * This routine is invoked to enable device interrupts and associate the
6198 * driver's interrupt handler(s) with interrupt vector(s) on a device with
6199 * the SLI-3 interface spec. Depending on the interrupt mode configured for
6200 * the driver, it will fall back from the configured interrupt mode to an
6201 * interrupt mode supported by the platform, kernel, and device, in the
6202 * order:
6203 * MSI-X -> MSI -> IRQ.
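 *
 * The @cfg_mode argument follows the cfg_use_msi convention used elsewhere
 * in the driver: 2 requests MSI-X, 1 requests MSI, 0 requests INTx; the
 * returned value reflects the mode actually established, or LPFC_INTR_ERROR
 * if none could be enabled. A simplified sketch of the typical call
 * (illustrative only, modeled on the probe path below):
 *
 *   intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *   if (intr_mode == LPFC_INTR_ERROR)
 *           return -ENODEV;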
6204 * 6205 * Return codes 6206 * 0 - sucessful 6207 * other values - error 6208 **/ 6209static uint32_t 6210lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6211{ 6212 uint32_t intr_mode = LPFC_INTR_ERROR; 6213 int retval; 6214 6215 if (cfg_mode == 2) { 6216 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6217 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6218 if (!retval) { 6219 /* Now, try to enable MSI-X interrupt mode */ 6220 retval = lpfc_sli_enable_msix(phba); 6221 if (!retval) { 6222 /* Indicate initialization to MSI-X mode */ 6223 phba->intr_type = MSIX; 6224 intr_mode = 2; 6225 } 6226 } 6227 } 6228 6229 /* Fallback to MSI if MSI-X initialization failed */ 6230 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6231 retval = lpfc_sli_enable_msi(phba); 6232 if (!retval) { 6233 /* Indicate initialization to MSI mode */ 6234 phba->intr_type = MSI; 6235 intr_mode = 1; 6236 } 6237 } 6238 6239 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 6240 if (phba->intr_type == NONE) { 6241 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6242 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6243 if (!retval) { 6244 /* Indicate initialization to INTx mode */ 6245 phba->intr_type = INTx; 6246 intr_mode = 0; 6247 } 6248 } 6249 return intr_mode; 6250} 6251 6252/** 6253 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 6254 * @phba: pointer to lpfc hba data structure. 6255 * 6256 * This routine is invoked to disable device interrupt and disassociate the 6257 * driver's interrupt handler(s) from interrupt vector(s) to device with 6258 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 6259 * release the interrupt vector(s) for the message signaled interrupt. 6260 **/ 6261static void 6262lpfc_sli_disable_intr(struct lpfc_hba *phba) 6263{ 6264 /* Disable the currently initialized interrupt mode */ 6265 if (phba->intr_type == MSIX) 6266 lpfc_sli_disable_msix(phba); 6267 else if (phba->intr_type == MSI) 6268 lpfc_sli_disable_msi(phba); 6269 else if (phba->intr_type == INTx) 6270 free_irq(phba->pcidev->irq, phba); 6271 6272 /* Reset interrupt management states */ 6273 phba->intr_type = NONE; 6274 phba->sli.slistat.sli_intr = 0; 6275 6276 return; 6277} 6278 6279/** 6280 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 6281 * @phba: pointer to lpfc hba data structure. 6282 * 6283 * This routine is invoked to enable the MSI-X interrupt vectors to device 6284 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 6285 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 6286 * enables either all or nothing, depending on the current availability of 6287 * PCI vector resources. The device driver is responsible for calling the 6288 * individual request_irq() to register each MSI-X vector with a interrupt 6289 * handler, which is done in this function. Note that later when device is 6290 * unloading, the driver should always call free_irq() on all MSI-X vectors 6291 * it has done request_irq() on before calling pci_disable_msix(). Failure 6292 * to do so results in a BUG_ON() and a device will be left with MSI-X 6293 * enabled and leaks its vectors. 
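 *
 * On the SLI-4 path the number of vectors requested is cfg_eqn, as the body
 * below shows: vector 0 is bound to lpfc_sli4_sp_intr_handler() for the
 * slow-path event queue, and each remaining vector is bound to
 * lpfc_sli4_fp_intr_handler() with its own fcp_eq_hdl entry for one
 * fast-path FCP event queue.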
6294 * 6295 * Return codes 6296 * 0 - sucessful 6297 * other values - error 6298 **/ 6299static int 6300lpfc_sli4_enable_msix(struct lpfc_hba *phba) 6301{ 6302 int rc, index; 6303 6304 /* Set up MSI-X multi-message vectors */ 6305 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6306 phba->sli4_hba.msix_entries[index].entry = index; 6307 6308 /* Configure MSI-X capability structure */ 6309 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 6310 phba->sli4_hba.cfg_eqn); 6311 if (rc) { 6312 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6313 "0484 PCI enable MSI-X failed (%d)\n", rc); 6314 goto msi_fail_out; 6315 } 6316 /* Log MSI-X vector assignment */ 6317 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6318 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6319 "0489 MSI-X entry[%d]: vector=x%x " 6320 "message=%d\n", index, 6321 phba->sli4_hba.msix_entries[index].vector, 6322 phba->sli4_hba.msix_entries[index].entry); 6323 /* 6324 * Assign MSI-X vectors to interrupt handlers 6325 */ 6326 6327 /* The first vector must associated to slow-path handler for MQ */ 6328 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 6329 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 6330 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6331 if (rc) { 6332 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6333 "0485 MSI-X slow-path request_irq failed " 6334 "(%d)\n", rc); 6335 goto msi_fail_out; 6336 } 6337 6338 /* The rest of the vector(s) are associated to fast-path handler(s) */ 6339 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { 6340 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 6341 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 6342 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 6343 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 6344 LPFC_FP_DRIVER_HANDLER_NAME, 6345 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6346 if (rc) { 6347 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6348 "0486 MSI-X fast-path (%d) " 6349 "request_irq failed (%d)\n", index, rc); 6350 goto cfg_fail_out; 6351 } 6352 } 6353 6354 return rc; 6355 6356cfg_fail_out: 6357 /* free the irq already requested */ 6358 for (--index; index >= 1; index--) 6359 free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 6360 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6361 6362 /* free the irq already requested */ 6363 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6364 6365msi_fail_out: 6366 /* Unconfigure MSI-X capability structure */ 6367 pci_disable_msix(phba->pcidev); 6368 return rc; 6369} 6370 6371/** 6372 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 6373 * @phba: pointer to lpfc hba data structure. 6374 * 6375 * This routine is invoked to release the MSI-X vectors and then disable the 6376 * MSI-X interrupt mode to device with SLI-4 interface spec. 6377 **/ 6378static void 6379lpfc_sli4_disable_msix(struct lpfc_hba *phba) 6380{ 6381 int index; 6382 6383 /* Free up MSI-X multi-message vectors */ 6384 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6385 6386 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) 6387 free_irq(phba->sli4_hba.msix_entries[index].vector, 6388 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6389 /* Disable MSI-X */ 6390 pci_disable_msix(phba->pcidev); 6391 6392 return; 6393} 6394 6395/** 6396 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 6397 * @phba: pointer to lpfc hba data structure. 6398 * 6399 * This routine is invoked to enable the MSI interrupt mode to device with 6400 * SLI-4 interface spec. 
The kernel function pci_enable_msi() is called
6401 * to enable the MSI vector. The device driver is responsible for calling
6402 * request_irq() to register the MSI vector with an interrupt handler,
6403 * which is done in this function.
6404 *
6405 * Return codes
6406 * 0 - successful
6407 * other values - error
6408 **/
6409static int
6410lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6411{
6412 int rc, index;
6413
6414 rc = pci_enable_msi(phba->pcidev);
6415 if (!rc)
6416 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6417 "0487 PCI enable MSI mode success.\n");
6418 else {
6419 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6420 "0488 PCI enable MSI mode failed (%d)\n", rc);
6421 return rc;
6422 }
6423
6424 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6425 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6426 if (rc) {
6427 pci_disable_msi(phba->pcidev);
6428 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6429 "0490 MSI request_irq failed (%d)\n", rc);
6430 }
6431
6432 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6433 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6434 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6435 }
6436
6437 return rc;
6438}
6439
6440/**
6441 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6442 * @phba: pointer to lpfc hba data structure.
6443 *
6444 * This routine is invoked to disable the MSI interrupt mode on a device with
6445 * the SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
6446 * has done request_irq() on before calling pci_disable_msi(). Failure to do
6447 * so results in a BUG_ON() and leaves the device with MSI enabled, leaking
6448 * its vector.
6449 **/
6450static void
6451lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6452{
6453 free_irq(phba->pcidev->irq, phba);
6454 pci_disable_msi(phba->pcidev);
6455 return;
6456}
6457
6458/**
6459 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6460 * @phba: pointer to lpfc hba data structure.
6461 *
6462 * This routine is invoked to enable device interrupts and associate the
6463 * driver's interrupt handler(s) with interrupt vector(s) on a device with
6464 * the SLI-4 interface spec. Depending on the interrupt mode configured for
6465 * the driver, it will fall back from the configured interrupt mode to an
6466 * interrupt mode supported by the platform, kernel, and device, in
6467 * the order:
6468 * MSI-X -> MSI -> IRQ.
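 *
 * When MSI or INTx ends up being used, the body below still initializes
 * every fcp_eq_hdl entry with its index and phba back-pointer, so the single
 * shared interrupt can service all fast-path event queues, and the
 * established mode (or LPFC_INTR_ERROR on failure) is returned to the
 * caller.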
6469 * 6470 * Return codes 6471 * 0 - sucessful 6472 * other values - error 6473 **/ 6474static uint32_t 6475lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6476{ 6477 uint32_t intr_mode = LPFC_INTR_ERROR; 6478 int retval, index; 6479 6480 if (cfg_mode == 2) { 6481 /* Preparation before conf_msi mbox cmd */ 6482 retval = 0; 6483 if (!retval) { 6484 /* Now, try to enable MSI-X interrupt mode */ 6485 retval = lpfc_sli4_enable_msix(phba); 6486 if (!retval) { 6487 /* Indicate initialization to MSI-X mode */ 6488 phba->intr_type = MSIX; 6489 intr_mode = 2; 6490 } 6491 } 6492 } 6493 6494 /* Fallback to MSI if MSI-X initialization failed */ 6495 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6496 retval = lpfc_sli4_enable_msi(phba); 6497 if (!retval) { 6498 /* Indicate initialization to MSI mode */ 6499 phba->intr_type = MSI; 6500 intr_mode = 1; 6501 } 6502 } 6503 6504 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 6505 if (phba->intr_type == NONE) { 6506 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 6507 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6508 if (!retval) { 6509 /* Indicate initialization to INTx mode */ 6510 phba->intr_type = INTx; 6511 intr_mode = 0; 6512 for (index = 0; index < phba->cfg_fcp_eq_count; 6513 index++) { 6514 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 6515 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 6516 } 6517 } 6518 } 6519 return intr_mode; 6520} 6521 6522/** 6523 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 6524 * @phba: pointer to lpfc hba data structure. 6525 * 6526 * This routine is invoked to disable device interrupt and disassociate 6527 * the driver's interrupt handler(s) from interrupt vector(s) to device 6528 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 6529 * will release the interrupt vector(s) for the message signaled interrupt. 6530 **/ 6531static void 6532lpfc_sli4_disable_intr(struct lpfc_hba *phba) 6533{ 6534 /* Disable the currently initialized interrupt mode */ 6535 if (phba->intr_type == MSIX) 6536 lpfc_sli4_disable_msix(phba); 6537 else if (phba->intr_type == MSI) 6538 lpfc_sli4_disable_msi(phba); 6539 else if (phba->intr_type == INTx) 6540 free_irq(phba->pcidev->irq, phba); 6541 6542 /* Reset interrupt management states */ 6543 phba->intr_type = NONE; 6544 phba->sli.slistat.sli_intr = 0; 6545 6546 return; 6547} 6548 6549/** 6550 * lpfc_unset_hba - Unset SLI3 hba device initialization 6551 * @phba: pointer to lpfc hba data structure. 6552 * 6553 * This routine is invoked to unset the HBA device initialization steps to 6554 * a device with SLI-3 interface spec. 6555 **/ 6556static void 6557lpfc_unset_hba(struct lpfc_hba *phba) 6558{ 6559 struct lpfc_vport *vport = phba->pport; 6560 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6561 6562 spin_lock_irq(shost->host_lock); 6563 vport->load_flag |= FC_UNLOADING; 6564 spin_unlock_irq(shost->host_lock); 6565 6566 lpfc_stop_hba_timers(phba); 6567 6568 phba->pport->work_port_events = 0; 6569 6570 lpfc_sli_hba_down(phba); 6571 6572 lpfc_sli_brdrestart(phba); 6573 6574 lpfc_sli_disable_intr(phba); 6575 6576 return; 6577} 6578 6579/** 6580 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 6581 * @phba: pointer to lpfc hba data structure. 6582 * 6583 * This routine is invoked to unset the HBA device initialization steps to 6584 * a device with SLI-4 interface spec. 
6585 **/ 6586static void 6587lpfc_sli4_unset_hba(struct lpfc_hba *phba) 6588{ 6589 struct lpfc_vport *vport = phba->pport; 6590 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6591 6592 spin_lock_irq(shost->host_lock); 6593 vport->load_flag |= FC_UNLOADING; 6594 spin_unlock_irq(shost->host_lock); 6595 6596 phba->pport->work_port_events = 0; 6597 6598 lpfc_sli4_hba_down(phba); 6599 6600 lpfc_sli4_disable_intr(phba); 6601 6602 return; 6603} 6604 6605/** 6606 * lpfc_sli4_hba_unset - Unset the fcoe hba 6607 * @phba: Pointer to HBA context object. 6608 * 6609 * This function is called in the SLI4 code path to reset the HBA's FCoE 6610 * function. The caller is not required to hold any lock. This routine 6611 * issues PCI function reset mailbox command to reset the FCoE function. 6612 * At the end of the function, it calls lpfc_hba_down_post function to 6613 * free any pending commands. 6614 **/ 6615static void 6616lpfc_sli4_hba_unset(struct lpfc_hba *phba) 6617{ 6618 int wait_cnt = 0; 6619 LPFC_MBOXQ_t *mboxq; 6620 6621 lpfc_stop_hba_timers(phba); 6622 phba->sli4_hba.intr_enable = 0; 6623 6624 /* 6625 * Gracefully wait out the potential current outstanding asynchronous 6626 * mailbox command. 6627 */ 6628 6629 /* First, block any pending async mailbox command from posted */ 6630 spin_lock_irq(&phba->hbalock); 6631 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 6632 spin_unlock_irq(&phba->hbalock); 6633 /* Now, trying to wait it out if we can */ 6634 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6635 msleep(10); 6636 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 6637 break; 6638 } 6639 /* Forcefully release the outstanding mailbox command if timed out */ 6640 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6641 spin_lock_irq(&phba->hbalock); 6642 mboxq = phba->sli.mbox_active; 6643 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 6644 __lpfc_mbox_cmpl_put(phba, mboxq); 6645 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6646 phba->sli.mbox_active = NULL; 6647 spin_unlock_irq(&phba->hbalock); 6648 } 6649 6650 /* Tear down the queues in the HBA */ 6651 lpfc_sli4_queue_unset(phba); 6652 6653 /* Disable PCI subsystem interrupt */ 6654 lpfc_sli4_disable_intr(phba); 6655 6656 /* Stop kthread signal shall trigger work_done one more time */ 6657 kthread_stop(phba->worker_thread); 6658 6659 /* Stop the SLI4 device port */ 6660 phba->pport->work_port_events = 0; 6661} 6662 6663/** 6664 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 6665 * @pdev: pointer to PCI device 6666 * @pid: pointer to PCI device identifier 6667 * 6668 * This routine is to be called to attach a device with SLI-3 interface spec 6669 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 6670 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 6671 * information of the device and driver to see if the driver state that it can 6672 * support this kind of device. If the match is successful, the driver core 6673 * invokes this routine. If this routine determines it can claim the HBA, it 6674 * does all the initialization that it needs to do to handle the HBA properly. 
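 *
 * In outline, the body below enables the PCI device, maps its SLI-3 memory
 * regions, sets up driver resources and the iocb list, creates the SCSI
 * host and sysfs attributes, and then enables interrupts, falling back from
 * MSI-X to MSI to INTx until a mode passes the active-interrupt test,
 * before performing post-initialization setup and creating any static
 * vports.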
6675 * 6676 * Return code 6677 * 0 - driver can claim the device 6678 * negative value - driver can not claim the device 6679 **/ 6680static int __devinit 6681lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 6682{ 6683 struct lpfc_hba *phba; 6684 struct lpfc_vport *vport = NULL; 6685 int error; 6686 uint32_t cfg_mode, intr_mode; 6687 6688 /* Allocate memory for HBA structure */ 6689 phba = lpfc_hba_alloc(pdev); 6690 if (!phba) 6691 return -ENOMEM; 6692 6693 /* Perform generic PCI device enabling operation */ 6694 error = lpfc_enable_pci_dev(phba); 6695 if (error) { 6696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6697 "1401 Failed to enable pci device.\n"); 6698 goto out_free_phba; 6699 } 6700 6701 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 6702 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 6703 if (error) 6704 goto out_disable_pci_dev; 6705 6706 /* Set up SLI-3 specific device PCI memory space */ 6707 error = lpfc_sli_pci_mem_setup(phba); 6708 if (error) { 6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6710 "1402 Failed to set up pci memory space.\n"); 6711 goto out_disable_pci_dev; 6712 } 6713 6714 /* Set up phase-1 common device driver resources */ 6715 error = lpfc_setup_driver_resource_phase1(phba); 6716 if (error) { 6717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6718 "1403 Failed to set up driver resource.\n"); 6719 goto out_unset_pci_mem_s3; 6720 } 6721 6722 /* Set up SLI-3 specific device driver resources */ 6723 error = lpfc_sli_driver_resource_setup(phba); 6724 if (error) { 6725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6726 "1404 Failed to set up driver resource.\n"); 6727 goto out_unset_pci_mem_s3; 6728 } 6729 6730 /* Initialize and populate the iocb list per host */ 6731 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 6732 if (error) { 6733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6734 "1405 Failed to initialize iocb list.\n"); 6735 goto out_unset_driver_resource_s3; 6736 } 6737 6738 /* Set up common device driver resources */ 6739 error = lpfc_setup_driver_resource_phase2(phba); 6740 if (error) { 6741 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6742 "1406 Failed to set up driver resource.\n"); 6743 goto out_free_iocb_list; 6744 } 6745 6746 /* Create SCSI host to the physical port */ 6747 error = lpfc_create_shost(phba); 6748 if (error) { 6749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6750 "1407 Failed to create scsi host.\n"); 6751 goto out_unset_driver_resource; 6752 } 6753 6754 /* Configure sysfs attributes */ 6755 vport = phba->pport; 6756 error = lpfc_alloc_sysfs_attr(vport); 6757 if (error) { 6758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6759 "1476 Failed to allocate sysfs attr\n"); 6760 goto out_destroy_shost; 6761 } 6762 6763 /* Now, trying to enable interrupt and bring up the device */ 6764 cfg_mode = phba->cfg_use_msi; 6765 while (true) { 6766 /* Put device to a known state before enabling interrupt */ 6767 lpfc_stop_port(phba); 6768 /* Configure and enable interrupt */ 6769 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 6770 if (intr_mode == LPFC_INTR_ERROR) { 6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6772 "0431 Failed to enable interrupt.\n"); 6773 error = -ENODEV; 6774 goto out_free_sysfs_attr; 6775 } 6776 /* SLI-3 HBA setup */ 6777 if (lpfc_sli_hba_setup(phba)) { 6778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6779 "1477 Failed to set up hba\n"); 6780 error = -ENODEV; 6781 goto out_remove_device; 6782 } 6783 6784 /* Wait 50ms for the interrupts of previous mailbox commands */ 6785 
msleep(50); 6786 /* Check active interrupts on message signaled interrupts */ 6787 if (intr_mode == 0 || 6788 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6789 /* Log the current active interrupt mode */ 6790 phba->intr_mode = intr_mode; 6791 lpfc_log_intr_mode(phba, intr_mode); 6792 break; 6793 } else { 6794 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6795 "0447 Configure interrupt mode (%d) " 6796 "failed active interrupt test.\n", 6797 intr_mode); 6798 /* Disable the current interrupt mode */ 6799 lpfc_sli_disable_intr(phba); 6800 /* Try next level of interrupt mode */ 6801 cfg_mode = --intr_mode; 6802 } 6803 } 6804 6805 /* Perform post initialization setup */ 6806 lpfc_post_init_setup(phba); 6807 6808 /* Check if there are static vports to be created. */ 6809 lpfc_create_static_vport(phba); 6810 6811 return 0; 6812 6813out_remove_device: 6814 lpfc_unset_hba(phba); 6815out_free_sysfs_attr: 6816 lpfc_free_sysfs_attr(vport); 6817out_destroy_shost: 6818 lpfc_destroy_shost(phba); 6819out_unset_driver_resource: 6820 lpfc_unset_driver_resource_phase2(phba); 6821out_free_iocb_list: 6822 lpfc_free_iocb_list(phba); 6823out_unset_driver_resource_s3: 6824 lpfc_sli_driver_resource_unset(phba); 6825out_unset_pci_mem_s3: 6826 lpfc_sli_pci_mem_unset(phba); 6827out_disable_pci_dev: 6828 lpfc_disable_pci_dev(phba); 6829out_free_phba: 6830 lpfc_hba_free(phba); 6831 return error; 6832} 6833 6834/** 6835 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 6836 * @pdev: pointer to PCI device 6837 * 6838 * This routine is to be called to disattach a device with SLI-3 interface 6839 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 6840 * removed from PCI bus, it performs all the necessary cleanup for the HBA 6841 * device to be removed from the PCI subsystem properly. 6842 **/ 6843static void __devexit 6844lpfc_pci_remove_one_s3(struct pci_dev *pdev) 6845{ 6846 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6847 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6848 struct lpfc_vport **vports; 6849 struct lpfc_hba *phba = vport->phba; 6850 int i; 6851 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 6852 6853 spin_lock_irq(&phba->hbalock); 6854 vport->load_flag |= FC_UNLOADING; 6855 spin_unlock_irq(&phba->hbalock); 6856 6857 lpfc_free_sysfs_attr(vport); 6858 6859 /* Release all the vports against this physical port */ 6860 vports = lpfc_create_vport_work_array(phba); 6861 if (vports != NULL) 6862 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 6863 fc_vport_terminate(vports[i]->fc_vport); 6864 lpfc_destroy_vport_work_array(phba, vports); 6865 6866 /* Remove FC host and then SCSI host with the physical port */ 6867 fc_remove_host(shost); 6868 scsi_remove_host(shost); 6869 lpfc_cleanup(vport); 6870 6871 /* 6872 * Bring down the SLI Layer. This step disable all interrupts, 6873 * clears the rings, discards all mailbox commands, and resets 6874 * the HBA. 
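 * After that, the remove path below stops the worker thread, restarts the
 * board to flush the txcmplq, stops the HBA timers, removes the vport from
 * the port list, tears down debugfs, disables interrupts, and releases the
 * SCSI buffers, DMA regions and register mappings.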
6875 */ 6876 6877 /* HBA interrupt will be diabled after this call */ 6878 lpfc_sli_hba_down(phba); 6879 /* Stop kthread signal shall trigger work_done one more time */ 6880 kthread_stop(phba->worker_thread); 6881 /* Final cleanup of txcmplq and reset the HBA */ 6882 lpfc_sli_brdrestart(phba); 6883 6884 lpfc_stop_hba_timers(phba); 6885 spin_lock_irq(&phba->hbalock); 6886 list_del_init(&vport->listentry); 6887 spin_unlock_irq(&phba->hbalock); 6888 6889 lpfc_debugfs_terminate(vport); 6890 6891 /* Disable interrupt */ 6892 lpfc_sli_disable_intr(phba); 6893 6894 pci_set_drvdata(pdev, NULL); 6895 scsi_host_put(shost); 6896 6897 /* 6898 * Call scsi_free before mem_free since scsi bufs are released to their 6899 * corresponding pools here. 6900 */ 6901 lpfc_scsi_free(phba); 6902 lpfc_mem_free_all(phba); 6903 6904 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6905 phba->hbqslimp.virt, phba->hbqslimp.phys); 6906 6907 /* Free resources associated with SLI2 interface */ 6908 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6909 phba->slim2p.virt, phba->slim2p.phys); 6910 6911 /* unmap adapter SLIM and Control Registers */ 6912 iounmap(phba->ctrl_regs_memmap_p); 6913 iounmap(phba->slim_memmap_p); 6914 6915 lpfc_hba_free(phba); 6916 6917 pci_release_selected_regions(pdev, bars); 6918 pci_disable_device(pdev); 6919} 6920 6921/** 6922 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 6923 * @pdev: pointer to PCI device 6924 * @msg: power management message 6925 * 6926 * This routine is to be called from the kernel's PCI subsystem to support 6927 * system Power Management (PM) to device with SLI-3 interface spec. When 6928 * PM invokes this method, it quiesces the device by stopping the driver's 6929 * worker thread for the device, turning off device's interrupt and DMA, 6930 * and bring the device offline. Note that as the driver implements the 6931 * minimum PM requirements to a power-aware driver's PM support for the 6932 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 6933 * to the suspend() method call will be treated as SUSPEND and the driver will 6934 * fully reinitialize its device during resume() method call, the driver will 6935 * set device to PCI_D3hot state in PCI config space instead of setting it 6936 * according to the @msg provided by the PM. 6937 * 6938 * Return code 6939 * 0 - driver suspended the device 6940 * Error otherwise 6941 **/ 6942static int 6943lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 6944{ 6945 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6946 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6947 6948 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6949 "0473 PCI device Power Management suspend.\n"); 6950 6951 /* Bring down the device */ 6952 lpfc_offline_prep(phba); 6953 lpfc_offline(phba); 6954 kthread_stop(phba->worker_thread); 6955 6956 /* Disable interrupt from device */ 6957 lpfc_sli_disable_intr(phba); 6958 6959 /* Save device state to PCI config space */ 6960 pci_save_state(pdev); 6961 pci_set_power_state(pdev, PCI_D3hot); 6962 6963 return 0; 6964} 6965 6966/** 6967 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 6968 * @pdev: pointer to PCI device 6969 * 6970 * This routine is to be called from the kernel's PCI subsystem to support 6971 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 6972 * invokes this method, it restores the device's PCI config space state and 6973 * fully reinitializes the device and brings it online. Note that as the 6974 * driver implements the minimum PM requirements to a power-aware driver's 6975 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 6976 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 6977 * driver will fully reinitialize its device during resume() method call, 6978 * the device will be set to PCI_D0 directly in PCI config space before 6979 * restoring the state. 6980 * 6981 * Return code 6982 * 0 - driver suspended the device 6983 * Error otherwise 6984 **/ 6985static int 6986lpfc_pci_resume_one_s3(struct pci_dev *pdev) 6987{ 6988 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6989 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6990 uint32_t intr_mode; 6991 int error; 6992 6993 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6994 "0452 PCI device Power Management resume.\n"); 6995 6996 /* Restore device state from PCI config space */ 6997 pci_set_power_state(pdev, PCI_D0); 6998 pci_restore_state(pdev); 6999 if (pdev->is_busmaster) 7000 pci_set_master(pdev); 7001 7002 /* Startup the kernel thread for this host adapter. */ 7003 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7004 "lpfc_worker_%d", phba->brd_no); 7005 if (IS_ERR(phba->worker_thread)) { 7006 error = PTR_ERR(phba->worker_thread); 7007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7008 "0434 PM resume failed to start worker " 7009 "thread: error=x%x.\n", error); 7010 return error; 7011 } 7012 7013 /* Configure and enable interrupt */ 7014 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7015 if (intr_mode == LPFC_INTR_ERROR) { 7016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7017 "0430 PM resume Failed to enable interrupt\n"); 7018 return -EIO; 7019 } else 7020 phba->intr_mode = intr_mode; 7021 7022 /* Restart HBA and bring it online */ 7023 lpfc_sli_brdrestart(phba); 7024 lpfc_online(phba); 7025 7026 /* Log the current active interrupt mode */ 7027 lpfc_log_intr_mode(phba, phba->intr_mode); 7028 7029 return 0; 7030} 7031 7032/** 7033 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7034 * @pdev: pointer to PCI device. 7035 * @state: the current PCI connection state. 7036 * 7037 * This routine is called from the PCI subsystem for I/O error handling to 7038 * device with SLI-3 interface spec. This function is called by the PCI 7039 * subsystem after a PCI bus error affecting this device has been detected. 7040 * When this function is invoked, it will need to stop all the I/Os and 7041 * interrupt(s) to the device. Once that is done, it will return 7042 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 7043 * as desired. 
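 *
 * As the body below shows, a permanent channel failure instead blocks all
 * SCSI I/O, flushes the FCP rings and returns PCI_ERS_RESULT_DISCONNECT;
 * otherwise the device is disabled, outstanding iocbs on the FCP ring are
 * aborted and interrupts are turned off before the slot reset is requested.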
7044 * 7045 * Return codes 7046 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7047 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7048 **/ 7049static pci_ers_result_t 7050lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 7051{ 7052 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7053 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7054 struct lpfc_sli *psli = &phba->sli; 7055 struct lpfc_sli_ring *pring; 7056 7057 if (state == pci_channel_io_perm_failure) { 7058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7059 "0472 PCI channel I/O permanent failure\n"); 7060 /* Block all SCSI devices' I/Os on the host */ 7061 lpfc_scsi_dev_block(phba); 7062 /* Clean up all driver's outstanding SCSI I/Os */ 7063 lpfc_sli_flush_fcp_rings(phba); 7064 return PCI_ERS_RESULT_DISCONNECT; 7065 } 7066 7067 pci_disable_device(pdev); 7068 /* 7069 * There may be I/Os dropped by the firmware. 7070 * Error iocb (I/O) on txcmplq and let the SCSI layer 7071 * retry it after re-establishing link. 7072 */ 7073 pring = &psli->ring[psli->fcp_ring]; 7074 lpfc_sli_abort_iocb_ring(phba, pring); 7075 7076 /* Disable interrupt */ 7077 lpfc_sli_disable_intr(phba); 7078 7079 /* Request a slot reset. */ 7080 return PCI_ERS_RESULT_NEED_RESET; 7081} 7082 7083/** 7084 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 7085 * @pdev: pointer to PCI device. 7086 * 7087 * This routine is called from the PCI subsystem for error handling to 7088 * device with SLI-3 interface spec. This is called after PCI bus has been 7089 * reset to restart the PCI card from scratch, as if from a cold-boot. 7090 * During the PCI subsystem error recovery, after driver returns 7091 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 7092 * recovery and then call this routine before calling the .resume method 7093 * to recover the device. This function will initialize the HBA device, 7094 * enable the interrupt, but it will just put the HBA to offline state 7095 * without passing any I/O traffic. 
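 *
 * If this handler succeeds, the PCI core is expected to complete recovery
 * by calling the .resume handler (lpfc_io_resume_s3() below), which invokes
 * lpfc_online() to bring the port back into service.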
7096 * 7097 * Return codes 7098 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7099 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7100 */ 7101static pci_ers_result_t 7102lpfc_io_slot_reset_s3(struct pci_dev *pdev) 7103{ 7104 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7105 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7106 struct lpfc_sli *psli = &phba->sli; 7107 uint32_t intr_mode; 7108 7109 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 7110 if (pci_enable_device_mem(pdev)) { 7111 printk(KERN_ERR "lpfc: Cannot re-enable " 7112 "PCI device after reset.\n"); 7113 return PCI_ERS_RESULT_DISCONNECT; 7114 } 7115 7116 pci_restore_state(pdev); 7117 if (pdev->is_busmaster) 7118 pci_set_master(pdev); 7119 7120 spin_lock_irq(&phba->hbalock); 7121 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7122 spin_unlock_irq(&phba->hbalock); 7123 7124 /* Configure and enable interrupt */ 7125 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7126 if (intr_mode == LPFC_INTR_ERROR) { 7127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7128 "0427 Cannot re-enable interrupt after " 7129 "slot reset.\n"); 7130 return PCI_ERS_RESULT_DISCONNECT; 7131 } else 7132 phba->intr_mode = intr_mode; 7133 7134 /* Take device offline; this will perform cleanup */ 7135 lpfc_offline(phba); 7136 lpfc_sli_brdrestart(phba); 7137 7138 /* Log the current active interrupt mode */ 7139 lpfc_log_intr_mode(phba, phba->intr_mode); 7140 7141 return PCI_ERS_RESULT_RECOVERED; 7142} 7143 7144/** 7145 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 7146 * @pdev: pointer to PCI device 7147 * 7148 * This routine is called from the PCI subsystem for error handling to device 7149 * with SLI-3 interface spec. It is called when kernel error recovery tells 7150 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 7151 * error recovery. After this call, traffic can start to flow from this device 7152 * again. 7153 */ 7154static void 7155lpfc_io_resume_s3(struct pci_dev *pdev) 7156{ 7157 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7158 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7159 7160 lpfc_online(phba); 7161} 7162 7163/** 7164 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 7165 * @phba: pointer to lpfc hba data structure. 7166 * 7167 * returns the number of ELS/CT IOCBs to reserve 7168 **/ 7169int 7170lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 7171{ 7172 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 7173 7174 if (max_xri <= 100) 7175 return 4; 7176 else if (max_xri <= 256) 7177 return 8; 7178 else if (max_xri <= 512) 7179 return 16; 7180 else if (max_xri <= 1024) 7181 return 32; 7182 else 7183 return 48; 7184} 7185 7186/** 7187 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 7188 * @pdev: pointer to PCI device 7189 * @pid: pointer to PCI device identifier 7190 * 7191 * This routine is called from the kernel's PCI subsystem to device with 7192 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 7193 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 7194 * information of the device and driver to see if the driver state that it 7195 * can support this kind of device. If the match is successful, the driver 7196 * core invokes this routine. If this routine determines it can claim the HBA, 7197 * it does all the initialization that it needs to do to handle the HBA 7198 * properly. 
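 *
 * As part of bringing the port up, the routine below also probes for a
 * working interrupt configuration: starting from the user-requested mode
 * (cfg_use_msi), it issues a small batch of NOP mailbox commands to verify
 * that interrupts are actually being delivered, and on failure it falls
 * back one interrupt mode at a time (roughly MSI-X, then MSI, then INTx).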
7199 * 7200 * Return code 7201 * 0 - driver can claim the device 7202 * negative value - driver can not claim the device 7203 **/ 7204static int __devinit 7205lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 7206{ 7207 struct lpfc_hba *phba; 7208 struct lpfc_vport *vport = NULL; 7209 int error; 7210 uint32_t cfg_mode, intr_mode; 7211 int mcnt; 7212 7213 /* Allocate memory for HBA structure */ 7214 phba = lpfc_hba_alloc(pdev); 7215 if (!phba) 7216 return -ENOMEM; 7217 7218 /* Perform generic PCI device enabling operation */ 7219 error = lpfc_enable_pci_dev(phba); 7220 if (error) { 7221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7222 "1409 Failed to enable pci device.\n"); 7223 goto out_free_phba; 7224 } 7225 7226 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 7227 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 7228 if (error) 7229 goto out_disable_pci_dev; 7230 7231 /* Set up SLI-4 specific device PCI memory space */ 7232 error = lpfc_sli4_pci_mem_setup(phba); 7233 if (error) { 7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7235 "1410 Failed to set up pci memory space.\n"); 7236 goto out_disable_pci_dev; 7237 } 7238 7239 /* Set up phase-1 common device driver resources */ 7240 error = lpfc_setup_driver_resource_phase1(phba); 7241 if (error) { 7242 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7243 "1411 Failed to set up driver resource.\n"); 7244 goto out_unset_pci_mem_s4; 7245 } 7246 7247 /* Set up SLI-4 Specific device driver resources */ 7248 error = lpfc_sli4_driver_resource_setup(phba); 7249 if (error) { 7250 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7251 "1412 Failed to set up driver resource.\n"); 7252 goto out_unset_pci_mem_s4; 7253 } 7254 7255 /* Initialize and populate the iocb list per host */ 7256 error = lpfc_init_iocb_list(phba, 7257 phba->sli4_hba.max_cfg_param.max_xri); 7258 if (error) { 7259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7260 "1413 Failed to initialize iocb list.\n"); 7261 goto out_unset_driver_resource_s4; 7262 } 7263 7264 /* Set up common device driver resources */ 7265 error = lpfc_setup_driver_resource_phase2(phba); 7266 if (error) { 7267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7268 "1414 Failed to set up driver resource.\n"); 7269 goto out_free_iocb_list; 7270 } 7271 7272 /* Create SCSI host to the physical port */ 7273 error = lpfc_create_shost(phba); 7274 if (error) { 7275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7276 "1415 Failed to create scsi host.\n"); 7277 goto out_unset_driver_resource; 7278 } 7279 7280 /* Configure sysfs attributes */ 7281 vport = phba->pport; 7282 error = lpfc_alloc_sysfs_attr(vport); 7283 if (error) { 7284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7285 "1416 Failed to allocate sysfs attr\n"); 7286 goto out_destroy_shost; 7287 } 7288 7289 /* Now, trying to enable interrupt and bring up the device */ 7290 cfg_mode = phba->cfg_use_msi; 7291 while (true) { 7292 /* Put device to a known state before enabling interrupt */ 7293 lpfc_stop_port(phba); 7294 /* Configure and enable interrupt */ 7295 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 7296 if (intr_mode == LPFC_INTR_ERROR) { 7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7298 "0426 Failed to enable interrupt.\n"); 7299 error = -ENODEV; 7300 goto out_free_sysfs_attr; 7301 } 7302 /* Set up SLI-4 HBA */ 7303 if (lpfc_sli4_hba_setup(phba)) { 7304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7305 "1421 Failed to set up hba\n"); 7306 error = -ENODEV; 7307 goto out_disable_intr; 7308 } 7309 7310 /* Send NOP mbx cmds for non-INTx 
mode active interrupt test */
7311 if (intr_mode != 0)
7312 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7313 LPFC_ACT_INTR_CNT);
7314
7315 /* Check active interrupts received only for MSI/MSI-X */
7316 if (intr_mode == 0 ||
7317 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7318 /* Log the current active interrupt mode */
7319 phba->intr_mode = intr_mode;
7320 lpfc_log_intr_mode(phba, intr_mode);
7321 break;
7322 }
7323 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7324 "0451 Configure interrupt mode (%d) "
7325 "failed active interrupt test.\n",
7326 intr_mode);
7327 /* Unset the previous SLI-4 HBA setup */
7328 lpfc_sli4_unset_hba(phba);
7329 /* Try next level of interrupt mode */
7330 cfg_mode = --intr_mode;
7331 }
7332
7333 /* Perform post initialization setup */
7334 lpfc_post_init_setup(phba);
7335
7336 return 0;
7337
7338out_disable_intr:
7339 lpfc_sli4_disable_intr(phba);
7340out_free_sysfs_attr:
7341 lpfc_free_sysfs_attr(vport);
7342out_destroy_shost:
7343 lpfc_destroy_shost(phba);
7344out_unset_driver_resource:
7345 lpfc_unset_driver_resource_phase2(phba);
7346out_free_iocb_list:
7347 lpfc_free_iocb_list(phba);
7348out_unset_driver_resource_s4:
7349 lpfc_sli4_driver_resource_unset(phba);
7350out_unset_pci_mem_s4:
7351 lpfc_sli4_pci_mem_unset(phba);
7352out_disable_pci_dev:
7353 lpfc_disable_pci_dev(phba);
7354out_free_phba:
7355 lpfc_hba_free(phba);
7356 return error;
7357}
7358
7359/**
7360 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7361 * @pdev: pointer to PCI device
7362 *
7363 * This routine is called from the kernel's PCI subsystem to device with
7364 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7365 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7366 * device to be removed from the PCI subsystem properly.
7367 **/
7368static void __devexit
7369lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7370{
7371 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7372 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7373 struct lpfc_vport **vports;
7374 struct lpfc_hba *phba = vport->phba;
7375 int i;
7376
7377 /* Mark the device unloading flag */
7378 spin_lock_irq(&phba->hbalock);
7379 vport->load_flag |= FC_UNLOADING;
7380 spin_unlock_irq(&phba->hbalock);
7381
7382 /* Free the HBA sysfs attributes */
7383 lpfc_free_sysfs_attr(vport);
7384
7385 /* Release all the vports against this physical port */
7386 vports = lpfc_create_vport_work_array(phba);
7387 if (vports != NULL)
7388 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7389 fc_vport_terminate(vports[i]->fc_vport);
7390 lpfc_destroy_vport_work_array(phba, vports);
7391
7392 /* Remove FC host and then SCSI host with the physical port */
7393 fc_remove_host(shost);
7394 scsi_remove_host(shost);
7395
7396 /* Perform cleanup on the physical port */
7397 lpfc_cleanup(vport);
7398
7399 /*
7400 * Bring down the SLI Layer. This step disables all interrupts,
7401 * clears the rings, discards all mailbox commands, and resets
7402 * the HBA FCoE function.
7403 */
7404 lpfc_debugfs_terminate(vport);
7405 lpfc_sli4_hba_unset(phba);
7406
7407 spin_lock_irq(&phba->hbalock);
7408 list_del_init(&vport->listentry);
7409 spin_unlock_irq(&phba->hbalock);
7410
7411 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7412 * buffers are released to their corresponding pools here.
7413 */
7414 lpfc_scsi_free(phba);
7415 lpfc_sli4_driver_resource_unset(phba);
7416
7417 /* Unmap adapter Control and Doorbell registers */
7418 lpfc_sli4_pci_mem_unset(phba);
7419
7420 /* Release PCI resources and disable device's PCI function */
7421 scsi_host_put(shost);
7422 lpfc_disable_pci_dev(phba);
7423
7424 /* Finally, free the driver's device data structure */
7425 lpfc_hba_free(phba);
7426
7427 return;
7428}
7429
7430/**
7431 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7432 * @pdev: pointer to PCI device
7433 * @msg: power management message
7434 *
7435 * This routine is called from the kernel's PCI subsystem to support system
7436 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7437 * this method, it quiesces the device by stopping the driver's worker
7438 * thread for the device, turning off the device's interrupt and DMA, and
7439 * brings the device offline. Note that as the driver implements the minimum
7440 * PM requirements to a power-aware driver's PM support for suspend/resume --
7441 * all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7442 * method call will be treated as SUSPEND and the driver will fully
7443 * reinitialize its device during resume() method call, the driver will set
7444 * the device to PCI_D3hot state in PCI config space instead of setting it
7445 * according to the @msg provided by the PM.
7446 *
7447 * Return code
7448 * 0 - driver suspended the device
7449 * Error otherwise
7450 **/
7451static int
7452lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
7453{
7454 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7455 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7456
7457 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7458 "0298 PCI device Power Management suspend.\n");
7459
7460 /* Bring down the device */
7461 lpfc_offline_prep(phba);
7462 lpfc_offline(phba);
7463 kthread_stop(phba->worker_thread);
7464
7465 /* Disable interrupt from device */
7466 lpfc_sli4_disable_intr(phba);
7467
7468 /* Save device state to PCI config space */
7469 pci_save_state(pdev);
7470 pci_set_power_state(pdev, PCI_D3hot);
7471
7472 return 0;
7473}
7474
7475/**
7476 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7477 * @pdev: pointer to PCI device
7478 *
7479 * This routine is called from the kernel's PCI subsystem to support system
7480 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7481 * this method, it restores the device's PCI config space state and fully
7482 * reinitializes the device and brings it online. Note that as the driver
7483 * implements the minimum PM requirements to a power-aware driver's PM for
7484 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7485 * to the suspend() method call will be treated as SUSPEND and the driver
7486 * will fully reinitialize its device during resume() method call, the device
7487 * will be set to PCI_D0 directly in PCI config space before restoring the
7488 * state.
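 *
 * Apart from using the SLI-4 interrupt helpers (lpfc_sli4_enable_intr and
 * lpfc_sli4_disable_intr), this suspend/resume pair mirrors the SLI-3
 * versions above: restore PCI state, restart the worker thread, re-enable
 * the interrupt, then restart the HBA and bring it online.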
7489 * 7490 * Return code 7491 * 0 - driver suspended the device 7492 * Error otherwise 7493 **/ 7494static int 7495lpfc_pci_resume_one_s4(struct pci_dev *pdev) 7496{ 7497 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7498 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7499 uint32_t intr_mode; 7500 int error; 7501 7502 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7503 "0292 PCI device Power Management resume.\n"); 7504 7505 /* Restore device state from PCI config space */ 7506 pci_set_power_state(pdev, PCI_D0); 7507 pci_restore_state(pdev); 7508 if (pdev->is_busmaster) 7509 pci_set_master(pdev); 7510 7511 /* Startup the kernel thread for this host adapter. */ 7512 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7513 "lpfc_worker_%d", phba->brd_no); 7514 if (IS_ERR(phba->worker_thread)) { 7515 error = PTR_ERR(phba->worker_thread); 7516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7517 "0293 PM resume failed to start worker " 7518 "thread: error=x%x.\n", error); 7519 return error; 7520 } 7521 7522 /* Configure and enable interrupt */ 7523 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 7524 if (intr_mode == LPFC_INTR_ERROR) { 7525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7526 "0294 PM resume Failed to enable interrupt\n"); 7527 return -EIO; 7528 } else 7529 phba->intr_mode = intr_mode; 7530 7531 /* Restart HBA and bring it online */ 7532 lpfc_sli_brdrestart(phba); 7533 lpfc_online(phba); 7534 7535 /* Log the current active interrupt mode */ 7536 lpfc_log_intr_mode(phba, phba->intr_mode); 7537 7538 return 0; 7539} 7540 7541/** 7542 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 7543 * @pdev: pointer to PCI device. 7544 * @state: the current PCI connection state. 7545 * 7546 * This routine is called from the PCI subsystem for error handling to device 7547 * with SLI-4 interface spec. This function is called by the PCI subsystem 7548 * after a PCI bus error affecting this device has been detected. When this 7549 * function is invoked, it will need to stop all the I/Os and interrupt(s) 7550 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 7551 * for the PCI subsystem to perform proper recovery as desired. 7552 * 7553 * Return codes 7554 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7555 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7556 **/ 7557static pci_ers_result_t 7558lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 7559{ 7560 return PCI_ERS_RESULT_NEED_RESET; 7561} 7562 7563/** 7564 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 7565 * @pdev: pointer to PCI device. 7566 * 7567 * This routine is called from the PCI subsystem for error handling to device 7568 * with SLI-4 interface spec. It is called after PCI bus has been reset to 7569 * restart the PCI card from scratch, as if from a cold-boot. During the 7570 * PCI subsystem error recovery, after the driver returns 7571 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 7572 * recovery and then call this routine before calling the .resume method to 7573 * recover the device. This function will initialize the HBA device, enable 7574 * the interrupt, but it will just put the HBA to offline state without 7575 * passing any I/O traffic. 
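 *
 * Note that at this revision the SLI-4 error handlers are still minimal:
 * lpfc_io_error_detected_s4() above simply requests a reset, this routine
 * reports PCI_ERS_RESULT_RECOVERED without performing the reinitialization
 * described above, and lpfc_io_resume_s4() below is a no-op.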
7576 * 7577 * Return codes 7578 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7579 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7580 */ 7581static pci_ers_result_t 7582lpfc_io_slot_reset_s4(struct pci_dev *pdev) 7583{ 7584 return PCI_ERS_RESULT_RECOVERED; 7585} 7586 7587/** 7588 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 7589 * @pdev: pointer to PCI device 7590 * 7591 * This routine is called from the PCI subsystem for error handling to device 7592 * with SLI-4 interface spec. It is called when kernel error recovery tells 7593 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 7594 * error recovery. After this call, traffic can start to flow from this device 7595 * again. 7596 **/ 7597static void 7598lpfc_io_resume_s4(struct pci_dev *pdev) 7599{ 7600 return; 7601} 7602 7603/** 7604 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 7605 * @pdev: pointer to PCI device 7606 * @pid: pointer to PCI device identifier 7607 * 7608 * This routine is to be registered to the kernel's PCI subsystem. When an 7609 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 7610 * at PCI device-specific information of the device and driver to see if the 7611 * driver state that it can support this kind of device. If the match is 7612 * successful, the driver core invokes this routine. This routine dispatches 7613 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 7614 * do all the initialization that it needs to do to handle the HBA device 7615 * properly. 7616 * 7617 * Return code 7618 * 0 - driver can claim the device 7619 * negative value - driver can not claim the device 7620 **/ 7621static int __devinit 7622lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 7623{ 7624 int rc; 7625 uint16_t dev_id; 7626 7627 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) 7628 return -ENODEV; 7629 7630 switch (dev_id) { 7631 case PCI_DEVICE_ID_TIGERSHARK: 7632 case PCI_DEVICE_ID_TIGERSHARK_S: 7633 rc = lpfc_pci_probe_one_s4(pdev, pid); 7634 break; 7635 default: 7636 rc = lpfc_pci_probe_one_s3(pdev, pid); 7637 break; 7638 } 7639 return rc; 7640} 7641 7642/** 7643 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 7644 * @pdev: pointer to PCI device 7645 * 7646 * This routine is to be registered to the kernel's PCI subsystem. When an 7647 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 7648 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 7649 * remove routine, which will perform all the necessary cleanup for the 7650 * device to be removed from the PCI subsystem properly. 
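 *
 * The pci_dev_grp keying this dispatch is established during probe (the
 * SLI-4 probe path above, for instance, sets up the LPFC_PCI_DEV_OC API
 * table via lpfc_api_table_setup), and the same switch pattern is reused
 * by the suspend, resume and error-handling wrappers that follow.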
7651 **/ 7652static void __devexit 7653lpfc_pci_remove_one(struct pci_dev *pdev) 7654{ 7655 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7656 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7657 7658 switch (phba->pci_dev_grp) { 7659 case LPFC_PCI_DEV_LP: 7660 lpfc_pci_remove_one_s3(pdev); 7661 break; 7662 case LPFC_PCI_DEV_OC: 7663 lpfc_pci_remove_one_s4(pdev); 7664 break; 7665 default: 7666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7667 "1424 Invalid PCI device group: 0x%x\n", 7668 phba->pci_dev_grp); 7669 break; 7670 } 7671 return; 7672} 7673 7674/** 7675 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 7676 * @pdev: pointer to PCI device 7677 * @msg: power management message 7678 * 7679 * This routine is to be registered to the kernel's PCI subsystem to support 7680 * system Power Management (PM). When PM invokes this method, it dispatches 7681 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 7682 * suspend the device. 7683 * 7684 * Return code 7685 * 0 - driver suspended the device 7686 * Error otherwise 7687 **/ 7688static int 7689lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 7690{ 7691 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7692 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7693 int rc = -ENODEV; 7694 7695 switch (phba->pci_dev_grp) { 7696 case LPFC_PCI_DEV_LP: 7697 rc = lpfc_pci_suspend_one_s3(pdev, msg); 7698 break; 7699 case LPFC_PCI_DEV_OC: 7700 rc = lpfc_pci_suspend_one_s4(pdev, msg); 7701 break; 7702 default: 7703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7704 "1425 Invalid PCI device group: 0x%x\n", 7705 phba->pci_dev_grp); 7706 break; 7707 } 7708 return rc; 7709} 7710 7711/** 7712 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 7713 * @pdev: pointer to PCI device 7714 * 7715 * This routine is to be registered to the kernel's PCI subsystem to support 7716 * system Power Management (PM). When PM invokes this method, it dispatches 7717 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 7718 * resume the device. 7719 * 7720 * Return code 7721 * 0 - driver suspended the device 7722 * Error otherwise 7723 **/ 7724static int 7725lpfc_pci_resume_one(struct pci_dev *pdev) 7726{ 7727 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7728 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7729 int rc = -ENODEV; 7730 7731 switch (phba->pci_dev_grp) { 7732 case LPFC_PCI_DEV_LP: 7733 rc = lpfc_pci_resume_one_s3(pdev); 7734 break; 7735 case LPFC_PCI_DEV_OC: 7736 rc = lpfc_pci_resume_one_s4(pdev); 7737 break; 7738 default: 7739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7740 "1426 Invalid PCI device group: 0x%x\n", 7741 phba->pci_dev_grp); 7742 break; 7743 } 7744 return rc; 7745} 7746 7747/** 7748 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 7749 * @pdev: pointer to PCI device. 7750 * @state: the current PCI connection state. 7751 * 7752 * This routine is registered to the PCI subsystem for error handling. This 7753 * function is called by the PCI subsystem after a PCI bus error affecting 7754 * this device has been detected. When this routine is invoked, it dispatches 7755 * the action to the proper SLI-3 or SLI-4 device error detected handling 7756 * routine, which will perform the proper error detected operation. 
7757 * 7758 * Return codes 7759 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7760 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7761 **/ 7762static pci_ers_result_t 7763lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 7764{ 7765 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7766 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7767 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 7768 7769 switch (phba->pci_dev_grp) { 7770 case LPFC_PCI_DEV_LP: 7771 rc = lpfc_io_error_detected_s3(pdev, state); 7772 break; 7773 case LPFC_PCI_DEV_OC: 7774 rc = lpfc_io_error_detected_s4(pdev, state); 7775 break; 7776 default: 7777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7778 "1427 Invalid PCI device group: 0x%x\n", 7779 phba->pci_dev_grp); 7780 break; 7781 } 7782 return rc; 7783} 7784 7785/** 7786 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 7787 * @pdev: pointer to PCI device. 7788 * 7789 * This routine is registered to the PCI subsystem for error handling. This 7790 * function is called after PCI bus has been reset to restart the PCI card 7791 * from scratch, as if from a cold-boot. When this routine is invoked, it 7792 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 7793 * routine, which will perform the proper device reset. 7794 * 7795 * Return codes 7796 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7797 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7798 **/ 7799static pci_ers_result_t 7800lpfc_io_slot_reset(struct pci_dev *pdev) 7801{ 7802 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7803 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7804 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 7805 7806 switch (phba->pci_dev_grp) { 7807 case LPFC_PCI_DEV_LP: 7808 rc = lpfc_io_slot_reset_s3(pdev); 7809 break; 7810 case LPFC_PCI_DEV_OC: 7811 rc = lpfc_io_slot_reset_s4(pdev); 7812 break; 7813 default: 7814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7815 "1428 Invalid PCI device group: 0x%x\n", 7816 phba->pci_dev_grp); 7817 break; 7818 } 7819 return rc; 7820} 7821 7822/** 7823 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 7824 * @pdev: pointer to PCI device 7825 * 7826 * This routine is registered to the PCI subsystem for error handling. It 7827 * is called when kernel error recovery tells the lpfc driver that it is 7828 * OK to resume normal PCI operation after PCI bus error recovery. When 7829 * this routine is invoked, it dispatches the action to the proper SLI-3 7830 * or SLI-4 device io_resume routine, which will resume the device operation. 
7831 **/ 7832static void 7833lpfc_io_resume(struct pci_dev *pdev) 7834{ 7835 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7836 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7837 7838 switch (phba->pci_dev_grp) { 7839 case LPFC_PCI_DEV_LP: 7840 lpfc_io_resume_s3(pdev); 7841 break; 7842 case LPFC_PCI_DEV_OC: 7843 lpfc_io_resume_s4(pdev); 7844 break; 7845 default: 7846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7847 "1429 Invalid PCI device group: 0x%x\n", 7848 phba->pci_dev_grp); 7849 break; 7850 } 7851 return; 7852} 7853 7854static struct pci_device_id lpfc_id_table[] = { 7855 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 7856 PCI_ANY_ID, PCI_ANY_ID, }, 7857 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 7858 PCI_ANY_ID, PCI_ANY_ID, }, 7859 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 7860 PCI_ANY_ID, PCI_ANY_ID, }, 7861 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 7862 PCI_ANY_ID, PCI_ANY_ID, }, 7863 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 7864 PCI_ANY_ID, PCI_ANY_ID, }, 7865 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 7866 PCI_ANY_ID, PCI_ANY_ID, }, 7867 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 7868 PCI_ANY_ID, PCI_ANY_ID, }, 7869 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 7870 PCI_ANY_ID, PCI_ANY_ID, }, 7871 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 7872 PCI_ANY_ID, PCI_ANY_ID, }, 7873 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 7874 PCI_ANY_ID, PCI_ANY_ID, }, 7875 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 7876 PCI_ANY_ID, PCI_ANY_ID, }, 7877 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 7878 PCI_ANY_ID, PCI_ANY_ID, }, 7879 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 7880 PCI_ANY_ID, PCI_ANY_ID, }, 7881 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 7882 PCI_ANY_ID, PCI_ANY_ID, }, 7883 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 7884 PCI_ANY_ID, PCI_ANY_ID, }, 7885 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 7886 PCI_ANY_ID, PCI_ANY_ID, }, 7887 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 7888 PCI_ANY_ID, PCI_ANY_ID, }, 7889 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 7890 PCI_ANY_ID, PCI_ANY_ID, }, 7891 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 7892 PCI_ANY_ID, PCI_ANY_ID, }, 7893 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 7894 PCI_ANY_ID, PCI_ANY_ID, }, 7895 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 7896 PCI_ANY_ID, PCI_ANY_ID, }, 7897 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 7898 PCI_ANY_ID, PCI_ANY_ID, }, 7899 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 7900 PCI_ANY_ID, PCI_ANY_ID, }, 7901 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 7902 PCI_ANY_ID, PCI_ANY_ID, }, 7903 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 7904 PCI_ANY_ID, PCI_ANY_ID, }, 7905 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 7906 PCI_ANY_ID, PCI_ANY_ID, }, 7907 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 7908 PCI_ANY_ID, PCI_ANY_ID, }, 7909 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 7910 PCI_ANY_ID, PCI_ANY_ID, }, 7911 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 7912 PCI_ANY_ID, PCI_ANY_ID, }, 7913 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 7914 PCI_ANY_ID, PCI_ANY_ID, }, 7915 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 7916 PCI_ANY_ID, PCI_ANY_ID, }, 7917 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 7918 PCI_ANY_ID, PCI_ANY_ID, }, 7919 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 7920 PCI_ANY_ID, PCI_ANY_ID, }, 7921 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 7922 PCI_ANY_ID, PCI_ANY_ID, }, 7923 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 7924 PCI_ANY_ID, PCI_ANY_ID, }, 7925 {PCI_VENDOR_ID_EMULEX, 
PCI_DEVICE_ID_PROTEUS_PF, 7926 PCI_ANY_ID, PCI_ANY_ID, }, 7927 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7928 PCI_ANY_ID, PCI_ANY_ID, }, 7929 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 7930 PCI_ANY_ID, PCI_ANY_ID, }, 7931 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S, 7932 PCI_ANY_ID, PCI_ANY_ID, }, 7933 { 0 } 7934}; 7935 7936MODULE_DEVICE_TABLE(pci, lpfc_id_table); 7937 7938static struct pci_error_handlers lpfc_err_handler = { 7939 .error_detected = lpfc_io_error_detected, 7940 .slot_reset = lpfc_io_slot_reset, 7941 .resume = lpfc_io_resume, 7942}; 7943 7944static struct pci_driver lpfc_driver = { 7945 .name = LPFC_DRIVER_NAME, 7946 .id_table = lpfc_id_table, 7947 .probe = lpfc_pci_probe_one, 7948 .remove = __devexit_p(lpfc_pci_remove_one), 7949 .suspend = lpfc_pci_suspend_one, 7950 .resume = lpfc_pci_resume_one, 7951 .err_handler = &lpfc_err_handler, 7952}; 7953 7954/** 7955 * lpfc_init - lpfc module initialization routine 7956 * 7957 * This routine is to be invoked when the lpfc module is loaded into the 7958 * kernel. The special kernel macro module_init() is used to indicate the 7959 * role of this routine to the kernel as lpfc module entry point. 7960 * 7961 * Return codes 7962 * 0 - successful 7963 * -ENOMEM - FC attach transport failed 7964 * all others - failed 7965 */ 7966static int __init 7967lpfc_init(void) 7968{ 7969 int error = 0; 7970 7971 printk(LPFC_MODULE_DESC "\n"); 7972 printk(LPFC_COPYRIGHT "\n"); 7973 7974 if (lpfc_enable_npiv) { 7975 lpfc_transport_functions.vport_create = lpfc_vport_create; 7976 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 7977 } 7978 lpfc_transport_template = 7979 fc_attach_transport(&lpfc_transport_functions); 7980 if (lpfc_transport_template == NULL) 7981 return -ENOMEM; 7982 if (lpfc_enable_npiv) { 7983 lpfc_vport_transport_template = 7984 fc_attach_transport(&lpfc_vport_transport_functions); 7985 if (lpfc_vport_transport_template == NULL) { 7986 fc_release_transport(lpfc_transport_template); 7987 return -ENOMEM; 7988 } 7989 } 7990 error = pci_register_driver(&lpfc_driver); 7991 if (error) { 7992 fc_release_transport(lpfc_transport_template); 7993 if (lpfc_enable_npiv) 7994 fc_release_transport(lpfc_vport_transport_template); 7995 } 7996 7997 return error; 7998} 7999 8000/** 8001 * lpfc_exit - lpfc module removal routine 8002 * 8003 * This routine is invoked when the lpfc module is removed from the kernel. 8004 * The special kernel macro module_exit() is used to indicate the role of 8005 * this routine to the kernel as lpfc module exit point. 8006 */ 8007static void __exit 8008lpfc_exit(void) 8009{ 8010 pci_unregister_driver(&lpfc_driver); 8011 fc_release_transport(lpfc_transport_template); 8012 if (lpfc_enable_npiv) 8013 fc_release_transport(lpfc_vport_transport_template); 8014 if (_dump_buf_data) { 8015 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " 8016 "at 0x%p\n", 8017 (1L << _dump_buf_data_order), _dump_buf_data); 8018 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 8019 } 8020 8021 if (_dump_buf_dif) { 8022 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " 8023 "at 0x%p\n", 8024 (1L << _dump_buf_dif_order), _dump_buf_dif); 8025 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 8026 } 8027} 8028 8029module_init(lpfc_init); 8030module_exit(lpfc_exit); 8031MODULE_LICENSE("GPL"); 8032MODULE_DESCRIPTION(LPFC_MODULE_DESC); 8033MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 8034MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 8035
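/*
 * Module usage sketch (assuming the standard lpfc module parameter names):
 * the NPIV vport create/delete hooks wired up in lpfc_init() above are only
 * attached when lpfc_enable_npiv is set, e.g.:
 *
 *   modprobe lpfc lpfc_enable_npiv=1
 */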