lpfc_init.c revision 0af5d708aae3aef1f98a1c689007b92db2c10277
1/******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22#include <linux/blkdev.h> 23#include <linux/delay.h> 24#include <linux/dma-mapping.h> 25#include <linux/idr.h> 26#include <linux/interrupt.h> 27#include <linux/kthread.h> 28#include <linux/pci.h> 29#include <linux/spinlock.h> 30#include <linux/ctype.h> 31#include <linux/aer.h> 32#include <linux/slab.h> 33 34#include <scsi/scsi.h> 35#include <scsi/scsi_device.h> 36#include <scsi/scsi_host.h> 37#include <scsi/scsi_transport_fc.h> 38 39#include "lpfc_hw4.h" 40#include "lpfc_hw.h" 41#include "lpfc_sli.h" 42#include "lpfc_sli4.h" 43#include "lpfc_nl.h" 44#include "lpfc_disc.h" 45#include "lpfc_scsi.h" 46#include "lpfc.h" 47#include "lpfc_logmsg.h" 48#include "lpfc_crtn.h" 49#include "lpfc_vport.h" 50#include "lpfc_version.h" 51 52char *_dump_buf_data; 53unsigned long _dump_buf_data_order; 54char *_dump_buf_dif; 55unsigned long _dump_buf_dif_order; 56spinlock_t _dump_buf_lock; 57 58static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 59static int lpfc_post_rcv_buf(struct lpfc_hba *); 60static int lpfc_sli4_queue_create(struct lpfc_hba *); 61static void lpfc_sli4_queue_destroy(struct lpfc_hba *); 62static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 63static int lpfc_setup_endian_order(struct lpfc_hba *); 64static int lpfc_sli4_read_config(struct lpfc_hba *); 65static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 66static void lpfc_free_sgl_list(struct lpfc_hba *); 67static int lpfc_init_sgl_list(struct lpfc_hba *); 68static int lpfc_init_active_sgl_array(struct lpfc_hba *); 69static void lpfc_free_active_sgl(struct lpfc_hba *); 70static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 71static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 72static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 73static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 74static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 75 76static struct scsi_transport_template *lpfc_transport_template = NULL; 77static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 78static DEFINE_IDR(lpfc_hba_index); 79 80/** 81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 82 * @phba: pointer to lpfc hba data structure. 83 * 84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 85 * mailbox command. 
It retrieves the revision information from the HBA and 86 * collects the Vital Product Data (VPD) about the HBA for preparing the 87 * configuration of the HBA. 88 * 89 * Return codes: 90 * 0 - success. 91 * -ERESTART - requests the SLI layer to reset the HBA and try again. 92 * Any other value - indicates an error. 93 **/ 94int 95lpfc_config_port_prep(struct lpfc_hba *phba) 96{ 97 lpfc_vpd_t *vp = &phba->vpd; 98 int i = 0, rc; 99 LPFC_MBOXQ_t *pmb; 100 MAILBOX_t *mb; 101 char *lpfc_vpd_data = NULL; 102 uint16_t offset = 0; 103 static char licensed[56] = 104 "key unlock for use with gnu public licensed code only\0"; 105 static int init_key = 1; 106 107 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 108 if (!pmb) { 109 phba->link_state = LPFC_HBA_ERROR; 110 return -ENOMEM; 111 } 112 113 mb = &pmb->u.mb; 114 phba->link_state = LPFC_INIT_MBX_CMDS; 115 116 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 117 if (init_key) { 118 uint32_t *ptext = (uint32_t *) licensed; 119 120 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 121 *ptext = cpu_to_be32(*ptext); 122 init_key = 0; 123 } 124 125 lpfc_read_nv(phba, pmb); 126 memset((char*)mb->un.varRDnvp.rsvd3, 0, 127 sizeof (mb->un.varRDnvp.rsvd3)); 128 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 129 sizeof (licensed)); 130 131 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 132 133 if (rc != MBX_SUCCESS) { 134 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 135 "0324 Config Port initialization " 136 "error, mbxCmd x%x READ_NVPARM, " 137 "mbxStatus x%x\n", 138 mb->mbxCommand, mb->mbxStatus); 139 mempool_free(pmb, phba->mbox_mem_pool); 140 return -ERESTART; 141 } 142 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 143 sizeof(phba->wwnn)); 144 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 145 sizeof(phba->wwpn)); 146 } 147 148 phba->sli3_options = 0x0; 149 150 /* Setup and issue mailbox READ REV command */ 151 lpfc_read_rev(phba, pmb); 152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 153 if (rc != MBX_SUCCESS) { 154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 155 "0439 Adapter failed to init, mbxCmd x%x " 156 "READ_REV, mbxStatus x%x\n", 157 mb->mbxCommand, mb->mbxStatus); 158 mempool_free( pmb, phba->mbox_mem_pool); 159 return -ERESTART; 160 } 161 162 163 /* 164 * The value of rr must be 1 since the driver set the cv field to 1. 165 * This setting requires the FW to set all revision fields. 
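 * If rr comes back 0 the revision fields below cannot be trusted, so the
 * code that follows clears vp->rev.rBit and returns -ERESTART to have the
 * SLI layer reset the HBA and retry.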
166 */ 167 if (mb->un.varRdRev.rr == 0) { 168 vp->rev.rBit = 0; 169 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 170 "0440 Adapter failed to init, READ_REV has " 171 "missing revision information.\n"); 172 mempool_free(pmb, phba->mbox_mem_pool); 173 return -ERESTART; 174 } 175 176 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 177 mempool_free(pmb, phba->mbox_mem_pool); 178 return -EINVAL; 179 } 180 181 /* Save information as VPD data */ 182 vp->rev.rBit = 1; 183 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 184 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 185 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 186 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 187 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 188 vp->rev.biuRev = mb->un.varRdRev.biuRev; 189 vp->rev.smRev = mb->un.varRdRev.smRev; 190 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 191 vp->rev.endecRev = mb->un.varRdRev.endecRev; 192 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 193 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 194 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 195 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 196 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 197 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 198 199 /* If the sli feature level is less then 9, we must 200 * tear down all RPIs and VPIs on link down if NPIV 201 * is enabled. 202 */ 203 if (vp->rev.feaLevelHigh < 9) 204 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 205 206 if (lpfc_is_LC_HBA(phba->pcidev->device)) 207 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 208 sizeof (phba->RandomData)); 209 210 /* Get adapter VPD information */ 211 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 212 if (!lpfc_vpd_data) 213 goto out_free_mbox; 214 215 do { 216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 218 219 if (rc != MBX_SUCCESS) { 220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 221 "0441 VPD not present on adapter, " 222 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 223 mb->mbxCommand, mb->mbxStatus); 224 mb->un.varDmp.word_cnt = 0; 225 } 226 /* dump mem may return a zero when finished or we got a 227 * mailbox error, either way we are done. 228 */ 229 if (mb->un.varDmp.word_cnt == 0) 230 break; 231 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 232 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 233 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 234 lpfc_vpd_data + offset, 235 mb->un.varDmp.word_cnt); 236 offset += mb->un.varDmp.word_cnt; 237 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 238 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 239 240 kfree(lpfc_vpd_data); 241out_free_mbox: 242 mempool_free(pmb, phba->mbox_mem_pool); 243 return 0; 244} 245 246/** 247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 248 * @phba: pointer to lpfc hba data structure. 249 * @pmboxq: pointer to the driver internal queue element for mailbox command. 250 * 251 * This is the completion handler for driver's configuring asynchronous event 252 * mailbox command to the device. If the mailbox command returns successfully, 253 * it will set internal async event support flag to 1; otherwise, it will 254 * set internal async event support flag to 0. 
255 **/ 256static void 257lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 258{ 259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 260 phba->temp_sensor_support = 1; 261 else 262 phba->temp_sensor_support = 0; 263 mempool_free(pmboxq, phba->mbox_mem_pool); 264 return; 265} 266 267/** 268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 269 * @phba: pointer to lpfc hba data structure. 270 * @pmboxq: pointer to the driver internal queue element for mailbox command. 271 * 272 * This is the completion handler for dump mailbox command for getting 273 * wake up parameters. When this command complete, the response contain 274 * Option rom version of the HBA. This function translate the version number 275 * into a human readable string and store it in OptionROMVersion. 276 **/ 277static void 278lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 279{ 280 struct prog_id *prg; 281 uint32_t prog_id_word; 282 char dist = ' '; 283 /* character array used for decoding dist type. */ 284 char dist_char[] = "nabx"; 285 286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 287 mempool_free(pmboxq, phba->mbox_mem_pool); 288 return; 289 } 290 291 prg = (struct prog_id *) &prog_id_word; 292 293 /* word 7 contain option rom version */ 294 prog_id_word = pmboxq->u.mb.un.varWords[7]; 295 296 /* Decode the Option rom version word to a readable string */ 297 if (prg->dist < 4) 298 dist = dist_char[prg->dist]; 299 300 if ((prg->dist == 3) && (prg->num == 0)) 301 sprintf(phba->OptionROMVersion, "%d.%d%d", 302 prg->ver, prg->rev, prg->lev); 303 else 304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", 305 prg->ver, prg->rev, prg->lev, 306 dist, prg->num); 307 mempool_free(pmboxq, phba->mbox_mem_pool); 308 return; 309} 310 311/** 312 * lpfc_config_port_post - Perform lpfc initialization after config port 313 * @phba: pointer to lpfc hba data structure. 314 * 315 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 316 * command call. It performs all internal resource and state setups on the 317 * port: post IOCB buffers, enable appropriate host interrupt attentions, 318 * ELS ring timers, etc. 319 * 320 * Return codes 321 * 0 - success. 322 * Any other value - error. 323 **/ 324int 325lpfc_config_port_post(struct lpfc_hba *phba) 326{ 327 struct lpfc_vport *vport = phba->pport; 328 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 329 LPFC_MBOXQ_t *pmb; 330 MAILBOX_t *mb; 331 struct lpfc_dmabuf *mp; 332 struct lpfc_sli *psli = &phba->sli; 333 uint32_t status, timeout; 334 int i, j; 335 int rc; 336 337 spin_lock_irq(&phba->hbalock); 338 /* 339 * If the Config port completed correctly the HBA is not 340 * over heated any more. 341 */ 342 if (phba->over_temp_state == HBA_OVER_TEMP) 343 phba->over_temp_state = HBA_NORMAL_TEMP; 344 spin_unlock_irq(&phba->hbalock); 345 346 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 347 if (!pmb) { 348 phba->link_state = LPFC_HBA_ERROR; 349 return -ENOMEM; 350 } 351 mb = &pmb->u.mb; 352 353 /* Get login parameters for NID. 
*/ 354 rc = lpfc_read_sparam(phba, pmb, 0); 355 if (rc) { 356 mempool_free(pmb, phba->mbox_mem_pool); 357 return -ENOMEM; 358 } 359 360 pmb->vport = vport; 361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 363 "0448 Adapter failed init, mbxCmd x%x " 364 "READ_SPARM mbxStatus x%x\n", 365 mb->mbxCommand, mb->mbxStatus); 366 phba->link_state = LPFC_HBA_ERROR; 367 mp = (struct lpfc_dmabuf *) pmb->context1; 368 mempool_free(pmb, phba->mbox_mem_pool); 369 lpfc_mbuf_free(phba, mp->virt, mp->phys); 370 kfree(mp); 371 return -EIO; 372 } 373 374 mp = (struct lpfc_dmabuf *) pmb->context1; 375 376 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 377 lpfc_mbuf_free(phba, mp->virt, mp->phys); 378 kfree(mp); 379 pmb->context1 = NULL; 380 381 if (phba->cfg_soft_wwnn) 382 u64_to_wwn(phba->cfg_soft_wwnn, 383 vport->fc_sparam.nodeName.u.wwn); 384 if (phba->cfg_soft_wwpn) 385 u64_to_wwn(phba->cfg_soft_wwpn, 386 vport->fc_sparam.portName.u.wwn); 387 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 388 sizeof (struct lpfc_name)); 389 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 390 sizeof (struct lpfc_name)); 391 392 /* Update the fc_host data structures with new wwn. */ 393 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 394 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 395 fc_host_max_npiv_vports(shost) = phba->max_vpi; 396 397 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 398 /* This should be consolidated into parse_vpd ? - mr */ 399 if (phba->SerialNumber[0] == 0) { 400 uint8_t *outptr; 401 402 outptr = &vport->fc_nodename.u.s.IEEE[0]; 403 for (i = 0; i < 12; i++) { 404 status = *outptr++; 405 j = ((status & 0xf0) >> 4); 406 if (j <= 9) 407 phba->SerialNumber[i] = 408 (char)((uint8_t) 0x30 + (uint8_t) j); 409 else 410 phba->SerialNumber[i] = 411 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 412 i++; 413 j = (status & 0xf); 414 if (j <= 9) 415 phba->SerialNumber[i] = 416 (char)((uint8_t) 0x30 + (uint8_t) j); 417 else 418 phba->SerialNumber[i] = 419 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 420 } 421 } 422 423 lpfc_read_config(phba, pmb); 424 pmb->vport = vport; 425 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 427 "0453 Adapter failed to init, mbxCmd x%x " 428 "READ_CONFIG, mbxStatus x%x\n", 429 mb->mbxCommand, mb->mbxStatus); 430 phba->link_state = LPFC_HBA_ERROR; 431 mempool_free( pmb, phba->mbox_mem_pool); 432 return -EIO; 433 } 434 435 /* Check if the port is disabled */ 436 lpfc_sli_read_link_ste(phba); 437 438 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 439 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 440 phba->cfg_hba_queue_depth = 441 (mb->un.varRdConfig.max_xri + 1) - 442 lpfc_sli4_get_els_iocb_cnt(phba); 443 444 phba->lmt = mb->un.varRdConfig.lmt; 445 446 /* Get the default values for Model Name and Description */ 447 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 448 449 if ((phba->cfg_link_speed > LINK_SPEED_10G) 450 || ((phba->cfg_link_speed == LINK_SPEED_1G) 451 && !(phba->lmt & LMT_1Gb)) 452 || ((phba->cfg_link_speed == LINK_SPEED_2G) 453 && !(phba->lmt & LMT_2Gb)) 454 || ((phba->cfg_link_speed == LINK_SPEED_4G) 455 && !(phba->lmt & LMT_4Gb)) 456 || ((phba->cfg_link_speed == LINK_SPEED_8G) 457 && !(phba->lmt & LMT_8Gb)) 458 || ((phba->cfg_link_speed == LINK_SPEED_10G) 459 && !(phba->lmt & LMT_10Gb))) { 460 /* Reset link speed to auto */ 461 
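	/* For example, a cfg_link_speed of LINK_SPEED_8G on a board whose
	 * lmt does not advertise LMT_8Gb trips the check above; the warning
	 * below is logged and the speed falls back to LINK_SPEED_AUTO.
	 */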
lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 462 "1302 Invalid speed for this board: " 463 "Reset link speed to auto: x%x\n", 464 phba->cfg_link_speed); 465 phba->cfg_link_speed = LINK_SPEED_AUTO; 466 } 467 468 phba->link_state = LPFC_LINK_DOWN; 469 470 /* Only process IOCBs on ELS ring till hba_state is READY */ 471 if (psli->ring[psli->extra_ring].cmdringaddr) 472 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 473 if (psli->ring[psli->fcp_ring].cmdringaddr) 474 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 475 if (psli->ring[psli->next_ring].cmdringaddr) 476 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 477 478 /* Post receive buffers for desired rings */ 479 if (phba->sli_rev != 3) 480 lpfc_post_rcv_buf(phba); 481 482 /* 483 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 484 */ 485 if (phba->intr_type == MSIX) { 486 rc = lpfc_config_msi(phba, pmb); 487 if (rc) { 488 mempool_free(pmb, phba->mbox_mem_pool); 489 return -EIO; 490 } 491 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 492 if (rc != MBX_SUCCESS) { 493 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 494 "0352 Config MSI mailbox command " 495 "failed, mbxCmd x%x, mbxStatus x%x\n", 496 pmb->u.mb.mbxCommand, 497 pmb->u.mb.mbxStatus); 498 mempool_free(pmb, phba->mbox_mem_pool); 499 return -EIO; 500 } 501 } 502 503 spin_lock_irq(&phba->hbalock); 504 /* Initialize ERATT handling flag */ 505 phba->hba_flag &= ~HBA_ERATT_HANDLED; 506 507 /* Enable appropriate host interrupts */ 508 status = readl(phba->HCregaddr); 509 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 510 if (psli->num_rings > 0) 511 status |= HC_R0INT_ENA; 512 if (psli->num_rings > 1) 513 status |= HC_R1INT_ENA; 514 if (psli->num_rings > 2) 515 status |= HC_R2INT_ENA; 516 if (psli->num_rings > 3) 517 status |= HC_R3INT_ENA; 518 519 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 520 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 521 status &= ~(HC_R0INT_ENA); 522 523 writel(status, phba->HCregaddr); 524 readl(phba->HCregaddr); /* flush */ 525 spin_unlock_irq(&phba->hbalock); 526 527 /* Set up ring-0 (ELS) timer */ 528 timeout = phba->fc_ratov * 2; 529 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 530 /* Set up heart beat (HB) timer */ 531 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 532 phba->hb_outstanding = 0; 533 phba->last_completion_time = jiffies; 534 /* Set up error attention (ERATT) polling timer */ 535 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 536 537 if (phba->hba_flag & LINK_DISABLED) { 538 lpfc_printf_log(phba, 539 KERN_ERR, LOG_INIT, 540 "2598 Adapter Link is disabled.\n"); 541 lpfc_down_link(phba, pmb); 542 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 543 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 544 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 545 lpfc_printf_log(phba, 546 KERN_ERR, LOG_INIT, 547 "2599 Adapter failed to issue DOWN_LINK" 548 " mbox command rc 0x%x\n", rc); 549 550 mempool_free(pmb, phba->mbox_mem_pool); 551 return -EIO; 552 } 553 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 554 lpfc_init_link(phba, pmb, phba->cfg_topology, 555 phba->cfg_link_speed); 556 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 557 lpfc_set_loopback_flag(phba); 558 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 559 if (rc != MBX_SUCCESS) { 560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 561 "0454 Adapter failed to init, mbxCmd x%x " 562 "INIT_LINK, mbxStatus x%x\n", 563 mb->mbxCommand, mb->mbxStatus); 564 565 /* Clear all interrupt 
enable conditions */ 566 writel(0, phba->HCregaddr); 567 readl(phba->HCregaddr); /* flush */ 568 /* Clear all pending interrupts */ 569 writel(0xffffffff, phba->HAregaddr); 570 readl(phba->HAregaddr); /* flush */ 571 572 phba->link_state = LPFC_HBA_ERROR; 573 if (rc != MBX_BUSY) 574 mempool_free(pmb, phba->mbox_mem_pool); 575 return -EIO; 576 } 577 } 578 /* MBOX buffer will be freed in mbox compl */ 579 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 580 if (!pmb) { 581 phba->link_state = LPFC_HBA_ERROR; 582 return -ENOMEM; 583 } 584 585 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 586 pmb->mbox_cmpl = lpfc_config_async_cmpl; 587 pmb->vport = phba->pport; 588 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 589 590 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 591 lpfc_printf_log(phba, 592 KERN_ERR, 593 LOG_INIT, 594 "0456 Adapter failed to issue " 595 "ASYNCEVT_ENABLE mbox status x%x\n", 596 rc); 597 mempool_free(pmb, phba->mbox_mem_pool); 598 } 599 600 /* Get Option rom version */ 601 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 602 if (!pmb) { 603 phba->link_state = LPFC_HBA_ERROR; 604 return -ENOMEM; 605 } 606 607 lpfc_dump_wakeup_param(phba, pmb); 608 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 609 pmb->vport = phba->pport; 610 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 611 612 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 614 "to get Option ROM version status x%x\n", rc); 615 mempool_free(pmb, phba->mbox_mem_pool); 616 } 617 618 return 0; 619} 620 621/** 622 * lpfc_hba_init_link - Initialize the FC link 623 * @phba: pointer to lpfc hba data structure. 624 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 625 * 626 * This routine will issue the INIT_LINK mailbox command call. 627 * It is available to other drivers through the lpfc_hba data 628 * structure for use as a delayed link up mechanism with the 629 * module parameter lpfc_suppress_link_up. 630 * 631 * Return code 632 * 0 - success 633 * Any other value - error 634 **/ 635int 636lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 637{ 638 struct lpfc_vport *vport = phba->pport; 639 LPFC_MBOXQ_t *pmb; 640 MAILBOX_t *mb; 641 int rc; 642 643 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 644 if (!pmb) { 645 phba->link_state = LPFC_HBA_ERROR; 646 return -ENOMEM; 647 } 648 mb = &pmb->u.mb; 649 pmb->vport = vport; 650 651 lpfc_init_link(phba, pmb, phba->cfg_topology, 652 phba->cfg_link_speed); 653 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 654 lpfc_set_loopback_flag(phba); 655 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 656 if (rc != MBX_SUCCESS) { 657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 658 "0498 Adapter failed to init, mbxCmd x%x " 659 "INIT_LINK, mbxStatus x%x\n", 660 mb->mbxCommand, mb->mbxStatus); 661 /* Clear all interrupt enable conditions */ 662 writel(0, phba->HCregaddr); 663 readl(phba->HCregaddr); /* flush */ 664 /* Clear all pending interrupts */ 665 writel(0xffffffff, phba->HAregaddr); 666 readl(phba->HAregaddr); /* flush */ 667 phba->link_state = LPFC_HBA_ERROR; 668 if (rc != MBX_BUSY || flag == MBX_POLL) 669 mempool_free(pmb, phba->mbox_mem_pool); 670 return -EIO; 671 } 672 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 673 if (flag == MBX_POLL) 674 mempool_free(pmb, phba->mbox_mem_pool); 675 676 return 0; 677} 678 679/** 680 * lpfc_hba_down_link - this routine downs the FC link 681 * @phba: pointer to lpfc hba data structure. 
682 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 683 * 684 * This routine will issue the DOWN_LINK mailbox command call. 685 * It is available to other drivers through the lpfc_hba data 686 * structure for use to stop the link. 687 * 688 * Return code 689 * 0 - success 690 * Any other value - error 691 **/ 692int 693lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 694{ 695 LPFC_MBOXQ_t *pmb; 696 int rc; 697 698 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 699 if (!pmb) { 700 phba->link_state = LPFC_HBA_ERROR; 701 return -ENOMEM; 702 } 703 704 lpfc_printf_log(phba, 705 KERN_ERR, LOG_INIT, 706 "0491 Adapter Link is disabled.\n"); 707 lpfc_down_link(phba, pmb); 708 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 709 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 710 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 711 lpfc_printf_log(phba, 712 KERN_ERR, LOG_INIT, 713 "2522 Adapter failed to issue DOWN_LINK" 714 " mbox command rc 0x%x\n", rc); 715 716 mempool_free(pmb, phba->mbox_mem_pool); 717 return -EIO; 718 } 719 if (flag == MBX_POLL) 720 mempool_free(pmb, phba->mbox_mem_pool); 721 722 return 0; 723} 724 725/** 726 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 727 * @phba: pointer to lpfc HBA data structure. 728 * 729 * This routine will do LPFC uninitialization before the HBA is reset when 730 * bringing down the SLI Layer. 731 * 732 * Return codes 733 * 0 - success. 734 * Any other value - error. 735 **/ 736int 737lpfc_hba_down_prep(struct lpfc_hba *phba) 738{ 739 struct lpfc_vport **vports; 740 int i; 741 742 if (phba->sli_rev <= LPFC_SLI_REV3) { 743 /* Disable interrupts */ 744 writel(0, phba->HCregaddr); 745 readl(phba->HCregaddr); /* flush */ 746 } 747 748 if (phba->pport->load_flag & FC_UNLOADING) 749 lpfc_cleanup_discovery_resources(phba->pport); 750 else { 751 vports = lpfc_create_vport_work_array(phba); 752 if (vports != NULL) 753 for (i = 0; i <= phba->max_vports && 754 vports[i] != NULL; i++) 755 lpfc_cleanup_discovery_resources(vports[i]); 756 lpfc_destroy_vport_work_array(phba, vports); 757 } 758 return 0; 759} 760 761/** 762 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 763 * @phba: pointer to lpfc HBA data structure. 764 * 765 * This routine will do uninitialization after the HBA is reset when bring 766 * down the SLI Layer. 767 * 768 * Return codes 769 * 0 - success. 770 * Any other value - error. 771 **/ 772static int 773lpfc_hba_down_post_s3(struct lpfc_hba *phba) 774{ 775 struct lpfc_sli *psli = &phba->sli; 776 struct lpfc_sli_ring *pring; 777 struct lpfc_dmabuf *mp, *next_mp; 778 LIST_HEAD(completions); 779 int i; 780 781 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 782 lpfc_sli_hbqbuf_free_all(phba); 783 else { 784 /* Cleanup preposted buffers on the ELS ring */ 785 pring = &psli->ring[LPFC_ELS_RING]; 786 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 787 list_del(&mp->list); 788 pring->postbufq_cnt--; 789 lpfc_mbuf_free(phba, mp->virt, mp->phys); 790 kfree(mp); 791 } 792 } 793 794 spin_lock_irq(&phba->hbalock); 795 for (i = 0; i < psli->num_rings; i++) { 796 pring = &psli->ring[i]; 797 798 /* At this point in time the HBA is either reset or DOA. Either 799 * way, nothing should be on txcmplq as it will NEVER complete. 
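		 * Anything still found on txcmplq here is spliced onto a local
		 * completions list and cancelled below with
		 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.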
800 */ 801 list_splice_init(&pring->txcmplq, &completions); 802 pring->txcmplq_cnt = 0; 803 spin_unlock_irq(&phba->hbalock); 804 805 /* Cancel all the IOCBs from the completions list */ 806 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 807 IOERR_SLI_ABORTED); 808 809 lpfc_sli_abort_iocb_ring(phba, pring); 810 spin_lock_irq(&phba->hbalock); 811 } 812 spin_unlock_irq(&phba->hbalock); 813 814 return 0; 815} 816/** 817 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 818 * @phba: pointer to lpfc HBA data structure. 819 * 820 * This routine will do uninitialization after the HBA is reset when bring 821 * down the SLI Layer. 822 * 823 * Return codes 824 * 0 - success. 825 * Any other value - error. 826 **/ 827static int 828lpfc_hba_down_post_s4(struct lpfc_hba *phba) 829{ 830 struct lpfc_scsi_buf *psb, *psb_next; 831 LIST_HEAD(aborts); 832 int ret; 833 unsigned long iflag = 0; 834 struct lpfc_sglq *sglq_entry = NULL; 835 836 ret = lpfc_hba_down_post_s3(phba); 837 if (ret) 838 return ret; 839 /* At this point in time the HBA is either reset or DOA. Either 840 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 841 * on the lpfc_sgl_list so that it can either be freed if the 842 * driver is unloading or reposted if the driver is restarting 843 * the port. 844 */ 845 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ 846 /* scsl_buf_list */ 847 /* abts_sgl_list_lock required because worker thread uses this 848 * list. 849 */ 850 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 851 list_for_each_entry(sglq_entry, 852 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 853 sglq_entry->state = SGL_FREED; 854 855 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 856 &phba->sli4_hba.lpfc_sgl_list); 857 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 858 /* abts_scsi_buf_list_lock required because worker thread uses this 859 * list. 860 */ 861 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 862 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 863 &aborts); 864 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 865 spin_unlock_irq(&phba->hbalock); 866 867 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 868 psb->pCmd = NULL; 869 psb->status = IOSTAT_SUCCESS; 870 } 871 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 872 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 873 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 874 return 0; 875} 876 877/** 878 * lpfc_hba_down_post - Wrapper func for hba down post routine 879 * @phba: pointer to lpfc HBA data structure. 880 * 881 * This routine wraps the actual SLI3 or SLI4 routine for performing 882 * uninitialization after the HBA is reset when bring down the SLI Layer. 883 * 884 * Return codes 885 * 0 - success. 886 * Any other value - error. 887 **/ 888int 889lpfc_hba_down_post(struct lpfc_hba *phba) 890{ 891 return (*phba->lpfc_hba_down_post)(phba); 892} 893 894/** 895 * lpfc_hb_timeout - The HBA-timer timeout handler 896 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 897 * 898 * This is the HBA-timer timeout handler registered to the lpfc driver. When 899 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 900 * work-port-events bitmap and the worker thread is notified. This timeout 901 * event will be used by the worker thread to invoke the actual timeout 902 * handler routine, lpfc_hb_timeout_handler. 
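 * The timer callback itself only sets the WORKER_HB_TMO bit under
 * work_port_lock and wakes the worker thread; no mailbox or register access
 * is attempted in timer context.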
Any periodical operations will 903 * be performed in the timeout handler and the HBA timeout event bit shall 904 * be cleared by the worker thread after it has taken the event bitmap out. 905 **/ 906static void 907lpfc_hb_timeout(unsigned long ptr) 908{ 909 struct lpfc_hba *phba; 910 uint32_t tmo_posted; 911 unsigned long iflag; 912 913 phba = (struct lpfc_hba *)ptr; 914 915 /* Check for heart beat timeout conditions */ 916 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 917 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 918 if (!tmo_posted) 919 phba->pport->work_port_events |= WORKER_HB_TMO; 920 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 921 922 /* Tell the worker thread there is work to do */ 923 if (!tmo_posted) 924 lpfc_worker_wake_up(phba); 925 return; 926} 927 928/** 929 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 930 * @phba: pointer to lpfc hba data structure. 931 * @pmboxq: pointer to the driver internal queue element for mailbox command. 932 * 933 * This is the callback function to the lpfc heart-beat mailbox command. 934 * If configured, the lpfc driver issues the heart-beat mailbox command to 935 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 936 * heart-beat mailbox command is issued, the driver shall set up heart-beat 937 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 938 * heart-beat outstanding state. Once the mailbox command comes back and 939 * no error conditions detected, the heart-beat mailbox command timer is 940 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 941 * state is cleared for the next heart-beat. If the timer expired with the 942 * heart-beat outstanding state set, the driver will put the HBA offline. 943 **/ 944static void 945lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 946{ 947 unsigned long drvr_flag; 948 949 spin_lock_irqsave(&phba->hbalock, drvr_flag); 950 phba->hb_outstanding = 0; 951 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 952 953 /* Check and reset heart-beat timer is necessary */ 954 mempool_free(pmboxq, phba->mbox_mem_pool); 955 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 956 !(phba->link_state == LPFC_HBA_ERROR) && 957 !(phba->pport->load_flag & FC_UNLOADING)) 958 mod_timer(&phba->hb_tmofunc, 959 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 960 return; 961} 962 963/** 964 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 965 * @phba: pointer to lpfc hba data structure. 966 * 967 * This is the actual HBA-timer timeout handler to be invoked by the worker 968 * thread whenever the HBA timer fired and HBA-timeout event posted. This 969 * handler performs any periodic operations needed for the device. If such 970 * periodic event has already been attended to either in the interrupt handler 971 * or by processing slow-ring or fast-ring events within the HBA-timer 972 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 973 * the timer for the next timeout period. If lpfc heart-beat mailbox command 974 * is configured and there is no heart-beat mailbox command outstanding, a 975 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 976 * has been a heart-beat mailbox command outstanding, the HBA shall be put 977 * to offline. 
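 * While a heart-beat mailbox command is outstanding the timer is armed for
 * LPFC_HB_MBOX_TIMEOUT (currently 30) seconds; once the command completes,
 * lpfc_hb_mbox_cmpl re-arms it for LPFC_HB_MBOX_INTERVAL (currently 5)
 * seconds.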
978 **/ 979void 980lpfc_hb_timeout_handler(struct lpfc_hba *phba) 981{ 982 struct lpfc_vport **vports; 983 LPFC_MBOXQ_t *pmboxq; 984 struct lpfc_dmabuf *buf_ptr; 985 int retval, i; 986 struct lpfc_sli *psli = &phba->sli; 987 LIST_HEAD(completions); 988 989 vports = lpfc_create_vport_work_array(phba); 990 if (vports != NULL) 991 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 992 lpfc_rcv_seq_check_edtov(vports[i]); 993 lpfc_destroy_vport_work_array(phba, vports); 994 995 if ((phba->link_state == LPFC_HBA_ERROR) || 996 (phba->pport->load_flag & FC_UNLOADING) || 997 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 998 return; 999 1000 spin_lock_irq(&phba->pport->work_port_lock); 1001 1002 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 1003 jiffies)) { 1004 spin_unlock_irq(&phba->pport->work_port_lock); 1005 if (!phba->hb_outstanding) 1006 mod_timer(&phba->hb_tmofunc, 1007 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1008 else 1009 mod_timer(&phba->hb_tmofunc, 1010 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1011 return; 1012 } 1013 spin_unlock_irq(&phba->pport->work_port_lock); 1014 1015 if (phba->elsbuf_cnt && 1016 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1017 spin_lock_irq(&phba->hbalock); 1018 list_splice_init(&phba->elsbuf, &completions); 1019 phba->elsbuf_cnt = 0; 1020 phba->elsbuf_prev_cnt = 0; 1021 spin_unlock_irq(&phba->hbalock); 1022 1023 while (!list_empty(&completions)) { 1024 list_remove_head(&completions, buf_ptr, 1025 struct lpfc_dmabuf, list); 1026 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1027 kfree(buf_ptr); 1028 } 1029 } 1030 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1031 1032 /* If there is no heart beat outstanding, issue a heartbeat command */ 1033 if (phba->cfg_enable_hba_heartbeat) { 1034 if (!phba->hb_outstanding) { 1035 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1036 (list_empty(&psli->mboxq))) { 1037 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1038 GFP_KERNEL); 1039 if (!pmboxq) { 1040 mod_timer(&phba->hb_tmofunc, 1041 jiffies + 1042 HZ * LPFC_HB_MBOX_INTERVAL); 1043 return; 1044 } 1045 1046 lpfc_heart_beat(phba, pmboxq); 1047 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1048 pmboxq->vport = phba->pport; 1049 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1050 MBX_NOWAIT); 1051 1052 if (retval != MBX_BUSY && 1053 retval != MBX_SUCCESS) { 1054 mempool_free(pmboxq, 1055 phba->mbox_mem_pool); 1056 mod_timer(&phba->hb_tmofunc, 1057 jiffies + 1058 HZ * LPFC_HB_MBOX_INTERVAL); 1059 return; 1060 } 1061 phba->skipped_hb = 0; 1062 phba->hb_outstanding = 1; 1063 } else if (time_before_eq(phba->last_completion_time, 1064 phba->skipped_hb)) { 1065 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1066 "2857 Last completion time not " 1067 " updated in %d ms\n", 1068 jiffies_to_msecs(jiffies 1069 - phba->last_completion_time)); 1070 } else 1071 phba->skipped_hb = jiffies; 1072 1073 mod_timer(&phba->hb_tmofunc, 1074 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1075 return; 1076 } else { 1077 /* 1078 * If heart beat timeout called with hb_outstanding set 1079 * we need to take the HBA offline. 
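			 * The port is taken down via lpfc_offline_prep() and
			 * lpfc_offline(), marked LPFC_HBA_ERROR and cleaned
			 * up with lpfc_hba_down_post() below.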
1080 */ 1081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1082 "0459 Adapter heartbeat failure, " 1083 "taking this port offline.\n"); 1084 1085 spin_lock_irq(&phba->hbalock); 1086 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1087 spin_unlock_irq(&phba->hbalock); 1088 1089 lpfc_offline_prep(phba); 1090 lpfc_offline(phba); 1091 lpfc_unblock_mgmt_io(phba); 1092 phba->link_state = LPFC_HBA_ERROR; 1093 lpfc_hba_down_post(phba); 1094 } 1095 } 1096} 1097 1098/** 1099 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1100 * @phba: pointer to lpfc hba data structure. 1101 * 1102 * This routine is called to bring the HBA offline when HBA hardware error 1103 * other than Port Error 6 has been detected. 1104 **/ 1105static void 1106lpfc_offline_eratt(struct lpfc_hba *phba) 1107{ 1108 struct lpfc_sli *psli = &phba->sli; 1109 1110 spin_lock_irq(&phba->hbalock); 1111 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1112 spin_unlock_irq(&phba->hbalock); 1113 lpfc_offline_prep(phba); 1114 1115 lpfc_offline(phba); 1116 lpfc_reset_barrier(phba); 1117 spin_lock_irq(&phba->hbalock); 1118 lpfc_sli_brdreset(phba); 1119 spin_unlock_irq(&phba->hbalock); 1120 lpfc_hba_down_post(phba); 1121 lpfc_sli_brdready(phba, HS_MBRDY); 1122 lpfc_unblock_mgmt_io(phba); 1123 phba->link_state = LPFC_HBA_ERROR; 1124 return; 1125} 1126 1127/** 1128 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1129 * @phba: pointer to lpfc hba data structure. 1130 * 1131 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1132 * other than Port Error 6 has been detected. 1133 **/ 1134static void 1135lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1136{ 1137 lpfc_offline_prep(phba); 1138 lpfc_offline(phba); 1139 lpfc_sli4_brdreset(phba); 1140 lpfc_hba_down_post(phba); 1141 lpfc_sli4_post_status_check(phba); 1142 lpfc_unblock_mgmt_io(phba); 1143 phba->link_state = LPFC_HBA_ERROR; 1144} 1145 1146/** 1147 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1148 * @phba: pointer to lpfc hba data structure. 1149 * 1150 * This routine is invoked to handle the deferred HBA hardware error 1151 * conditions. This type of error is indicated by HBA by setting ER1 1152 * and another ER bit in the host status register. The driver will 1153 * wait until the ER1 bit clears before handling the error condition. 1154 **/ 1155static void 1156lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1157{ 1158 uint32_t old_host_status = phba->work_hs; 1159 struct lpfc_sli_ring *pring; 1160 struct lpfc_sli *psli = &phba->sli; 1161 1162 /* If the pci channel is offline, ignore possible errors, 1163 * since we cannot communicate with the pci card anyway. 1164 */ 1165 if (pci_channel_offline(phba->pcidev)) { 1166 spin_lock_irq(&phba->hbalock); 1167 phba->hba_flag &= ~DEFER_ERATT; 1168 spin_unlock_irq(&phba->hbalock); 1169 return; 1170 } 1171 1172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1173 "0479 Deferred Adapter Hardware Error " 1174 "Data: x%x x%x x%x\n", 1175 phba->work_hs, 1176 phba->work_status[0], phba->work_status[1]); 1177 1178 spin_lock_irq(&phba->hbalock); 1179 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1180 spin_unlock_irq(&phba->hbalock); 1181 1182 1183 /* 1184 * Firmware stops when it triggred erratt. That could cause the I/Os 1185 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1186 * SCSI layer retry it after re-establishing link. 1187 */ 1188 pring = &psli->ring[psli->fcp_ring]; 1189 lpfc_sli_abort_iocb_ring(phba, pring); 1190 1191 /* 1192 * There was a firmware error. 
Take the hba offline and then 1193 * attempt to restart it. 1194 */ 1195 lpfc_offline_prep(phba); 1196 lpfc_offline(phba); 1197 1198 /* Wait for the ER1 bit to clear.*/ 1199 while (phba->work_hs & HS_FFER1) { 1200 msleep(100); 1201 phba->work_hs = readl(phba->HSregaddr); 1202 /* If driver is unloading let the worker thread continue */ 1203 if (phba->pport->load_flag & FC_UNLOADING) { 1204 phba->work_hs = 0; 1205 break; 1206 } 1207 } 1208 1209 /* 1210 * This is to ptrotect against a race condition in which 1211 * first write to the host attention register clear the 1212 * host status register. 1213 */ 1214 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1215 phba->work_hs = old_host_status & ~HS_FFER1; 1216 1217 spin_lock_irq(&phba->hbalock); 1218 phba->hba_flag &= ~DEFER_ERATT; 1219 spin_unlock_irq(&phba->hbalock); 1220 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1221 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1222} 1223 1224static void 1225lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1226{ 1227 struct lpfc_board_event_header board_event; 1228 struct Scsi_Host *shost; 1229 1230 board_event.event_type = FC_REG_BOARD_EVENT; 1231 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1232 shost = lpfc_shost_from_vport(phba->pport); 1233 fc_host_post_vendor_event(shost, fc_get_event_number(), 1234 sizeof(board_event), 1235 (char *) &board_event, 1236 LPFC_NL_VENDOR_ID); 1237} 1238 1239/** 1240 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1241 * @phba: pointer to lpfc hba data structure. 1242 * 1243 * This routine is invoked to handle the following HBA hardware error 1244 * conditions: 1245 * 1 - HBA error attention interrupt 1246 * 2 - DMA ring index out of range 1247 * 3 - Mailbox command came back as unknown 1248 **/ 1249static void 1250lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1251{ 1252 struct lpfc_vport *vport = phba->pport; 1253 struct lpfc_sli *psli = &phba->sli; 1254 struct lpfc_sli_ring *pring; 1255 uint32_t event_data; 1256 unsigned long temperature; 1257 struct temp_event temp_event_data; 1258 struct Scsi_Host *shost; 1259 1260 /* If the pci channel is offline, ignore possible errors, 1261 * since we cannot communicate with the pci card anyway. 1262 */ 1263 if (pci_channel_offline(phba->pcidev)) { 1264 spin_lock_irq(&phba->hbalock); 1265 phba->hba_flag &= ~DEFER_ERATT; 1266 spin_unlock_irq(&phba->hbalock); 1267 return; 1268 } 1269 1270 /* If resets are disabled then leave the HBA alone and return */ 1271 if (!phba->cfg_enable_hba_reset) 1272 return; 1273 1274 /* Send an internal error event to mgmt application */ 1275 lpfc_board_errevt_to_mgmt(phba); 1276 1277 if (phba->hba_flag & DEFER_ERATT) 1278 lpfc_handle_deferred_eratt(phba); 1279 1280 if (phba->work_hs & HS_FFER6) { 1281 /* Re-establishing Link */ 1282 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1283 "1301 Re-establishing Link " 1284 "Data: x%x x%x x%x\n", 1285 phba->work_hs, 1286 phba->work_status[0], phba->work_status[1]); 1287 1288 spin_lock_irq(&phba->hbalock); 1289 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1290 spin_unlock_irq(&phba->hbalock); 1291 1292 /* 1293 * Firmware stops when it triggled erratt with HS_FFER6. 1294 * That could cause the I/Os dropped by the firmware. 1295 * Error iocb (I/O) on txcmplq and let the SCSI layer 1296 * retry it after re-establishing link. 1297 */ 1298 pring = &psli->ring[psli->fcp_ring]; 1299 lpfc_sli_abort_iocb_ring(phba, pring); 1300 1301 /* 1302 * There was a firmware error. 
Take the hba offline and then 1303 * attempt to restart it. 1304 */ 1305 lpfc_offline_prep(phba); 1306 lpfc_offline(phba); 1307 lpfc_sli_brdrestart(phba); 1308 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1309 lpfc_unblock_mgmt_io(phba); 1310 return; 1311 } 1312 lpfc_unblock_mgmt_io(phba); 1313 } else if (phba->work_hs & HS_CRIT_TEMP) { 1314 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1315 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1316 temp_event_data.event_code = LPFC_CRIT_TEMP; 1317 temp_event_data.data = (uint32_t)temperature; 1318 1319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1320 "0406 Adapter maximum temperature exceeded " 1321 "(%ld), taking this port offline " 1322 "Data: x%x x%x x%x\n", 1323 temperature, phba->work_hs, 1324 phba->work_status[0], phba->work_status[1]); 1325 1326 shost = lpfc_shost_from_vport(phba->pport); 1327 fc_host_post_vendor_event(shost, fc_get_event_number(), 1328 sizeof(temp_event_data), 1329 (char *) &temp_event_data, 1330 SCSI_NL_VID_TYPE_PCI 1331 | PCI_VENDOR_ID_EMULEX); 1332 1333 spin_lock_irq(&phba->hbalock); 1334 phba->over_temp_state = HBA_OVER_TEMP; 1335 spin_unlock_irq(&phba->hbalock); 1336 lpfc_offline_eratt(phba); 1337 1338 } else { 1339 /* The if clause above forces this code path when the status 1340 * failure is a value other than FFER6. Do not call the offline 1341 * twice. This is the adapter hardware error path. 1342 */ 1343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1344 "0457 Adapter Hardware Error " 1345 "Data: x%x x%x x%x\n", 1346 phba->work_hs, 1347 phba->work_status[0], phba->work_status[1]); 1348 1349 event_data = FC_REG_DUMP_EVENT; 1350 shost = lpfc_shost_from_vport(vport); 1351 fc_host_post_vendor_event(shost, fc_get_event_number(), 1352 sizeof(event_data), (char *) &event_data, 1353 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1354 1355 lpfc_offline_eratt(phba); 1356 } 1357 return; 1358} 1359 1360/** 1361 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1362 * @phba: pointer to lpfc hba data structure. 1363 * 1364 * This routine is invoked to handle the SLI4 HBA hardware error attention 1365 * conditions. 1366 **/ 1367static void 1368lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1369{ 1370 struct lpfc_vport *vport = phba->pport; 1371 uint32_t event_data; 1372 struct Scsi_Host *shost; 1373 1374 /* If the pci channel is offline, ignore possible errors, since 1375 * we cannot communicate with the pci card anyway. 1376 */ 1377 if (pci_channel_offline(phba->pcidev)) 1378 return; 1379 /* If resets are disabled then leave the HBA alone and return */ 1380 if (!phba->cfg_enable_hba_reset) 1381 return; 1382 1383 /* Send an internal error event to mgmt application */ 1384 lpfc_board_errevt_to_mgmt(phba); 1385 1386 /* For now, the actual action for SLI4 device handling is not 1387 * specified yet, just treated it as adaptor hardware failure 1388 */ 1389 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1390 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", 1391 phba->work_status[0], phba->work_status[1]); 1392 1393 event_data = FC_REG_DUMP_EVENT; 1394 shost = lpfc_shost_from_vport(vport); 1395 fc_host_post_vendor_event(shost, fc_get_event_number(), 1396 sizeof(event_data), (char *) &event_data, 1397 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1398 1399 lpfc_sli4_offline_eratt(phba); 1400} 1401 1402/** 1403 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1404 * @phba: pointer to lpfc HBA data structure. 
1405 * 1406 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1407 * routine from the API jump table function pointer from the lpfc_hba struct. 1408 * 1409 * Return codes 1410 * 0 - success. 1411 * Any other value - error. 1412 **/ 1413void 1414lpfc_handle_eratt(struct lpfc_hba *phba) 1415{ 1416 (*phba->lpfc_handle_eratt)(phba); 1417} 1418 1419/** 1420 * lpfc_handle_latt - The HBA link event handler 1421 * @phba: pointer to lpfc hba data structure. 1422 * 1423 * This routine is invoked from the worker thread to handle a HBA host 1424 * attention link event. 1425 **/ 1426void 1427lpfc_handle_latt(struct lpfc_hba *phba) 1428{ 1429 struct lpfc_vport *vport = phba->pport; 1430 struct lpfc_sli *psli = &phba->sli; 1431 LPFC_MBOXQ_t *pmb; 1432 volatile uint32_t control; 1433 struct lpfc_dmabuf *mp; 1434 int rc = 0; 1435 1436 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1437 if (!pmb) { 1438 rc = 1; 1439 goto lpfc_handle_latt_err_exit; 1440 } 1441 1442 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1443 if (!mp) { 1444 rc = 2; 1445 goto lpfc_handle_latt_free_pmb; 1446 } 1447 1448 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1449 if (!mp->virt) { 1450 rc = 3; 1451 goto lpfc_handle_latt_free_mp; 1452 } 1453 1454 /* Cleanup any outstanding ELS commands */ 1455 lpfc_els_flush_all_cmd(phba); 1456 1457 psli->slistat.link_event++; 1458 lpfc_read_la(phba, pmb, mp); 1459 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 1460 pmb->vport = vport; 1461 /* Block ELS IOCBs until we have processed this mbox command */ 1462 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1463 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1464 if (rc == MBX_NOT_FINISHED) { 1465 rc = 4; 1466 goto lpfc_handle_latt_free_mbuf; 1467 } 1468 1469 /* Clear Link Attention in HA REG */ 1470 spin_lock_irq(&phba->hbalock); 1471 writel(HA_LATT, phba->HAregaddr); 1472 readl(phba->HAregaddr); /* flush */ 1473 spin_unlock_irq(&phba->hbalock); 1474 1475 return; 1476 1477lpfc_handle_latt_free_mbuf: 1478 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1479 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1480lpfc_handle_latt_free_mp: 1481 kfree(mp); 1482lpfc_handle_latt_free_pmb: 1483 mempool_free(pmb, phba->mbox_mem_pool); 1484lpfc_handle_latt_err_exit: 1485 /* Enable Link attention interrupts */ 1486 spin_lock_irq(&phba->hbalock); 1487 psli->sli_flag |= LPFC_PROCESS_LA; 1488 control = readl(phba->HCregaddr); 1489 control |= HC_LAINT_ENA; 1490 writel(control, phba->HCregaddr); 1491 readl(phba->HCregaddr); /* flush */ 1492 1493 /* Clear Link Attention in HA REG */ 1494 writel(HA_LATT, phba->HAregaddr); 1495 readl(phba->HAregaddr); /* flush */ 1496 spin_unlock_irq(&phba->hbalock); 1497 lpfc_linkdown(phba); 1498 phba->link_state = LPFC_HBA_ERROR; 1499 1500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1501 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1502 1503 return; 1504} 1505 1506/** 1507 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1508 * @phba: pointer to lpfc hba data structure. 1509 * @vpd: pointer to the vital product data. 1510 * @len: length of the vital product data in bytes. 1511 * 1512 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1513 * an array of characters. In this routine, the ModelName, ProgramType, and 1514 * ModelDesc, etc. fields of the phba data structure will be populated. 
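 * The buffer is a sequence of tagged records: 0x82 and 0x91 records are
 * skipped, a 0x90 record carries the keyword fields and a 0x78 tag ends the
 * data.  Each keyword field is two keyword bytes, a length byte and the
 * value, e.g. 'V' '2' 0x07 "LP11000" would supply the ModelName.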
1515 * 1516 * Return codes 1517 * 0 - pointer to the VPD passed in is NULL 1518 * 1 - success 1519 **/ 1520int 1521lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1522{ 1523 uint8_t lenlo, lenhi; 1524 int Length; 1525 int i, j; 1526 int finished = 0; 1527 int index = 0; 1528 1529 if (!vpd) 1530 return 0; 1531 1532 /* Vital Product */ 1533 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1534 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1535 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1536 (uint32_t) vpd[3]); 1537 while (!finished && (index < (len - 4))) { 1538 switch (vpd[index]) { 1539 case 0x82: 1540 case 0x91: 1541 index += 1; 1542 lenlo = vpd[index]; 1543 index += 1; 1544 lenhi = vpd[index]; 1545 index += 1; 1546 i = ((((unsigned short)lenhi) << 8) + lenlo); 1547 index += i; 1548 break; 1549 case 0x90: 1550 index += 1; 1551 lenlo = vpd[index]; 1552 index += 1; 1553 lenhi = vpd[index]; 1554 index += 1; 1555 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1556 if (Length > len - index) 1557 Length = len - index; 1558 while (Length > 0) { 1559 /* Look for Serial Number */ 1560 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1561 index += 2; 1562 i = vpd[index]; 1563 index += 1; 1564 j = 0; 1565 Length -= (3+i); 1566 while(i--) { 1567 phba->SerialNumber[j++] = vpd[index++]; 1568 if (j == 31) 1569 break; 1570 } 1571 phba->SerialNumber[j] = 0; 1572 continue; 1573 } 1574 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1575 phba->vpd_flag |= VPD_MODEL_DESC; 1576 index += 2; 1577 i = vpd[index]; 1578 index += 1; 1579 j = 0; 1580 Length -= (3+i); 1581 while(i--) { 1582 phba->ModelDesc[j++] = vpd[index++]; 1583 if (j == 255) 1584 break; 1585 } 1586 phba->ModelDesc[j] = 0; 1587 continue; 1588 } 1589 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1590 phba->vpd_flag |= VPD_MODEL_NAME; 1591 index += 2; 1592 i = vpd[index]; 1593 index += 1; 1594 j = 0; 1595 Length -= (3+i); 1596 while(i--) { 1597 phba->ModelName[j++] = vpd[index++]; 1598 if (j == 79) 1599 break; 1600 } 1601 phba->ModelName[j] = 0; 1602 continue; 1603 } 1604 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1605 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1606 index += 2; 1607 i = vpd[index]; 1608 index += 1; 1609 j = 0; 1610 Length -= (3+i); 1611 while(i--) { 1612 phba->ProgramType[j++] = vpd[index++]; 1613 if (j == 255) 1614 break; 1615 } 1616 phba->ProgramType[j] = 0; 1617 continue; 1618 } 1619 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1620 phba->vpd_flag |= VPD_PORT; 1621 index += 2; 1622 i = vpd[index]; 1623 index += 1; 1624 j = 0; 1625 Length -= (3+i); 1626 while(i--) { 1627 phba->Port[j++] = vpd[index++]; 1628 if (j == 19) 1629 break; 1630 } 1631 phba->Port[j] = 0; 1632 continue; 1633 } 1634 else { 1635 index += 2; 1636 i = vpd[index]; 1637 index += 1; 1638 index += i; 1639 Length -= (3 + i); 1640 } 1641 } 1642 finished = 0; 1643 break; 1644 case 0x78: 1645 finished = 1; 1646 break; 1647 default: 1648 index ++; 1649 break; 1650 } 1651 } 1652 1653 return(1); 1654} 1655 1656/** 1657 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1658 * @phba: pointer to lpfc hba data structure. 1659 * @mdp: pointer to the data structure to hold the derived model name. 1660 * @descp: pointer to the data structure to hold the derived description. 1661 * 1662 * This routine retrieves HBA's description based on its registered PCI device 1663 * ID. The @descp passed into this function points to an array of 256 chars. 
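 * (A fully populated description typically reads like
 * "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter", assuming the adapter's
 * link-speed mask includes LMT_8Gb.)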
It 1664 * shall be returned with the model name, maximum speed, and the host bus type. 1665 * The @mdp passed into this function points to an array of 80 chars. When the 1666 * function returns, the @mdp will be filled with the model name. 1667 **/ 1668static void 1669lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1670{ 1671 lpfc_vpd_t *vp; 1672 uint16_t dev_id = phba->pcidev->device; 1673 int max_speed; 1674 int GE = 0; 1675 int oneConnect = 0; /* default is not a oneConnect */ 1676 struct { 1677 char *name; 1678 char *bus; 1679 char *function; 1680 } m = {"<Unknown>", "", ""}; 1681 1682 if (mdp && mdp[0] != '\0' 1683 && descp && descp[0] != '\0') 1684 return; 1685 1686 if (phba->lmt & LMT_10Gb) 1687 max_speed = 10; 1688 else if (phba->lmt & LMT_8Gb) 1689 max_speed = 8; 1690 else if (phba->lmt & LMT_4Gb) 1691 max_speed = 4; 1692 else if (phba->lmt & LMT_2Gb) 1693 max_speed = 2; 1694 else 1695 max_speed = 1; 1696 1697 vp = &phba->vpd; 1698 1699 switch (dev_id) { 1700 case PCI_DEVICE_ID_FIREFLY: 1701 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1702 break; 1703 case PCI_DEVICE_ID_SUPERFLY: 1704 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1705 m = (typeof(m)){"LP7000", "PCI", 1706 "Fibre Channel Adapter"}; 1707 else 1708 m = (typeof(m)){"LP7000E", "PCI", 1709 "Fibre Channel Adapter"}; 1710 break; 1711 case PCI_DEVICE_ID_DRAGONFLY: 1712 m = (typeof(m)){"LP8000", "PCI", 1713 "Fibre Channel Adapter"}; 1714 break; 1715 case PCI_DEVICE_ID_CENTAUR: 1716 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1717 m = (typeof(m)){"LP9002", "PCI", 1718 "Fibre Channel Adapter"}; 1719 else 1720 m = (typeof(m)){"LP9000", "PCI", 1721 "Fibre Channel Adapter"}; 1722 break; 1723 case PCI_DEVICE_ID_RFLY: 1724 m = (typeof(m)){"LP952", "PCI", 1725 "Fibre Channel Adapter"}; 1726 break; 1727 case PCI_DEVICE_ID_PEGASUS: 1728 m = (typeof(m)){"LP9802", "PCI-X", 1729 "Fibre Channel Adapter"}; 1730 break; 1731 case PCI_DEVICE_ID_THOR: 1732 m = (typeof(m)){"LP10000", "PCI-X", 1733 "Fibre Channel Adapter"}; 1734 break; 1735 case PCI_DEVICE_ID_VIPER: 1736 m = (typeof(m)){"LPX1000", "PCI-X", 1737 "Fibre Channel Adapter"}; 1738 break; 1739 case PCI_DEVICE_ID_PFLY: 1740 m = (typeof(m)){"LP982", "PCI-X", 1741 "Fibre Channel Adapter"}; 1742 break; 1743 case PCI_DEVICE_ID_TFLY: 1744 m = (typeof(m)){"LP1050", "PCI-X", 1745 "Fibre Channel Adapter"}; 1746 break; 1747 case PCI_DEVICE_ID_HELIOS: 1748 m = (typeof(m)){"LP11000", "PCI-X2", 1749 "Fibre Channel Adapter"}; 1750 break; 1751 case PCI_DEVICE_ID_HELIOS_SCSP: 1752 m = (typeof(m)){"LP11000-SP", "PCI-X2", 1753 "Fibre Channel Adapter"}; 1754 break; 1755 case PCI_DEVICE_ID_HELIOS_DCSP: 1756 m = (typeof(m)){"LP11002-SP", "PCI-X2", 1757 "Fibre Channel Adapter"}; 1758 break; 1759 case PCI_DEVICE_ID_NEPTUNE: 1760 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 1761 break; 1762 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1763 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 1764 break; 1765 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1766 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 1767 break; 1768 case PCI_DEVICE_ID_BMID: 1769 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 1770 break; 1771 case PCI_DEVICE_ID_BSMB: 1772 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 1773 break; 1774 case PCI_DEVICE_ID_ZEPHYR: 1775 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1776 break; 1777 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1778 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel 
Adapter"}; 1779 break; 1780 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1781 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1782 GE = 1; 1783 break; 1784 case PCI_DEVICE_ID_ZMID: 1785 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1786 break; 1787 case PCI_DEVICE_ID_ZSMB: 1788 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1789 break; 1790 case PCI_DEVICE_ID_LP101: 1791 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1792 break; 1793 case PCI_DEVICE_ID_LP10000S: 1794 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1795 break; 1796 case PCI_DEVICE_ID_LP11000S: 1797 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1798 break; 1799 case PCI_DEVICE_ID_LPE11000S: 1800 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1801 break; 1802 case PCI_DEVICE_ID_SAT: 1803 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1804 break; 1805 case PCI_DEVICE_ID_SAT_MID: 1806 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1807 break; 1808 case PCI_DEVICE_ID_SAT_SMB: 1809 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1810 break; 1811 case PCI_DEVICE_ID_SAT_DCSP: 1812 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1813 break; 1814 case PCI_DEVICE_ID_SAT_SCSP: 1815 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1816 break; 1817 case PCI_DEVICE_ID_SAT_S: 1818 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1819 break; 1820 case PCI_DEVICE_ID_HORNET: 1821 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1822 GE = 1; 1823 break; 1824 case PCI_DEVICE_ID_PROTEUS_VF: 1825 m = (typeof(m)){"LPev12000", "PCIe IOV", 1826 "Fibre Channel Adapter"}; 1827 break; 1828 case PCI_DEVICE_ID_PROTEUS_PF: 1829 m = (typeof(m)){"LPev12000", "PCIe IOV", 1830 "Fibre Channel Adapter"}; 1831 break; 1832 case PCI_DEVICE_ID_PROTEUS_S: 1833 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1834 "Fibre Channel Adapter"}; 1835 break; 1836 case PCI_DEVICE_ID_TIGERSHARK: 1837 oneConnect = 1; 1838 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1839 break; 1840 case PCI_DEVICE_ID_TOMCAT: 1841 oneConnect = 1; 1842 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1843 break; 1844 case PCI_DEVICE_ID_FALCON: 1845 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1846 "EmulexSecure Fibre"}; 1847 break; 1848 case PCI_DEVICE_ID_BALIUS: 1849 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1850 "Fibre Channel Adapter"}; 1851 break; 1852 default: 1853 m = (typeof(m)){"Unknown", "", ""}; 1854 break; 1855 } 1856 1857 if (mdp && mdp[0] == '\0') 1858 snprintf(mdp, 79,"%s", m.name); 1859 /* oneConnect hba requires special processing, they are all initiators 1860 * and we put the port number on the end 1861 */ 1862 if (descp && descp[0] == '\0') { 1863 if (oneConnect) 1864 snprintf(descp, 255, 1865 "Emulex OneConnect %s, %s Initiator, Port %s", 1866 m.name, m.function, 1867 phba->Port); 1868 else 1869 snprintf(descp, 255, 1870 "Emulex %s %d%s %s %s", 1871 m.name, max_speed, (GE) ? "GE" : "Gb", 1872 m.bus, m.function); 1873 } 1874} 1875 1876/** 1877 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1878 * @phba: pointer to lpfc hba data structure. 1879 * @pring: pointer to a IOCB ring. 1880 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1881 * 1882 * This routine posts a given number of IOCBs with the associated DMA buffer 1883 * descriptors specified by the cnt argument to the given IOCB ring. 
1884 * 1885 * Return codes 1886 * The number of IOCBs NOT able to be posted to the IOCB ring. 1887 **/ 1888int 1889lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1890{ 1891 IOCB_t *icmd; 1892 struct lpfc_iocbq *iocb; 1893 struct lpfc_dmabuf *mp1, *mp2; 1894 1895 cnt += pring->missbufcnt; 1896 1897 /* While there are buffers to post */ 1898 while (cnt > 0) { 1899 /* Allocate buffer for command iocb */ 1900 iocb = lpfc_sli_get_iocbq(phba); 1901 if (iocb == NULL) { 1902 pring->missbufcnt = cnt; 1903 return cnt; 1904 } 1905 icmd = &iocb->iocb; 1906 1907 /* 2 buffers can be posted per command */ 1908 /* Allocate buffer to post */ 1909 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1910 if (mp1) 1911 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1912 if (!mp1 || !mp1->virt) { 1913 kfree(mp1); 1914 lpfc_sli_release_iocbq(phba, iocb); 1915 pring->missbufcnt = cnt; 1916 return cnt; 1917 } 1918 1919 INIT_LIST_HEAD(&mp1->list); 1920 /* Allocate buffer to post */ 1921 if (cnt > 1) { 1922 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1923 if (mp2) 1924 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1925 &mp2->phys); 1926 if (!mp2 || !mp2->virt) { 1927 kfree(mp2); 1928 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1929 kfree(mp1); 1930 lpfc_sli_release_iocbq(phba, iocb); 1931 pring->missbufcnt = cnt; 1932 return cnt; 1933 } 1934 1935 INIT_LIST_HEAD(&mp2->list); 1936 } else { 1937 mp2 = NULL; 1938 } 1939 1940 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1941 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1942 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1943 icmd->ulpBdeCount = 1; 1944 cnt--; 1945 if (mp2) { 1946 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1947 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1948 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1949 cnt--; 1950 icmd->ulpBdeCount = 2; 1951 } 1952 1953 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1954 icmd->ulpLe = 1; 1955 1956 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 1957 IOCB_ERROR) { 1958 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1959 kfree(mp1); 1960 cnt++; 1961 if (mp2) { 1962 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1963 kfree(mp2); 1964 cnt++; 1965 } 1966 lpfc_sli_release_iocbq(phba, iocb); 1967 pring->missbufcnt = cnt; 1968 return cnt; 1969 } 1970 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1971 if (mp2) 1972 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1973 } 1974 pring->missbufcnt = 0; 1975 return 0; 1976} 1977 1978/** 1979 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 1980 * @phba: pointer to lpfc hba data structure. 1981 * 1982 * This routine posts initial receive IOCB buffers to the ELS ring. The 1983 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 1984 * set to 64 IOCBs. 1985 * 1986 * Return codes 1987 * 0 - success (currently always success) 1988 **/ 1989static int 1990lpfc_post_rcv_buf(struct lpfc_hba *phba) 1991{ 1992 struct lpfc_sli *psli = &phba->sli; 1993 1994 /* Ring 0, ELS / CT buffers */ 1995 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 1996 /* Ring 2 - FCP no buffers needed */ 1997 1998 return 0; 1999} 2000 2001#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2002 2003/** 2004 * lpfc_sha_init - Set up initial array of hash table entries 2005 * @HashResultPointer: pointer to an array as hash table. 2006 * 2007 * This routine sets up the initial values to the array of hash table entries 2008 * for the LC HBAs. 
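 *
 * Editorial note: the five words loaded here are the standard SHA-1
 * initial hash values (H0..H4 from FIPS 180-1), and lpfc_sha_iterate()
 * below is the matching 80-round compression step. A minimal sketch of
 * how the helpers combine, mirroring lpfc_hba_init() further down
 * (local names are illustrative only):
 *
 *	uint32_t digest[5];
 *	uint32_t working[80];			// words 0-15 seeded by the caller
 *
 *	memset(working, 0, sizeof(working));
 *	lpfc_sha_init(digest);			// load H0..H4
 *	lpfc_sha_iterate(digest, working);	// one 512-bit block round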
2009 **/ 2010static void 2011lpfc_sha_init(uint32_t * HashResultPointer) 2012{ 2013 HashResultPointer[0] = 0x67452301; 2014 HashResultPointer[1] = 0xEFCDAB89; 2015 HashResultPointer[2] = 0x98BADCFE; 2016 HashResultPointer[3] = 0x10325476; 2017 HashResultPointer[4] = 0xC3D2E1F0; 2018} 2019 2020/** 2021 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2022 * @HashResultPointer: pointer to an initial/result hash table. 2023 * @HashWorkingPointer: pointer to a working hash table. 2024 * 2025 * This routine iterates an initial hash table pointed to by @HashResultPointer 2026 * with the values from the working hash table pointed to by @HashWorkingPointer. 2027 * The results are put back into the initial hash table, returned through 2028 * the @HashResultPointer as the result hash table. 2029 **/ 2030static void 2031lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2032{ 2033 int t; 2034 uint32_t TEMP; 2035 uint32_t A, B, C, D, E; 2036 t = 16; 2037 do { 2038 HashWorkingPointer[t] = 2039 S(1, 2040 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2041 8] ^ 2042 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2043 } while (++t <= 79); 2044 t = 0; 2045 A = HashResultPointer[0]; 2046 B = HashResultPointer[1]; 2047 C = HashResultPointer[2]; 2048 D = HashResultPointer[3]; 2049 E = HashResultPointer[4]; 2050 2051 do { 2052 if (t < 20) { 2053 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2054 } else if (t < 40) { 2055 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2056 } else if (t < 60) { 2057 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2058 } else { 2059 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2060 } 2061 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2062 E = D; 2063 D = C; 2064 C = S(30, B); 2065 B = A; 2066 A = TEMP; 2067 } while (++t <= 79); 2068 2069 HashResultPointer[0] += A; 2070 HashResultPointer[1] += B; 2071 HashResultPointer[2] += C; 2072 HashResultPointer[3] += D; 2073 HashResultPointer[4] += E; 2074 2075} 2076 2077/** 2078 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2079 * @RandomChallenge: pointer to the entry of host challenge random number array. 2080 * @HashWorking: pointer to the entry of the working hash array. 2081 * 2082 * This routine calculates the working hash array referred to by @HashWorking 2083 * from the challenge random numbers associated with the host, referred to by 2084 * @RandomChallenge. The result is put into the entry of the working hash 2085 * array and returned by reference through @HashWorking. 2086 **/ 2087static void 2088lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2089{ 2090 *HashWorking = (*RandomChallenge ^ *HashWorking); 2091} 2092 2093/** 2094 * lpfc_hba_init - Perform special handling for LC HBA initialization 2095 * @phba: pointer to lpfc hba data structure. 2096 * @hbainit: pointer to an array of unsigned 32-bit integers. 2097 * 2098 * This routine performs the special handling for LC HBA initialization.
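 *
 * Editorial note: a minimal usage sketch (the local array name and the
 * final consumer are assumptions for illustration; in the driver the
 * resulting digest is carried into the CONFIG_PORT mailbox for LC HBAs):
 *
 *	uint32_t hbainit[5];			// receives the SHA-1 digest
 *
 *	lpfc_hba_init(phba, hbainit);		// hash WWNN words + challenge data
 *	// then copy hbainit[] into the CONFIG_PORT mailbox before issuing it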
2099 **/ 2100void 2101lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2102{ 2103 int t; 2104 uint32_t *HashWorking; 2105 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2106 2107 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2108 if (!HashWorking) 2109 return; 2110 2111 HashWorking[0] = HashWorking[78] = *pwwnn++; 2112 HashWorking[1] = HashWorking[79] = *pwwnn; 2113 2114 for (t = 0; t < 7; t++) 2115 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2116 2117 lpfc_sha_init(hbainit); 2118 lpfc_sha_iterate(hbainit, HashWorking); 2119 kfree(HashWorking); 2120} 2121 2122/** 2123 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2124 * @vport: pointer to a virtual N_Port data structure. 2125 * 2126 * This routine performs the necessary cleanups before deleting the @vport. 2127 * It invokes the discovery state machine to perform necessary state 2128 * transitions and to release the ndlps associated with the @vport. Note, 2129 * the physical port is treated as @vport 0. 2130 **/ 2131void 2132lpfc_cleanup(struct lpfc_vport *vport) 2133{ 2134 struct lpfc_hba *phba = vport->phba; 2135 struct lpfc_nodelist *ndlp, *next_ndlp; 2136 int i = 0; 2137 2138 if (phba->link_state > LPFC_LINK_DOWN) 2139 lpfc_port_link_failure(vport); 2140 2141 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2142 if (!NLP_CHK_NODE_ACT(ndlp)) { 2143 ndlp = lpfc_enable_node(vport, ndlp, 2144 NLP_STE_UNUSED_NODE); 2145 if (!ndlp) 2146 continue; 2147 spin_lock_irq(&phba->ndlp_lock); 2148 NLP_SET_FREE_REQ(ndlp); 2149 spin_unlock_irq(&phba->ndlp_lock); 2150 /* Trigger the release of the ndlp memory */ 2151 lpfc_nlp_put(ndlp); 2152 continue; 2153 } 2154 spin_lock_irq(&phba->ndlp_lock); 2155 if (NLP_CHK_FREE_REQ(ndlp)) { 2156 /* The ndlp should not be in memory free mode already */ 2157 spin_unlock_irq(&phba->ndlp_lock); 2158 continue; 2159 } else 2160 /* Indicate request for freeing ndlp memory */ 2161 NLP_SET_FREE_REQ(ndlp); 2162 spin_unlock_irq(&phba->ndlp_lock); 2163 2164 if (vport->port_type != LPFC_PHYSICAL_PORT && 2165 ndlp->nlp_DID == Fabric_DID) { 2166 /* Just free up ndlp with Fabric_DID for vports */ 2167 lpfc_nlp_put(ndlp); 2168 continue; 2169 } 2170 2171 if (ndlp->nlp_type & NLP_FABRIC) 2172 lpfc_disc_state_machine(vport, ndlp, NULL, 2173 NLP_EVT_DEVICE_RECOVERY); 2174 2175 lpfc_disc_state_machine(vport, ndlp, NULL, 2176 NLP_EVT_DEVICE_RM); 2177 2178 } 2179 2180 /* At this point, ALL ndlp's should be gone 2181 * because of the previous NLP_EVT_DEVICE_RM. 2182 * Lets wait for this to happen, if needed. 2183 */ 2184 while (!list_empty(&vport->fc_nodes)) { 2185 if (i++ > 3000) { 2186 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2187 "0233 Nodelist not empty\n"); 2188 list_for_each_entry_safe(ndlp, next_ndlp, 2189 &vport->fc_nodes, nlp_listp) { 2190 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2191 LOG_NODE, 2192 "0282 did:x%x ndlp:x%p " 2193 "usgmap:x%x refcnt:%d\n", 2194 ndlp->nlp_DID, (void *)ndlp, 2195 ndlp->nlp_usg_map, 2196 atomic_read( 2197 &ndlp->kref.refcount)); 2198 } 2199 break; 2200 } 2201 2202 /* Wait for any activity on ndlps to settle */ 2203 msleep(10); 2204 } 2205} 2206 2207/** 2208 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2209 * @vport: pointer to a virtual N_Port data structure. 2210 * 2211 * This routine stops all the timers associated with a @vport. This function 2212 * is invoked before disabling or deleting a @vport. Note that the physical 2213 * port is treated as @vport 0. 
2214 **/ 2215void 2216lpfc_stop_vport_timers(struct lpfc_vport *vport) 2217{ 2218 del_timer_sync(&vport->els_tmofunc); 2219 del_timer_sync(&vport->fc_fdmitmo); 2220 lpfc_can_disctmo(vport); 2221 return; 2222} 2223 2224/** 2225 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2226 * @phba: pointer to lpfc hba data structure. 2227 * 2228 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2229 * caller of this routine should already hold the host lock. 2230 **/ 2231void 2232__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2233{ 2234 /* Clear pending FCF rediscovery wait and failover in progress flags */ 2235 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | 2236 FCF_DEAD_DISC | 2237 FCF_ACVL_DISC); 2238 /* Now, try to stop the timer */ 2239 del_timer(&phba->fcf.redisc_wait); 2240} 2241 2242/** 2243 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2244 * @phba: pointer to lpfc hba data structure. 2245 * 2246 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2247 * checks whether the FCF rediscovery wait timer is pending with the host 2248 * lock held before proceeding with disabling the timer and clearing the 2249 * wait timer pending flag. 2250 **/ 2251void 2252lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2253{ 2254 spin_lock_irq(&phba->hbalock); 2255 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2256 /* FCF rediscovery timer already fired or stopped */ 2257 spin_unlock_irq(&phba->hbalock); 2258 return; 2259 } 2260 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2261 spin_unlock_irq(&phba->hbalock); 2262} 2263 2264/** 2265 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2266 * @phba: pointer to lpfc hba data structure. 2267 * 2268 * This routine stops all the timers associated with an HBA. This function is 2269 * invoked before either putting an HBA offline or unloading the driver. 2270 **/ 2271void 2272lpfc_stop_hba_timers(struct lpfc_hba *phba) 2273{ 2274 lpfc_stop_vport_timers(phba->pport); 2275 del_timer_sync(&phba->sli.mbox_tmo); 2276 del_timer_sync(&phba->fabric_block_timer); 2277 del_timer_sync(&phba->eratt_poll); 2278 del_timer_sync(&phba->hb_tmofunc); 2279 phba->hb_outstanding = 0; 2280 2281 switch (phba->pci_dev_grp) { 2282 case LPFC_PCI_DEV_LP: 2283 /* Stop any LightPulse device specific driver timers */ 2284 del_timer_sync(&phba->fcp_poll_timer); 2285 break; 2286 case LPFC_PCI_DEV_OC: 2287 /* Stop any OneConnect device specific driver timers */ 2288 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2289 break; 2290 default: 2291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2292 "0297 Invalid device group (x%x)\n", 2293 phba->pci_dev_grp); 2294 break; 2295 } 2296 return; 2297} 2298 2299/** 2300 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked 2301 * @phba: pointer to lpfc hba data structure. 2302 * 2303 * This routine marks an HBA's management interface as blocked. Once the HBA's 2304 * management interface is marked as blocked, all the user space access to 2305 * the HBA, whether from the sysfs interface or the libdfc interface, will 2306 * be blocked. The HBA is set to block the management interface when the 2307 * driver prepares the HBA interface for online or offline.
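 *
 * Editorial note: callers bracket HBA state changes between this routine
 * and lpfc_unblock_mgmt_io(). A minimal sketch of the pattern, as used by
 * lpfc_online() below (error handling trimmed):
 *
 *	lpfc_block_mgmt_io(phba);		// quiesce sysfs/libdfc access
 *	if (lpfc_sli_hba_setup(phba)) {		// bring the SLI layer up
 *		lpfc_unblock_mgmt_io(phba);	// unblock on failure too
 *		return 1;
 *	}
 *	...
 *	lpfc_unblock_mgmt_io(phba);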
2308 **/ 2309static void 2310lpfc_block_mgmt_io(struct lpfc_hba * phba) 2311{ 2312 unsigned long iflag; 2313 uint8_t actcmd = MBX_HEARTBEAT; 2314 unsigned long timeout; 2315 2316 2317 spin_lock_irqsave(&phba->hbalock, iflag); 2318 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2319 if (phba->sli.mbox_active) 2320 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2321 spin_unlock_irqrestore(&phba->hbalock, iflag); 2322 /* Determine how long we might wait for the active mailbox 2323 * command to be gracefully completed by firmware. 2324 */ 2325 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 2326 jiffies; 2327 /* Wait for the outstnading mailbox command to complete */ 2328 while (phba->sli.mbox_active) { 2329 /* Check active mailbox complete status every 2ms */ 2330 msleep(2); 2331 if (time_after(jiffies, timeout)) { 2332 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2333 "2813 Mgmt IO is Blocked %x " 2334 "- mbox cmd %x still active\n", 2335 phba->sli.sli_flag, actcmd); 2336 break; 2337 } 2338 } 2339} 2340 2341/** 2342 * lpfc_online - Initialize and bring a HBA online 2343 * @phba: pointer to lpfc hba data structure. 2344 * 2345 * This routine initializes the HBA and brings a HBA online. During this 2346 * process, the management interface is blocked to prevent user space access 2347 * to the HBA interfering with the driver initialization. 2348 * 2349 * Return codes 2350 * 0 - successful 2351 * 1 - failed 2352 **/ 2353int 2354lpfc_online(struct lpfc_hba *phba) 2355{ 2356 struct lpfc_vport *vport; 2357 struct lpfc_vport **vports; 2358 int i; 2359 2360 if (!phba) 2361 return 0; 2362 vport = phba->pport; 2363 2364 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2365 return 0; 2366 2367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2368 "0458 Bring Adapter online\n"); 2369 2370 lpfc_block_mgmt_io(phba); 2371 2372 if (!lpfc_sli_queue_setup(phba)) { 2373 lpfc_unblock_mgmt_io(phba); 2374 return 1; 2375 } 2376 2377 if (phba->sli_rev == LPFC_SLI_REV4) { 2378 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2379 lpfc_unblock_mgmt_io(phba); 2380 return 1; 2381 } 2382 } else { 2383 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2384 lpfc_unblock_mgmt_io(phba); 2385 return 1; 2386 } 2387 } 2388 2389 vports = lpfc_create_vport_work_array(phba); 2390 if (vports != NULL) 2391 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2392 struct Scsi_Host *shost; 2393 shost = lpfc_shost_from_vport(vports[i]); 2394 spin_lock_irq(shost->host_lock); 2395 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2396 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2397 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2398 if (phba->sli_rev == LPFC_SLI_REV4) 2399 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2400 spin_unlock_irq(shost->host_lock); 2401 } 2402 lpfc_destroy_vport_work_array(phba, vports); 2403 2404 lpfc_unblock_mgmt_io(phba); 2405 return 0; 2406} 2407 2408/** 2409 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2410 * @phba: pointer to lpfc hba data structure. 2411 * 2412 * This routine marks a HBA's management interface as not blocked. Once the 2413 * HBA's management interface is marked as not blocked, all the user space 2414 * access to the HBA, whether they are from sysfs interface or libdfc 2415 * interface will be allowed. The HBA is set to block the management interface 2416 * when the driver prepares the HBA interface for online or offline and then 2417 * set to unblock the management interface afterwards. 
2418 **/ 2419void 2420lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2421{ 2422 unsigned long iflag; 2423 2424 spin_lock_irqsave(&phba->hbalock, iflag); 2425 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2426 spin_unlock_irqrestore(&phba->hbalock, iflag); 2427} 2428 2429/** 2430 * lpfc_offline_prep - Prepare a HBA to be brought offline 2431 * @phba: pointer to lpfc hba data structure. 2432 * 2433 * This routine is invoked to prepare a HBA to be brought offline. It performs 2434 * unregistration login to all the nodes on all vports and flushes the mailbox 2435 * queue to make it ready to be brought offline. 2436 **/ 2437void 2438lpfc_offline_prep(struct lpfc_hba * phba) 2439{ 2440 struct lpfc_vport *vport = phba->pport; 2441 struct lpfc_nodelist *ndlp, *next_ndlp; 2442 struct lpfc_vport **vports; 2443 struct Scsi_Host *shost; 2444 int i; 2445 2446 if (vport->fc_flag & FC_OFFLINE_MODE) 2447 return; 2448 2449 lpfc_block_mgmt_io(phba); 2450 2451 lpfc_linkdown(phba); 2452 2453 /* Issue an unreg_login to all nodes on all vports */ 2454 vports = lpfc_create_vport_work_array(phba); 2455 if (vports != NULL) { 2456 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2457 if (vports[i]->load_flag & FC_UNLOADING) 2458 continue; 2459 shost = lpfc_shost_from_vport(vports[i]); 2460 spin_lock_irq(shost->host_lock); 2461 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2462 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2463 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2464 spin_unlock_irq(shost->host_lock); 2465 2466 shost = lpfc_shost_from_vport(vports[i]); 2467 list_for_each_entry_safe(ndlp, next_ndlp, 2468 &vports[i]->fc_nodes, 2469 nlp_listp) { 2470 if (!NLP_CHK_NODE_ACT(ndlp)) 2471 continue; 2472 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2473 continue; 2474 if (ndlp->nlp_type & NLP_FABRIC) { 2475 lpfc_disc_state_machine(vports[i], ndlp, 2476 NULL, NLP_EVT_DEVICE_RECOVERY); 2477 lpfc_disc_state_machine(vports[i], ndlp, 2478 NULL, NLP_EVT_DEVICE_RM); 2479 } 2480 spin_lock_irq(shost->host_lock); 2481 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2482 spin_unlock_irq(shost->host_lock); 2483 lpfc_unreg_rpi(vports[i], ndlp); 2484 } 2485 } 2486 } 2487 lpfc_destroy_vport_work_array(phba, vports); 2488 2489 lpfc_sli_mbox_sys_shutdown(phba); 2490} 2491 2492/** 2493 * lpfc_offline - Bring a HBA offline 2494 * @phba: pointer to lpfc hba data structure. 2495 * 2496 * This routine actually brings a HBA offline. It stops all the timers 2497 * associated with the HBA, brings down the SLI layer, and eventually 2498 * marks the HBA as in offline state for the upper layer protocol. 2499 **/ 2500void 2501lpfc_offline(struct lpfc_hba *phba) 2502{ 2503 struct Scsi_Host *shost; 2504 struct lpfc_vport **vports; 2505 int i; 2506 2507 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2508 return; 2509 2510 /* stop port and all timers associated with this hba */ 2511 lpfc_stop_port(phba); 2512 vports = lpfc_create_vport_work_array(phba); 2513 if (vports != NULL) 2514 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2515 lpfc_stop_vport_timers(vports[i]); 2516 lpfc_destroy_vport_work_array(phba, vports); 2517 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2518 "0460 Bring Adapter offline\n"); 2519 /* Bring down the SLI Layer and cleanup. The HBA is offline 2520 now. 
*/ 2521 lpfc_sli_hba_down(phba); 2522 spin_lock_irq(&phba->hbalock); 2523 phba->work_ha = 0; 2524 spin_unlock_irq(&phba->hbalock); 2525 vports = lpfc_create_vport_work_array(phba); 2526 if (vports != NULL) 2527 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2528 shost = lpfc_shost_from_vport(vports[i]); 2529 spin_lock_irq(shost->host_lock); 2530 vports[i]->work_port_events = 0; 2531 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2532 spin_unlock_irq(shost->host_lock); 2533 } 2534 lpfc_destroy_vport_work_array(phba, vports); 2535} 2536 2537/** 2538 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2539 * @phba: pointer to lpfc hba data structure. 2540 * 2541 * This routine is to free all the SCSI buffers and IOCBs from the driver 2542 * list back to kernel. It is called from lpfc_pci_remove_one to free 2543 * the internal resources before the device is removed from the system. 2544 * 2545 * Return codes 2546 * 0 - successful (for now, it always returns 0) 2547 **/ 2548static int 2549lpfc_scsi_free(struct lpfc_hba *phba) 2550{ 2551 struct lpfc_scsi_buf *sb, *sb_next; 2552 struct lpfc_iocbq *io, *io_next; 2553 2554 spin_lock_irq(&phba->hbalock); 2555 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2556 spin_lock(&phba->scsi_buf_list_lock); 2557 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2558 list_del(&sb->list); 2559 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2560 sb->dma_handle); 2561 kfree(sb); 2562 phba->total_scsi_bufs--; 2563 } 2564 spin_unlock(&phba->scsi_buf_list_lock); 2565 2566 /* Release all the lpfc_iocbq entries maintained by this host. */ 2567 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2568 list_del(&io->list); 2569 kfree(io); 2570 phba->total_iocbq_bufs--; 2571 } 2572 spin_unlock_irq(&phba->hbalock); 2573 return 0; 2574} 2575 2576/** 2577 * lpfc_create_port - Create an FC port 2578 * @phba: pointer to lpfc hba data structure. 2579 * @instance: a unique integer ID to this FC port. 2580 * @dev: pointer to the device data structure. 2581 * 2582 * This routine creates a FC port for the upper layer protocol. The FC port 2583 * can be created on top of either a physical port or a virtual port provided 2584 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2585 * and associates the FC port created before adding the shost into the SCSI 2586 * layer. 2587 * 2588 * Return codes 2589 * @vport - pointer to the virtual N_Port data structure. 2590 * NULL - port create failed. 
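 *
 * Editorial note: a minimal creation sketch for the physical port (the
 * surrounding probe context is paraphrased; the instance number normally
 * comes from lpfc_get_instance() below):
 *
 *	struct lpfc_vport *vport;
 *
 *	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 *	if (!vport)
 *		return -ENODEV;		// shost alloc or scsi_add_host failed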
2591 **/ 2592struct lpfc_vport * 2593lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2594{ 2595 struct lpfc_vport *vport; 2596 struct Scsi_Host *shost; 2597 int error = 0; 2598 2599 if (dev != &phba->pcidev->dev) 2600 shost = scsi_host_alloc(&lpfc_vport_template, 2601 sizeof(struct lpfc_vport)); 2602 else 2603 shost = scsi_host_alloc(&lpfc_template, 2604 sizeof(struct lpfc_vport)); 2605 if (!shost) 2606 goto out; 2607 2608 vport = (struct lpfc_vport *) shost->hostdata; 2609 vport->phba = phba; 2610 vport->load_flag |= FC_LOADING; 2611 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2612 vport->fc_rscn_flush = 0; 2613 2614 lpfc_get_vport_cfgparam(vport); 2615 shost->unique_id = instance; 2616 shost->max_id = LPFC_MAX_TARGET; 2617 shost->max_lun = vport->cfg_max_luns; 2618 shost->this_id = -1; 2619 shost->max_cmd_len = 16; 2620 if (phba->sli_rev == LPFC_SLI_REV4) { 2621 shost->dma_boundary = 2622 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2623 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2624 } 2625 2626 /* 2627 * Set initial can_queue value since 0 is no longer supported and 2628 * scsi_add_host will fail. This will be adjusted later based on the 2629 * max xri value determined in hba setup. 2630 */ 2631 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2632 if (dev != &phba->pcidev->dev) { 2633 shost->transportt = lpfc_vport_transport_template; 2634 vport->port_type = LPFC_NPIV_PORT; 2635 } else { 2636 shost->transportt = lpfc_transport_template; 2637 vport->port_type = LPFC_PHYSICAL_PORT; 2638 } 2639 2640 /* Initialize all internally managed lists. */ 2641 INIT_LIST_HEAD(&vport->fc_nodes); 2642 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2643 spin_lock_init(&vport->work_port_lock); 2644 2645 init_timer(&vport->fc_disctmo); 2646 vport->fc_disctmo.function = lpfc_disc_timeout; 2647 vport->fc_disctmo.data = (unsigned long)vport; 2648 2649 init_timer(&vport->fc_fdmitmo); 2650 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2651 vport->fc_fdmitmo.data = (unsigned long)vport; 2652 2653 init_timer(&vport->els_tmofunc); 2654 vport->els_tmofunc.function = lpfc_els_timeout; 2655 vport->els_tmofunc.data = (unsigned long)vport; 2656 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2657 if (error) 2658 goto out_put_shost; 2659 2660 spin_lock_irq(&phba->hbalock); 2661 list_add_tail(&vport->listentry, &phba->port_list); 2662 spin_unlock_irq(&phba->hbalock); 2663 return vport; 2664 2665out_put_shost: 2666 scsi_host_put(shost); 2667out: 2668 return NULL; 2669} 2670 2671/** 2672 * destroy_port - destroy an FC port 2673 * @vport: pointer to an lpfc virtual N_Port data structure. 2674 * 2675 * This routine destroys a FC port from the upper layer protocol. All the 2676 * resources associated with the port are released. 2677 **/ 2678void 2679destroy_port(struct lpfc_vport *vport) 2680{ 2681 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2682 struct lpfc_hba *phba = vport->phba; 2683 2684 lpfc_debugfs_terminate(vport); 2685 fc_remove_host(shost); 2686 scsi_remove_host(shost); 2687 2688 spin_lock_irq(&phba->hbalock); 2689 list_del_init(&vport->listentry); 2690 spin_unlock_irq(&phba->hbalock); 2691 2692 lpfc_cleanup(vport); 2693 return; 2694} 2695 2696/** 2697 * lpfc_get_instance - Get a unique integer ID 2698 * 2699 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2700 * uses the kernel idr facility to perform the task. 2701 * 2702 * Return codes: 2703 * instance - a unique integer ID allocated as the new instance. 
2704 * -1 - lpfc get instance failed. 2705 **/ 2706int 2707lpfc_get_instance(void) 2708{ 2709 int instance = 0; 2710 2711 /* Assign an unused number */ 2712 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2713 return -1; 2714 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2715 return -1; 2716 return instance; 2717} 2718 2719/** 2720 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2721 * @shost: pointer to SCSI host data structure. 2722 * @time: elapsed time of the scan in jiffies. 2723 * 2724 * This routine is called by the SCSI layer with a SCSI host to determine 2725 * whether the scan host is finished. 2726 * 2727 * Note: there is no scan_start function as adapter initialization will have 2728 * asynchronously kicked off the link initialization. 2729 * 2730 * Return codes 2731 * 0 - SCSI host scan is not over yet. 2732 * 1 - SCSI host scan is over. 2733 **/ 2734int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2735{ 2736 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2737 struct lpfc_hba *phba = vport->phba; 2738 int stat = 0; 2739 2740 spin_lock_irq(shost->host_lock); 2741 2742 if (vport->load_flag & FC_UNLOADING) { 2743 stat = 1; 2744 goto finished; 2745 } 2746 if (time >= 30 * HZ) { 2747 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2748 "0461 Scanning longer than 30 " 2749 "seconds. Continuing initialization\n"); 2750 stat = 1; 2751 goto finished; 2752 } 2753 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2754 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2755 "0465 Link down longer than 15 " 2756 "seconds. Continuing initialization\n"); 2757 stat = 1; 2758 goto finished; 2759 } 2760 2761 if (vport->port_state != LPFC_VPORT_READY) 2762 goto finished; 2763 if (vport->num_disc_nodes || vport->fc_prli_sent) 2764 goto finished; 2765 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2766 goto finished; 2767 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2768 goto finished; 2769 2770 stat = 1; 2771 2772finished: 2773 spin_unlock_irq(shost->host_lock); 2774 return stat; 2775} 2776 2777/** 2778 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2779 * @shost: pointer to SCSI host data structure. 2780 * 2781 * This routine initializes a given SCSI host attributes on a FC port. The 2782 * SCSI host can be either on top of a physical port or a virtual port. 2783 **/ 2784void lpfc_host_attrib_init(struct Scsi_Host *shost) 2785{ 2786 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2787 struct lpfc_hba *phba = vport->phba; 2788 /* 2789 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
2790 */ 2791 2792 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2793 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2794 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2795 2796 memset(fc_host_supported_fc4s(shost), 0, 2797 sizeof(fc_host_supported_fc4s(shost))); 2798 fc_host_supported_fc4s(shost)[2] = 1; 2799 fc_host_supported_fc4s(shost)[7] = 1; 2800 2801 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2802 sizeof fc_host_symbolic_name(shost)); 2803 2804 fc_host_supported_speeds(shost) = 0; 2805 if (phba->lmt & LMT_10Gb) 2806 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2807 if (phba->lmt & LMT_8Gb) 2808 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2809 if (phba->lmt & LMT_4Gb) 2810 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2811 if (phba->lmt & LMT_2Gb) 2812 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2813 if (phba->lmt & LMT_1Gb) 2814 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2815 2816 fc_host_maxframe_size(shost) = 2817 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2818 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2819 2820 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2821 2822 /* This value is also unchanging */ 2823 memset(fc_host_active_fc4s(shost), 0, 2824 sizeof(fc_host_active_fc4s(shost))); 2825 fc_host_active_fc4s(shost)[2] = 1; 2826 fc_host_active_fc4s(shost)[7] = 1; 2827 2828 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2829 spin_lock_irq(shost->host_lock); 2830 vport->load_flag &= ~FC_LOADING; 2831 spin_unlock_irq(shost->host_lock); 2832} 2833 2834/** 2835 * lpfc_stop_port_s3 - Stop SLI3 device port 2836 * @phba: pointer to lpfc hba data structure. 2837 * 2838 * This routine is invoked to stop an SLI3 device port, it stops the device 2839 * from generating interrupts and stops the device driver's timers for the 2840 * device. 2841 **/ 2842static void 2843lpfc_stop_port_s3(struct lpfc_hba *phba) 2844{ 2845 /* Clear all interrupt enable conditions */ 2846 writel(0, phba->HCregaddr); 2847 readl(phba->HCregaddr); /* flush */ 2848 /* Clear all pending interrupts */ 2849 writel(0xffffffff, phba->HAregaddr); 2850 readl(phba->HAregaddr); /* flush */ 2851 2852 /* Reset some HBA SLI setup states */ 2853 lpfc_stop_hba_timers(phba); 2854 phba->pport->work_port_events = 0; 2855} 2856 2857/** 2858 * lpfc_stop_port_s4 - Stop SLI4 device port 2859 * @phba: pointer to lpfc hba data structure. 2860 * 2861 * This routine is invoked to stop an SLI4 device port, it stops the device 2862 * from generating interrupts and stops the device driver's timers for the 2863 * device. 2864 **/ 2865static void 2866lpfc_stop_port_s4(struct lpfc_hba *phba) 2867{ 2868 /* Reset some HBA SLI4 setup states */ 2869 lpfc_stop_hba_timers(phba); 2870 phba->pport->work_port_events = 0; 2871 phba->sli4_hba.intr_enable = 0; 2872} 2873 2874/** 2875 * lpfc_stop_port - Wrapper function for stopping hba port 2876 * @phba: Pointer to HBA context object. 2877 * 2878 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 2879 * the API jump table function pointer from the lpfc_hba struct. 2880 **/ 2881void 2882lpfc_stop_port(struct lpfc_hba *phba) 2883{ 2884 phba->lpfc_stop_port(phba); 2885} 2886 2887/** 2888 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port. 2889 * @phba: pointer to lpfc hba data structure. 2890 * 2891 * This routine is invoked to remove the driver default fcf record from 2892 * the port. 
This routine currently acts on FCF Index 0. 2893 * 2894 **/ 2895void 2896lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) 2897{ 2898 int rc = 0; 2899 LPFC_MBOXQ_t *mboxq; 2900 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; 2901 uint32_t mbox_tmo, req_len; 2902 uint32_t shdr_status, shdr_add_status; 2903 2904 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2905 if (!mboxq) { 2906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2907 "2020 Failed to allocate mbox for ADD_FCF cmd\n"); 2908 return; 2909 } 2910 2911 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - 2912 sizeof(struct lpfc_sli4_cfg_mhdr); 2913 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2914 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, 2915 req_len, LPFC_SLI4_MBX_EMBED); 2916 /* 2917 * In phase 1, there is a single FCF index, 0. In phase2, the driver 2918 * supports multiple FCF indices. 2919 */ 2920 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; 2921 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); 2922 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, 2923 phba->fcf.current_rec.fcf_indx); 2924 2925 if (!phba->sli4_hba.intr_enable) 2926 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2927 else { 2928 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 2929 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 2930 } 2931 /* The IOCTL status is embedded in the mailbox subheader. */ 2932 shdr_status = bf_get(lpfc_mbox_hdr_status, 2933 &del_fcf_record->header.cfg_shdr.response); 2934 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 2935 &del_fcf_record->header.cfg_shdr.response); 2936 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 2937 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2938 "2516 DEL FCF of default FCF Index failed " 2939 "mbx status x%x, status x%x add_status x%x\n", 2940 rc, shdr_status, shdr_add_status); 2941 } 2942 if (rc != MBX_TIMEOUT) 2943 mempool_free(mboxq, phba->mbox_mem_pool); 2944} 2945 2946/** 2947 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2948 * @phba: Pointer to hba for which this call is being executed. 2949 * 2950 * This routine starts the timer waiting for the FCF rediscovery to complete. 2951 **/ 2952void 2953lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 2954{ 2955 unsigned long fcf_redisc_wait_tmo = 2956 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 2957 /* Start fcf rediscovery wait period timer */ 2958 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 2959 spin_lock_irq(&phba->hbalock); 2960 /* Allow action to new fcf asynchronous event */ 2961 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2962 /* Mark the FCF rediscovery pending state */ 2963 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 2964 spin_unlock_irq(&phba->hbalock); 2965} 2966 2967/** 2968 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 2969 * @ptr: Map to lpfc_hba data structure pointer. 2970 * 2971 * This routine is invoked when waiting for FCF table rediscover has been 2972 * timed out. If new FCF record(s) has (have) been discovered during the 2973 * wait period, a new FCF event shall be added to the FCOE async event 2974 * list, and then worker thread shall be waked up for processing from the 2975 * worker thread context. 
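 *
 * Editorial note: a sketch of how this callback is assumed to be wired up
 * at init time, mirroring the init_timer() pattern used for the vport
 * timers in lpfc_create_port() above:
 *
 *	init_timer(&phba->fcf.redisc_wait);
 *	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
 *	phba->fcf.redisc_wait.data = (unsigned long)phba;
 *
 * lpfc_fcf_redisc_wait_start_timer() above then arms it with mod_timer().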
2976 **/ 2977void 2978lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 2979{ 2980 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 2981 2982 /* Don't send FCF rediscovery event if timer cancelled */ 2983 spin_lock_irq(&phba->hbalock); 2984 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2985 spin_unlock_irq(&phba->hbalock); 2986 return; 2987 } 2988 /* Clear FCF rediscovery timer pending flag */ 2989 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2990 /* FCF rediscovery event to worker thread */ 2991 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2992 spin_unlock_irq(&phba->hbalock); 2993 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2994 "2776 FCF rediscover wait timer expired, post " 2995 "a worker thread event for FCF table scan\n"); 2996 /* wake up worker thread */ 2997 lpfc_worker_wake_up(phba); 2998} 2999 3000/** 3001 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support 3002 * @phba: pointer to lpfc hba data structure. 3003 * 3004 * This function uses the QUERY_FW_CFG mailbox command to determine if the 3005 * firmware loaded supports FCoE. A return of zero indicates that the mailbox 3006 * was successful and the firmware supports FCoE. Any other return indicates 3007 * a error. It is assumed that this function will be called before interrupts 3008 * are enabled. 3009 **/ 3010static int 3011lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) 3012{ 3013 int rc = 0; 3014 LPFC_MBOXQ_t *mboxq; 3015 struct lpfc_mbx_query_fw_cfg *query_fw_cfg; 3016 uint32_t length; 3017 uint32_t shdr_status, shdr_add_status; 3018 3019 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3020 if (!mboxq) { 3021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3022 "2621 Failed to allocate mbox for " 3023 "query firmware config cmd\n"); 3024 return -ENOMEM; 3025 } 3026 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; 3027 length = (sizeof(struct lpfc_mbx_query_fw_cfg) - 3028 sizeof(struct lpfc_sli4_cfg_mhdr)); 3029 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 3030 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 3031 length, LPFC_SLI4_MBX_EMBED); 3032 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 3033 /* The IOCTL status is embedded in the mailbox subheader. */ 3034 shdr_status = bf_get(lpfc_mbox_hdr_status, 3035 &query_fw_cfg->header.cfg_shdr.response); 3036 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 3037 &query_fw_cfg->header.cfg_shdr.response); 3038 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 3039 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3040 "2622 Query Firmware Config failed " 3041 "mbx status x%x, status x%x add_status x%x\n", 3042 rc, shdr_status, shdr_add_status); 3043 return -EINVAL; 3044 } 3045 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { 3046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3047 "2623 FCoE Function not supported by firmware. " 3048 "Function mode = %08x\n", 3049 query_fw_cfg->function_mode); 3050 return -EINVAL; 3051 } 3052 if (rc != MBX_TIMEOUT) 3053 mempool_free(mboxq, phba->mbox_mem_pool); 3054 return 0; 3055} 3056 3057/** 3058 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3059 * @phba: pointer to lpfc hba data structure. 3060 * @acqe_link: pointer to the async link completion queue entry. 3061 * 3062 * This routine is to parse the SLI4 link-attention link fault code and 3063 * translate it into the base driver's read link attention mailbox command 3064 * status. 3065 * 3066 * Return: Link-attention status in terms of base driver's coding. 
3067 **/ 3068static uint16_t 3069lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3070 struct lpfc_acqe_link *acqe_link) 3071{ 3072 uint16_t latt_fault; 3073 3074 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3075 case LPFC_ASYNC_LINK_FAULT_NONE: 3076 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3077 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3078 latt_fault = 0; 3079 break; 3080 default: 3081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3082 "0398 Invalid link fault code: x%x\n", 3083 bf_get(lpfc_acqe_link_fault, acqe_link)); 3084 latt_fault = MBXERR_ERROR; 3085 break; 3086 } 3087 return latt_fault; 3088} 3089 3090/** 3091 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3092 * @phba: pointer to lpfc hba data structure. 3093 * @acqe_link: pointer to the async link completion queue entry. 3094 * 3095 * This routine is to parse the SLI4 link attention type and translate it 3096 * into the base driver's link attention type coding. 3097 * 3098 * Return: Link attention type in terms of base driver's coding. 3099 **/ 3100static uint8_t 3101lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3102 struct lpfc_acqe_link *acqe_link) 3103{ 3104 uint8_t att_type; 3105 3106 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3107 case LPFC_ASYNC_LINK_STATUS_DOWN: 3108 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3109 att_type = AT_LINK_DOWN; 3110 break; 3111 case LPFC_ASYNC_LINK_STATUS_UP: 3112 /* Ignore physical link up events - wait for logical link up */ 3113 att_type = AT_RESERVED; 3114 break; 3115 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3116 att_type = AT_LINK_UP; 3117 break; 3118 default: 3119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3120 "0399 Invalid link attention type: x%x\n", 3121 bf_get(lpfc_acqe_link_status, acqe_link)); 3122 att_type = AT_RESERVED; 3123 break; 3124 } 3125 return att_type; 3126} 3127 3128/** 3129 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3130 * @phba: pointer to lpfc hba data structure. 3131 * @acqe_link: pointer to the async link completion queue entry. 3132 * 3133 * This routine is to parse the SLI4 link-attention link speed and translate 3134 * it into the base driver's link-attention link speed coding. 3135 * 3136 * Return: Link-attention link speed in terms of base driver's coding. 3137 **/ 3138static uint8_t 3139lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3140 struct lpfc_acqe_link *acqe_link) 3141{ 3142 uint8_t link_speed; 3143 3144 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3145 case LPFC_ASYNC_LINK_SPEED_ZERO: 3146 link_speed = LA_UNKNW_LINK; 3147 break; 3148 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3149 link_speed = LA_UNKNW_LINK; 3150 break; 3151 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3152 link_speed = LA_UNKNW_LINK; 3153 break; 3154 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3155 link_speed = LA_1GHZ_LINK; 3156 break; 3157 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3158 link_speed = LA_10GHZ_LINK; 3159 break; 3160 default: 3161 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3162 "0483 Invalid link-attention link speed: x%x\n", 3163 bf_get(lpfc_acqe_link_speed, acqe_link)); 3164 link_speed = LA_UNKNW_LINK; 3165 break; 3166 } 3167 return link_speed; 3168} 3169 3170/** 3171 * lpfc_sli4_async_link_evt - Process the asynchronous link event 3172 * @phba: pointer to lpfc hba data structure. 3173 * @acqe_link: pointer to the async link completion queue entry. 3174 * 3175 * This routine is to handle the SLI4 asynchronous link event. 
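 *
 * Editorial note: rather than adding a separate SLI4 link path, the
 * routine below fabricates a READ_LA (read link attention) mailbox
 * completion from the ACQE fields and hands it to the existing handler,
 * lpfc_mbx_cmpl_read_la(), so both SLI generations share one
 * link-attention state machine.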
3176 **/ 3177static void 3178lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3179 struct lpfc_acqe_link *acqe_link) 3180{ 3181 struct lpfc_dmabuf *mp; 3182 LPFC_MBOXQ_t *pmb; 3183 MAILBOX_t *mb; 3184 READ_LA_VAR *la; 3185 uint8_t att_type; 3186 3187 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3188 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 3189 return; 3190 phba->fcoe_eventtag = acqe_link->event_tag; 3191 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3192 if (!pmb) { 3193 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3194 "0395 The mboxq allocation failed\n"); 3195 return; 3196 } 3197 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3198 if (!mp) { 3199 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3200 "0396 The lpfc_dmabuf allocation failed\n"); 3201 goto out_free_pmb; 3202 } 3203 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3204 if (!mp->virt) { 3205 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3206 "0397 The mbuf allocation failed\n"); 3207 goto out_free_dmabuf; 3208 } 3209 3210 /* Cleanup any outstanding ELS commands */ 3211 lpfc_els_flush_all_cmd(phba); 3212 3213 /* Block ELS IOCBs until we have processed the link event */ 3214 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3215 3216 /* Update link event statistics */ 3217 phba->sli.slistat.link_event++; 3218 3219 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ 3220 lpfc_read_la(phba, pmb, mp); 3221 pmb->vport = phba->pport; 3222 3223 /* Parse and translate status field */ 3224 mb = &pmb->u.mb; 3225 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3226 3227 /* Parse and translate link attention fields */ 3228 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 3229 la->eventTag = acqe_link->event_tag; 3230 la->attType = att_type; 3231 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); 3232 3233 /* Fake the following irrelevant fields */ 3234 la->topology = TOPOLOGY_PT_PT; 3235 la->granted_AL_PA = 0; 3236 la->il = 0; 3237 la->pb = 0; 3238 la->fa = 0; 3239 la->mm = 0; 3240 3241 /* Keep the link status for extra SLI4 state machine reference */ 3242 phba->sli4_hba.link_state.speed = 3243 bf_get(lpfc_acqe_link_speed, acqe_link); 3244 phba->sli4_hba.link_state.duplex = 3245 bf_get(lpfc_acqe_link_duplex, acqe_link); 3246 phba->sli4_hba.link_state.status = 3247 bf_get(lpfc_acqe_link_status, acqe_link); 3248 phba->sli4_hba.link_state.physical = 3249 bf_get(lpfc_acqe_link_physical, acqe_link); 3250 phba->sli4_hba.link_state.fault = 3251 bf_get(lpfc_acqe_link_fault, acqe_link); 3252 phba->sli4_hba.link_state.logical_speed = 3253 bf_get(lpfc_acqe_qos_link_speed, acqe_link); 3254 3255 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3256 lpfc_mbx_cmpl_read_la(phba, pmb); 3257 3258 return; 3259 3260out_free_dmabuf: 3261 kfree(mp); 3262out_free_pmb: 3263 mempool_free(pmb, phba->mbox_mem_pool); 3264} 3265 3266/** 3267 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3268 * @vport: pointer to vport data structure. 3269 * 3270 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3271 * response to a CVL event. 3272 * 3273 * Return the pointer to the ndlp with the vport if successful, otherwise 3274 * return NULL.
3275 **/ 3276static struct lpfc_nodelist * 3277lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3278{ 3279 struct lpfc_nodelist *ndlp; 3280 struct Scsi_Host *shost; 3281 struct lpfc_hba *phba; 3282 3283 if (!vport) 3284 return NULL; 3285 phba = vport->phba; 3286 if (!phba) 3287 return NULL; 3288 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3289 if (!ndlp) { 3290 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3291 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3292 if (!ndlp) 3293 return 0; 3294 lpfc_nlp_init(vport, ndlp, Fabric_DID); 3295 /* Set the node type */ 3296 ndlp->nlp_type |= NLP_FABRIC; 3297 /* Put ndlp onto node list */ 3298 lpfc_enqueue_node(vport, ndlp); 3299 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3300 /* re-setup ndlp without removing from node list */ 3301 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3302 if (!ndlp) 3303 return 0; 3304 } 3305 if (phba->pport->port_state < LPFC_FLOGI) 3306 return NULL; 3307 /* If virtual link is not yet instantiated ignore CVL */ 3308 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)) 3309 return NULL; 3310 shost = lpfc_shost_from_vport(vport); 3311 if (!shost) 3312 return NULL; 3313 lpfc_linkdown_port(vport); 3314 lpfc_cleanup_pending_mbox(vport); 3315 spin_lock_irq(shost->host_lock); 3316 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3317 spin_unlock_irq(shost->host_lock); 3318 3319 return ndlp; 3320} 3321 3322/** 3323 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3324 * @vport: pointer to lpfc hba data structure. 3325 * 3326 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3327 * response to a FCF dead event. 3328 **/ 3329static void 3330lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3331{ 3332 struct lpfc_vport **vports; 3333 int i; 3334 3335 vports = lpfc_create_vport_work_array(phba); 3336 if (vports) 3337 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3338 lpfc_sli4_perform_vport_cvl(vports[i]); 3339 lpfc_destroy_vport_work_array(phba, vports); 3340} 3341 3342/** 3343 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3344 * @phba: pointer to lpfc hba data structure. 3345 * @acqe_link: pointer to the async fcoe completion queue entry. 3346 * 3347 * This routine is to handle the SLI4 asynchronous fcoe event. 
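 *
 * Editorial note: the switch below dispatches on the ACQE event type:
 * NEW_FCF and FCF_PARAM_MOD trigger an FCF table scan, FCF_TABLE_FULL is
 * only logged, FCF_DEAD and CVL start the fast FCF failover paths, and
 * anything else is reported as an unknown FCoE event.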
3348 **/ 3349static void 3350lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, 3351 struct lpfc_acqe_fcoe *acqe_fcoe) 3352{ 3353 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3354 int rc; 3355 struct lpfc_vport *vport; 3356 struct lpfc_nodelist *ndlp; 3357 struct Scsi_Host *shost; 3358 int active_vlink_present; 3359 struct lpfc_vport **vports; 3360 int i; 3361 3362 phba->fc_eventTag = acqe_fcoe->event_tag; 3363 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3364 switch (event_type) { 3365 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3366 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3367 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3368 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3369 LOG_DISCOVERY, 3370 "2546 New FCF found event: " 3371 "evt_tag:x%x, fcf_index:x%x\n", 3372 acqe_fcoe->event_tag, 3373 acqe_fcoe->index); 3374 else 3375 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3376 LOG_DISCOVERY, 3377 "2788 FCF parameter modified event: " 3378 "evt_tag:x%x, fcf_index:x%x\n", 3379 acqe_fcoe->event_tag, 3380 acqe_fcoe->index); 3381 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3382 /* 3383 * During period of FCF discovery, read the FCF 3384 * table record indexed by the event to update 3385 * FCF round robin failover eligible FCF bmask. 3386 */ 3387 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3388 LOG_DISCOVERY, 3389 "2779 Read new FCF record with " 3390 "fcf_index:x%x for updating FCF " 3391 "round robin failover bmask\n", 3392 acqe_fcoe->index); 3393 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3394 } 3395 3396 /* If the FCF discovery is in progress, do nothing. */ 3397 spin_lock_irq(&phba->hbalock); 3398 if (phba->hba_flag & FCF_DISC_INPROGRESS) { 3399 spin_unlock_irq(&phba->hbalock); 3400 break; 3401 } 3402 /* If fast FCF failover rescan event is pending, do nothing */ 3403 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3404 spin_unlock_irq(&phba->hbalock); 3405 break; 3406 } 3407 3408 /* If the FCF has been in discovered state, do nothing. */ 3409 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3410 spin_unlock_irq(&phba->hbalock); 3411 break; 3412 } 3413 spin_unlock_irq(&phba->hbalock); 3414 3415 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3416 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3417 "2770 Start FCF table scan due to new FCF " 3418 "event: evt_tag:x%x, fcf_index:x%x\n", 3419 acqe_fcoe->event_tag, acqe_fcoe->index); 3420 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3421 LPFC_FCOE_FCF_GET_FIRST); 3422 if (rc) 3423 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3424 "2547 Issue FCF scan read FCF mailbox " 3425 "command failed 0x%x\n", rc); 3426 break; 3427 3428 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3429 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3430 "2548 FCF Table full count 0x%x tag 0x%x\n", 3431 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 3432 acqe_fcoe->event_tag); 3433 break; 3434 3435 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3436 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3437 "2549 FCF disconnected from network index 0x%x" 3438 " tag 0x%x\n", acqe_fcoe->index, 3439 acqe_fcoe->event_tag); 3440 /* 3441 * If we are in the middle of FCF failover process, clear 3442 * the corresponding FCF bit in the roundrobin bitmap. 
3443 */ 3444 spin_lock_irq(&phba->hbalock); 3445 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3446 spin_unlock_irq(&phba->hbalock); 3447 /* Update FLOGI FCF failover eligible FCF bmask */ 3448 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); 3449 break; 3450 } 3451 spin_unlock_irq(&phba->hbalock); 3452 3453 /* If the event is not for the currently used FCF, do nothing */ 3454 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3455 break; 3456 3457 /* 3458 * Otherwise, request the port to rediscover the entire FCF 3459 * table for a fast recovery in case the current FCF 3460 * is no longer valid, as we are not already in the middle of 3461 * the FCF failover process. 3462 */ 3463 spin_lock_irq(&phba->hbalock); 3464 /* Mark the fast failover process in progress */ 3465 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3466 spin_unlock_irq(&phba->hbalock); 3467 3468 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3469 "2771 Start FCF fast failover process due to " 3470 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3471 "\n", acqe_fcoe->event_tag, acqe_fcoe->index); 3472 rc = lpfc_sli4_redisc_fcf_table(phba); 3473 if (rc) { 3474 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3475 LOG_DISCOVERY, 3476 "2772 Issue FCF rediscover mailbox " 3477 "command failed, fail through to FCF " 3478 "dead event\n"); 3479 spin_lock_irq(&phba->hbalock); 3480 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3481 spin_unlock_irq(&phba->hbalock); 3482 /* 3483 * Last resort will fail over by treating this 3484 * as a link down to FCF registration. 3485 */ 3486 lpfc_sli4_fcf_dead_failthrough(phba); 3487 } else { 3488 /* Reset FCF roundrobin bmask for new discovery */ 3489 memset(phba->fcf.fcf_rr_bmask, 0, 3490 sizeof(*phba->fcf.fcf_rr_bmask)); 3491 /* 3492 * Handling fast FCF failover to a DEAD FCF event is 3493 * considered equivalent to receiving CVL on all vports. 3494 */ 3495 lpfc_sli4_perform_all_vport_cvl(phba); 3496 } 3497 break; 3498 case LPFC_FCOE_EVENT_TYPE_CVL: 3499 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3500 "2718 Clear Virtual Link Received for VPI 0x%x" 3501 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3502 vport = lpfc_find_vport_by_vpid(phba, 3503 acqe_fcoe->index - phba->vpi_base); 3504 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3505 if (!ndlp) 3506 break; 3507 active_vlink_present = 0; 3508 3509 vports = lpfc_create_vport_work_array(phba); 3510 if (vports) { 3511 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3512 i++) { 3513 if ((!(vports[i]->fc_flag & 3514 FC_VPORT_CVL_RCVD)) && 3515 (vports[i]->port_state > LPFC_FDISC)) { 3516 active_vlink_present = 1; 3517 break; 3518 } 3519 } 3520 lpfc_destroy_vport_work_array(phba, vports); 3521 } 3522 3523 if (active_vlink_present) { 3524 /* 3525 * If there are other active VLinks present, 3526 * re-instantiate the Vlink using FDISC. 3527 */ 3528 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3529 shost = lpfc_shost_from_vport(vport); 3530 spin_lock_irq(shost->host_lock); 3531 ndlp->nlp_flag |= NLP_DELAY_TMO; 3532 spin_unlock_irq(shost->host_lock); 3533 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3534 vport->port_state = LPFC_FDISC; 3535 } else { 3536 /* 3537 * Otherwise, we request port to rediscover 3538 * the entire FCF table for a fast recovery 3539 * from possible case that the current FCF 3540 * is no longer valid if we are not already 3541 * in the FCF failover process.
3542 */
3543 spin_lock_irq(&phba->hbalock);
3544 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3545 spin_unlock_irq(&phba->hbalock);
3546 break;
3547 }
3548 /* Mark the fast failover process in progress */
3549 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3550 spin_unlock_irq(&phba->hbalock);
3551 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3552 LOG_DISCOVERY,
3553 "2773 Start FCF fast failover due "
3554 "to CVL event: evt_tag:x%x\n",
3555 acqe_fcoe->event_tag);
3556 rc = lpfc_sli4_redisc_fcf_table(phba);
3557 if (rc) {
3558 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3559 LOG_DISCOVERY,
3560 "2774 Issue FCF rediscover "
3561 "mailbox command failed, fail "
3562 "through to CVL event\n");
3563 spin_lock_irq(&phba->hbalock);
3564 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3565 spin_unlock_irq(&phba->hbalock);
3566 /*
3567 * The last resort is to retry on the
3568 * currently registered FCF entry.
3569 */
3570 lpfc_retry_pport_discovery(phba);
3571 } else
3572 /*
3573 * Reset FCF roundrobin bmask for new
3574 * discovery.
3575 */
3576 memset(phba->fcf.fcf_rr_bmask, 0,
3577 sizeof(*phba->fcf.fcf_rr_bmask));
3578 }
3579 break;
3580 default:
3581 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3582 "0288 Unknown FCoE event type 0x%x event tag "
3583 "0x%x\n", event_type, acqe_fcoe->event_tag);
3584 break;
3585 }
3586 }
3587
3588 /**
3589 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3590 * @phba: pointer to lpfc hba data structure.
3591 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3592 *
3593 * This routine is to handle the SLI4 asynchronous dcbx event.
3594 **/
3595 static void
3596 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3597 struct lpfc_acqe_dcbx *acqe_dcbx)
3598 {
3599 phba->fc_eventTag = acqe_dcbx->event_tag;
3600 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3601 "0290 The SLI4 DCBX asynchronous event is not "
3602 "handled yet\n");
3603 }
3604
3605 /**
3606 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3607 * @phba: pointer to lpfc hba data structure.
3608 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3609 *
3610 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3611 * is an asynchronous notification of a logical link speed change. The Port
3612 * reports the logical link speed in units of 10Mbps.
3613 **/
3614 static void
3615 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3616 struct lpfc_acqe_grp5 *acqe_grp5)
3617 {
3618 uint16_t prev_ll_spd;
3619
3620 phba->fc_eventTag = acqe_grp5->event_tag;
3621 phba->fcoe_eventtag = acqe_grp5->event_tag;
3622 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3623 phba->sli4_hba.link_state.logical_speed =
3624 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3625 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3626 "2789 GRP5 Async Event: Updating logical link speed "
3627 "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3628 (phba->sli4_hba.link_state.logical_speed*10));
3629 }
3630
3631 /**
3632 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3633 * @phba: pointer to lpfc hba data structure.
3634 *
3635 * This routine is invoked by the worker thread to process all the pending
3636 * SLI4 asynchronous events.
3637 **/ 3638void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 3639{ 3640 struct lpfc_cq_event *cq_event; 3641 3642 /* First, declare the async event has been handled */ 3643 spin_lock_irq(&phba->hbalock); 3644 phba->hba_flag &= ~ASYNC_EVENT; 3645 spin_unlock_irq(&phba->hbalock); 3646 /* Now, handle all the async events */ 3647 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 3648 /* Get the first event from the head of the event queue */ 3649 spin_lock_irq(&phba->hbalock); 3650 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 3651 cq_event, struct lpfc_cq_event, list); 3652 spin_unlock_irq(&phba->hbalock); 3653 /* Process the asynchronous event */ 3654 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 3655 case LPFC_TRAILER_CODE_LINK: 3656 lpfc_sli4_async_link_evt(phba, 3657 &cq_event->cqe.acqe_link); 3658 break; 3659 case LPFC_TRAILER_CODE_FCOE: 3660 lpfc_sli4_async_fcoe_evt(phba, 3661 &cq_event->cqe.acqe_fcoe); 3662 break; 3663 case LPFC_TRAILER_CODE_DCBX: 3664 lpfc_sli4_async_dcbx_evt(phba, 3665 &cq_event->cqe.acqe_dcbx); 3666 break; 3667 case LPFC_TRAILER_CODE_GRP5: 3668 lpfc_sli4_async_grp5_evt(phba, 3669 &cq_event->cqe.acqe_grp5); 3670 break; 3671 default: 3672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3673 "1804 Invalid asynchrous event code: " 3674 "x%x\n", bf_get(lpfc_trailer_code, 3675 &cq_event->cqe.mcqe_cmpl)); 3676 break; 3677 } 3678 /* Free the completion event processed to the free pool */ 3679 lpfc_sli4_cq_event_release(phba, cq_event); 3680 } 3681} 3682 3683/** 3684 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 3685 * @phba: pointer to lpfc hba data structure. 3686 * 3687 * This routine is invoked by the worker thread to process FCF table 3688 * rediscovery pending completion event. 3689 **/ 3690void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 3691{ 3692 int rc; 3693 3694 spin_lock_irq(&phba->hbalock); 3695 /* Clear FCF rediscovery timeout event */ 3696 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 3697 /* Clear driver fast failover FCF record flag */ 3698 phba->fcf.failover_rec.flag = 0; 3699 /* Set state for FCF fast failover */ 3700 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 3701 spin_unlock_irq(&phba->hbalock); 3702 3703 /* Scan FCF table from the first entry to re-discover SAN */ 3704 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3705 "2777 Start FCF table scan after FCF " 3706 "rediscovery quiescent period over\n"); 3707 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3708 if (rc) 3709 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3710 "2747 Issue FCF scan read FCF mailbox " 3711 "command failed 0x%x\n", rc); 3712} 3713 3714/** 3715 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3716 * @phba: pointer to lpfc hba data structure. 3717 * @dev_grp: The HBA PCI-Device group number. 3718 * 3719 * This routine is invoked to set up the per HBA PCI-Device group function 3720 * API jump table entries. 
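 * Once populated, common code reaches the SLI-revision specific handler
 * indirectly, for example phba->lpfc_stop_port(phba), without needing to
 * know whether the SLI-3 or SLI-4 variant is bound.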
3721 * 3722 * Return: 0 if success, otherwise -ENODEV 3723 **/ 3724int 3725lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3726{ 3727 int rc; 3728 3729 /* Set up lpfc PCI-device group */ 3730 phba->pci_dev_grp = dev_grp; 3731 3732 /* The LPFC_PCI_DEV_OC uses SLI4 */ 3733 if (dev_grp == LPFC_PCI_DEV_OC) 3734 phba->sli_rev = LPFC_SLI_REV4; 3735 3736 /* Set up device INIT API function jump table */ 3737 rc = lpfc_init_api_table_setup(phba, dev_grp); 3738 if (rc) 3739 return -ENODEV; 3740 /* Set up SCSI API function jump table */ 3741 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 3742 if (rc) 3743 return -ENODEV; 3744 /* Set up SLI API function jump table */ 3745 rc = lpfc_sli_api_table_setup(phba, dev_grp); 3746 if (rc) 3747 return -ENODEV; 3748 /* Set up MBOX API function jump table */ 3749 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 3750 if (rc) 3751 return -ENODEV; 3752 3753 return 0; 3754} 3755 3756/** 3757 * lpfc_log_intr_mode - Log the active interrupt mode 3758 * @phba: pointer to lpfc hba data structure. 3759 * @intr_mode: active interrupt mode adopted. 3760 * 3761 * This routine it invoked to log the currently used active interrupt mode 3762 * to the device. 3763 **/ 3764static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3765{ 3766 switch (intr_mode) { 3767 case 0: 3768 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3769 "0470 Enable INTx interrupt mode.\n"); 3770 break; 3771 case 1: 3772 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3773 "0481 Enabled MSI interrupt mode.\n"); 3774 break; 3775 case 2: 3776 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3777 "0480 Enabled MSI-X interrupt mode.\n"); 3778 break; 3779 default: 3780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3781 "0482 Illegal interrupt mode.\n"); 3782 break; 3783 } 3784 return; 3785} 3786 3787/** 3788 * lpfc_enable_pci_dev - Enable a generic PCI device. 3789 * @phba: pointer to lpfc hba data structure. 3790 * 3791 * This routine is invoked to enable the PCI device that is common to all 3792 * PCI devices. 3793 * 3794 * Return codes 3795 * 0 - successful 3796 * other values - error 3797 **/ 3798static int 3799lpfc_enable_pci_dev(struct lpfc_hba *phba) 3800{ 3801 struct pci_dev *pdev; 3802 int bars; 3803 3804 /* Obtain PCI device reference */ 3805 if (!phba->pcidev) 3806 goto out_error; 3807 else 3808 pdev = phba->pcidev; 3809 /* Select PCI BARs */ 3810 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3811 /* Enable PCI device */ 3812 if (pci_enable_device_mem(pdev)) 3813 goto out_error; 3814 /* Request PCI resource for the device */ 3815 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3816 goto out_disable_device; 3817 /* Set up device as PCI master and save state for EEH */ 3818 pci_set_master(pdev); 3819 pci_try_set_mwi(pdev); 3820 pci_save_state(pdev); 3821 3822 return 0; 3823 3824out_disable_device: 3825 pci_disable_device(pdev); 3826out_error: 3827 return -ENODEV; 3828} 3829 3830/** 3831 * lpfc_disable_pci_dev - Disable a generic PCI device. 3832 * @phba: pointer to lpfc hba data structure. 3833 * 3834 * This routine is invoked to disable the PCI device that is common to all 3835 * PCI devices. 
3836 **/ 3837static void 3838lpfc_disable_pci_dev(struct lpfc_hba *phba) 3839{ 3840 struct pci_dev *pdev; 3841 int bars; 3842 3843 /* Obtain PCI device reference */ 3844 if (!phba->pcidev) 3845 return; 3846 else 3847 pdev = phba->pcidev; 3848 /* Select PCI BARs */ 3849 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3850 /* Release PCI resource and disable PCI device */ 3851 pci_release_selected_regions(pdev, bars); 3852 pci_disable_device(pdev); 3853 /* Null out PCI private reference to driver */ 3854 pci_set_drvdata(pdev, NULL); 3855 3856 return; 3857} 3858 3859/** 3860 * lpfc_reset_hba - Reset a hba 3861 * @phba: pointer to lpfc hba data structure. 3862 * 3863 * This routine is invoked to reset a hba device. It brings the HBA 3864 * offline, performs a board restart, and then brings the board back 3865 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 3866 * on outstanding mailbox commands. 3867 **/ 3868void 3869lpfc_reset_hba(struct lpfc_hba *phba) 3870{ 3871 /* If resets are disabled then set error state and return. */ 3872 if (!phba->cfg_enable_hba_reset) { 3873 phba->link_state = LPFC_HBA_ERROR; 3874 return; 3875 } 3876 lpfc_offline_prep(phba); 3877 lpfc_offline(phba); 3878 lpfc_sli_brdrestart(phba); 3879 lpfc_online(phba); 3880 lpfc_unblock_mgmt_io(phba); 3881} 3882 3883/** 3884 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3885 * @phba: pointer to lpfc hba data structure. 3886 * 3887 * This routine is invoked to set up the driver internal resources specific to 3888 * support the SLI-3 HBA device it attached to. 3889 * 3890 * Return codes 3891 * 0 - successful 3892 * other values - error 3893 **/ 3894static int 3895lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3896{ 3897 struct lpfc_sli *psli; 3898 3899 /* 3900 * Initialize timers used by driver 3901 */ 3902 3903 /* Heartbeat timer */ 3904 init_timer(&phba->hb_tmofunc); 3905 phba->hb_tmofunc.function = lpfc_hb_timeout; 3906 phba->hb_tmofunc.data = (unsigned long)phba; 3907 3908 psli = &phba->sli; 3909 /* MBOX heartbeat timer */ 3910 init_timer(&psli->mbox_tmo); 3911 psli->mbox_tmo.function = lpfc_mbox_timeout; 3912 psli->mbox_tmo.data = (unsigned long) phba; 3913 /* FCP polling mode timer */ 3914 init_timer(&phba->fcp_poll_timer); 3915 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3916 phba->fcp_poll_timer.data = (unsigned long) phba; 3917 /* Fabric block timer */ 3918 init_timer(&phba->fabric_block_timer); 3919 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3920 phba->fabric_block_timer.data = (unsigned long) phba; 3921 /* EA polling mode timer */ 3922 init_timer(&phba->eratt_poll); 3923 phba->eratt_poll.function = lpfc_poll_eratt; 3924 phba->eratt_poll.data = (unsigned long) phba; 3925 3926 /* Host attention work mask setup */ 3927 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3928 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3929 3930 /* Get all the module params for configuring this host */ 3931 lpfc_get_cfgparam(phba); 3932 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 3933 phba->menlo_flag |= HBA_MENLO_SUPPORT; 3934 /* check for menlo minimum sg count */ 3935 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 3936 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 3937 } 3938 3939 /* 3940 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3941 * used to create the sg_dma_buf_pool must be dynamically calculated. 3942 * 2 segments are added since the IOCB needs a command and response bde. 
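 * Worked example (sizes are illustrative assumptions only): with a
 * 32-byte fcp_cmnd, a 96-byte fcp_rsp, 12-byte ulp_bde64 entries and a
 * cfg_sg_seg_cnt of 64, the calculation below yields
 * 32 + 96 + ((64 + 2) * 12) = 920 bytes per pool buffer.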
3943 */ 3944 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 3945 sizeof(struct fcp_rsp) + 3946 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 3947 3948 if (phba->cfg_enable_bg) { 3949 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 3950 phba->cfg_sg_dma_buf_size += 3951 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 3952 } 3953 3954 /* Also reinitialize the host templates with new values. */ 3955 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3956 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3957 3958 phba->max_vpi = LPFC_MAX_VPI; 3959 /* This will be set to correct value after config_port mbox */ 3960 phba->max_vports = 0; 3961 3962 /* 3963 * Initialize the SLI Layer to run with lpfc HBAs. 3964 */ 3965 lpfc_sli_setup(phba); 3966 lpfc_sli_queue_setup(phba); 3967 3968 /* Allocate device driver memory */ 3969 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 3970 return -ENOMEM; 3971 3972 return 0; 3973} 3974 3975/** 3976 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 3977 * @phba: pointer to lpfc hba data structure. 3978 * 3979 * This routine is invoked to unset the driver internal resources set up 3980 * specific for supporting the SLI-3 HBA device it attached to. 3981 **/ 3982static void 3983lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 3984{ 3985 /* Free device driver memory allocated */ 3986 lpfc_mem_free_all(phba); 3987 3988 return; 3989} 3990 3991/** 3992 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 3993 * @phba: pointer to lpfc hba data structure. 3994 * 3995 * This routine is invoked to set up the driver internal resources specific to 3996 * support the SLI-4 HBA device it attached to. 3997 * 3998 * Return codes 3999 * 0 - successful 4000 * other values - error 4001 **/ 4002static int 4003lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4004{ 4005 struct lpfc_sli *psli; 4006 LPFC_MBOXQ_t *mboxq; 4007 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4008 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4009 struct lpfc_mqe *mqe; 4010 int longs; 4011 4012 /* Before proceed, wait for POST done and device ready */ 4013 rc = lpfc_sli4_post_status_check(phba); 4014 if (rc) 4015 return -ENODEV; 4016 4017 /* 4018 * Initialize timers used by driver 4019 */ 4020 4021 /* Heartbeat timer */ 4022 init_timer(&phba->hb_tmofunc); 4023 phba->hb_tmofunc.function = lpfc_hb_timeout; 4024 phba->hb_tmofunc.data = (unsigned long)phba; 4025 4026 psli = &phba->sli; 4027 /* MBOX heartbeat timer */ 4028 init_timer(&psli->mbox_tmo); 4029 psli->mbox_tmo.function = lpfc_mbox_timeout; 4030 psli->mbox_tmo.data = (unsigned long) phba; 4031 /* Fabric block timer */ 4032 init_timer(&phba->fabric_block_timer); 4033 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4034 phba->fabric_block_timer.data = (unsigned long) phba; 4035 /* EA polling mode timer */ 4036 init_timer(&phba->eratt_poll); 4037 phba->eratt_poll.function = lpfc_poll_eratt; 4038 phba->eratt_poll.data = (unsigned long) phba; 4039 /* FCF rediscover timer */ 4040 init_timer(&phba->fcf.redisc_wait); 4041 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 4042 phba->fcf.redisc_wait.data = (unsigned long)phba; 4043 4044 /* 4045 * We need to do a READ_CONFIG mailbox command here before 4046 * calling lpfc_get_cfgparam. For VFs this will report the 4047 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4048 * All of the resources allocated 4049 * for this Port are tied to these values. 
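 * Note: the READ_CONFIG mailbox command itself is issued further below
 * via lpfc_sli4_read_config(); lpfc_get_cfgparam() here only supplies
 * the initial module-parameter defaults.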
4050 */
4051 /* Get all the module params for configuring this host */
4052 lpfc_get_cfgparam(phba);
4053 phba->max_vpi = LPFC_MAX_VPI;
4054 /* This will be set to the correct value after the read_config mbox */
4055 phba->max_vports = 0;
4056
4057 /* Program the default value of vlan_id and fc_map */
4058 phba->valid_vlan = 0;
4059 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4060 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4061 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4062
4063 /*
4064 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4065 * used to create the sg_dma_buf_pool must be dynamically calculated.
4066 * 2 segments are added since the IOCB needs a command and response bde.
4067 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4068 * sgl sizes that are a power of 2 are used (rounded up below, capped at max_buf_size).
4069 */
4070 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4071 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4072 /* Feature Level 1 hardware is limited to 2 pages */
4073 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
4074 LPFC_SLI_INTF_FEATURELEVEL1_1))
4075 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4076 else
4077 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4078 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4079 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4080 dma_buf_size = dma_buf_size << 1)
4081 ;
4082 if (dma_buf_size == max_buf_size)
4083 phba->cfg_sg_seg_cnt = (dma_buf_size -
4084 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4085 (2 * sizeof(struct sli4_sge))) /
4086 sizeof(struct sli4_sge);
4087 phba->cfg_sg_dma_buf_size = dma_buf_size;
4088
4089 /* Initialize buffer queue management fields */
4090 hbq_count = lpfc_sli_hbq_count();
4091 for (i = 0; i < hbq_count; ++i)
4092 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4093 INIT_LIST_HEAD(&phba->rb_pend_list);
4094 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4095 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4096
4097 /*
4098 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4099 */
4100 /* Initialize the Abort scsi buffer list used by driver */
4101 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4102 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4103 /* This abort list is used by the worker thread */
4104 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4105
4106 /*
4107 * Initialize driver internal slow-path work queues
4108 */
4109
4110 /* Driver internal slow-path CQ Event pool */
4111 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4112 /* Response IOCB work queue list */
4113 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4114 /* Asynchronous event CQ Event work queue list */
4115 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4116 /* Fast-path XRI aborted CQ Event work queue list */
4117 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4118 /* Slow-path XRI aborted CQ Event work queue list */
4119 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4120 /* Receive queue CQ Event work queue list */
4121 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4122
4123 /* Initialize the driver internal SLI layer lists.
*/ 4124 lpfc_sli_setup(phba); 4125 lpfc_sli_queue_setup(phba); 4126 4127 /* Allocate device driver memory */ 4128 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 4129 if (rc) 4130 return -ENOMEM; 4131 4132 /* Create the bootstrap mailbox command */ 4133 rc = lpfc_create_bootstrap_mbox(phba); 4134 if (unlikely(rc)) 4135 goto out_free_mem; 4136 4137 /* Set up the host's endian order with the device. */ 4138 rc = lpfc_setup_endian_order(phba); 4139 if (unlikely(rc)) 4140 goto out_free_bsmbx; 4141 4142 rc = lpfc_sli4_fw_cfg_check(phba); 4143 if (unlikely(rc)) 4144 goto out_free_bsmbx; 4145 4146 /* Set up the hba's configuration parameters. */ 4147 rc = lpfc_sli4_read_config(phba); 4148 if (unlikely(rc)) 4149 goto out_free_bsmbx; 4150 4151 /* Perform a function reset */ 4152 rc = lpfc_pci_function_reset(phba); 4153 if (unlikely(rc)) 4154 goto out_free_bsmbx; 4155 4156 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4157 GFP_KERNEL); 4158 if (!mboxq) { 4159 rc = -ENOMEM; 4160 goto out_free_bsmbx; 4161 } 4162 4163 /* Get the Supported Pages. It is always available. */ 4164 lpfc_supported_pages(mboxq); 4165 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4166 if (unlikely(rc)) { 4167 rc = -EIO; 4168 mempool_free(mboxq, phba->mbox_mem_pool); 4169 goto out_free_bsmbx; 4170 } 4171 4172 mqe = &mboxq->u.mqe; 4173 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4174 LPFC_MAX_SUPPORTED_PAGES); 4175 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4176 switch (pn_page[i]) { 4177 case LPFC_SLI4_PARAMETERS: 4178 phba->sli4_hba.pc_sli4_params.supported = 1; 4179 break; 4180 default: 4181 break; 4182 } 4183 } 4184 4185 /* Read the port's SLI4 Parameters capabilities if supported. */ 4186 if (phba->sli4_hba.pc_sli4_params.supported) 4187 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4188 mempool_free(mboxq, phba->mbox_mem_pool); 4189 if (rc) { 4190 rc = -EIO; 4191 goto out_free_bsmbx; 4192 } 4193 /* Create all the SLI4 queues */ 4194 rc = lpfc_sli4_queue_create(phba); 4195 if (rc) 4196 goto out_free_bsmbx; 4197 4198 /* Create driver internal CQE event pool */ 4199 rc = lpfc_sli4_cq_event_pool_create(phba); 4200 if (rc) 4201 goto out_destroy_queue; 4202 4203 /* Initialize and populate the iocb list per host */ 4204 rc = lpfc_init_sgl_list(phba); 4205 if (rc) { 4206 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4207 "1400 Failed to initialize sgl list.\n"); 4208 goto out_destroy_cq_event_pool; 4209 } 4210 rc = lpfc_init_active_sgl_array(phba); 4211 if (rc) { 4212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4213 "1430 Failed to initialize sgl list.\n"); 4214 goto out_free_sgl_list; 4215 } 4216 4217 rc = lpfc_sli4_init_rpi_hdrs(phba); 4218 if (rc) { 4219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4220 "1432 Failed to initialize rpi headers.\n"); 4221 goto out_free_active_sgl; 4222 } 4223 4224 /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4225 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4226 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4227 GFP_KERNEL); 4228 if (!phba->fcf.fcf_rr_bmask) { 4229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4230 "2759 Failed allocate memory for FCF round " 4231 "robin failover bmask\n"); 4232 goto out_remove_rpi_hdrs; 4233 } 4234 4235 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4236 phba->cfg_fcp_eq_count), GFP_KERNEL); 4237 if (!phba->sli4_hba.fcp_eq_hdl) { 4238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4239 "2572 Failed allocate memory for fast-path " 4240 "per-EQ handle array\n"); 
4241 goto out_free_fcf_rr_bmask; 4242 } 4243 4244 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4245 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4246 if (!phba->sli4_hba.msix_entries) { 4247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4248 "2573 Failed allocate memory for msi-x " 4249 "interrupt vector entries\n"); 4250 goto out_free_fcp_eq_hdl; 4251 } 4252 4253 return rc; 4254 4255out_free_fcp_eq_hdl: 4256 kfree(phba->sli4_hba.fcp_eq_hdl); 4257out_free_fcf_rr_bmask: 4258 kfree(phba->fcf.fcf_rr_bmask); 4259out_remove_rpi_hdrs: 4260 lpfc_sli4_remove_rpi_hdrs(phba); 4261out_free_active_sgl: 4262 lpfc_free_active_sgl(phba); 4263out_free_sgl_list: 4264 lpfc_free_sgl_list(phba); 4265out_destroy_cq_event_pool: 4266 lpfc_sli4_cq_event_pool_destroy(phba); 4267out_destroy_queue: 4268 lpfc_sli4_queue_destroy(phba); 4269out_free_bsmbx: 4270 lpfc_destroy_bootstrap_mbox(phba); 4271out_free_mem: 4272 lpfc_mem_free(phba); 4273 return rc; 4274} 4275 4276/** 4277 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 4278 * @phba: pointer to lpfc hba data structure. 4279 * 4280 * This routine is invoked to unset the driver internal resources set up 4281 * specific for supporting the SLI-4 HBA device it attached to. 4282 **/ 4283static void 4284lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 4285{ 4286 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4287 4288 /* unregister default FCFI from the HBA */ 4289 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); 4290 4291 /* Free the default FCR table */ 4292 lpfc_sli_remove_dflt_fcf(phba); 4293 4294 /* Free memory allocated for msi-x interrupt vector entries */ 4295 kfree(phba->sli4_hba.msix_entries); 4296 4297 /* Free memory allocated for fast-path work queue handles */ 4298 kfree(phba->sli4_hba.fcp_eq_hdl); 4299 4300 /* Free the allocated rpi headers. */ 4301 lpfc_sli4_remove_rpi_hdrs(phba); 4302 lpfc_sli4_remove_rpis(phba); 4303 4304 /* Free eligible FCF index bmask */ 4305 kfree(phba->fcf.fcf_rr_bmask); 4306 4307 /* Free the ELS sgl list */ 4308 lpfc_free_active_sgl(phba); 4309 lpfc_free_sgl_list(phba); 4310 4311 /* Free the SCSI sgl management array */ 4312 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4313 4314 /* Free the SLI4 queues */ 4315 lpfc_sli4_queue_destroy(phba); 4316 4317 /* Free the completion queue EQ event pool */ 4318 lpfc_sli4_cq_event_release_all(phba); 4319 lpfc_sli4_cq_event_pool_destroy(phba); 4320 4321 /* Reset SLI4 HBA FCoE function */ 4322 lpfc_pci_function_reset(phba); 4323 4324 /* Free the bsmbx region. */ 4325 lpfc_destroy_bootstrap_mbox(phba); 4326 4327 /* Free the SLI Layer memory with SLI4 HBAs */ 4328 lpfc_mem_free_all(phba); 4329 4330 /* Free the current connect table */ 4331 list_for_each_entry_safe(conn_entry, next_conn_entry, 4332 &phba->fcf_conn_rec_list, list) { 4333 list_del_init(&conn_entry->list); 4334 kfree(conn_entry); 4335 } 4336 4337 return; 4338} 4339 4340/** 4341 * lpfc_init_api_table_setup - Set up init api fucntion jump table 4342 * @phba: The hba struct for which this call is being executed. 4343 * @dev_grp: The HBA PCI-Device group number. 4344 * 4345 * This routine sets up the device INIT interface API function jump table 4346 * in @phba struct. 4347 * 4348 * Returns: 0 - success, -ENODEV - failure. 
4349 **/ 4350int 4351lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4352{ 4353 phba->lpfc_hba_init_link = lpfc_hba_init_link; 4354 phba->lpfc_hba_down_link = lpfc_hba_down_link; 4355 switch (dev_grp) { 4356 case LPFC_PCI_DEV_LP: 4357 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4358 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 4359 phba->lpfc_stop_port = lpfc_stop_port_s3; 4360 break; 4361 case LPFC_PCI_DEV_OC: 4362 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 4363 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 4364 phba->lpfc_stop_port = lpfc_stop_port_s4; 4365 break; 4366 default: 4367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4368 "1431 Invalid HBA PCI-device group: 0x%x\n", 4369 dev_grp); 4370 return -ENODEV; 4371 break; 4372 } 4373 return 0; 4374} 4375 4376/** 4377 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 4378 * @phba: pointer to lpfc hba data structure. 4379 * 4380 * This routine is invoked to set up the driver internal resources before the 4381 * device specific resource setup to support the HBA device it attached to. 4382 * 4383 * Return codes 4384 * 0 - successful 4385 * other values - error 4386 **/ 4387static int 4388lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 4389{ 4390 /* 4391 * Driver resources common to all SLI revisions 4392 */ 4393 atomic_set(&phba->fast_event_count, 0); 4394 spin_lock_init(&phba->hbalock); 4395 4396 /* Initialize ndlp management spinlock */ 4397 spin_lock_init(&phba->ndlp_lock); 4398 4399 INIT_LIST_HEAD(&phba->port_list); 4400 INIT_LIST_HEAD(&phba->work_list); 4401 init_waitqueue_head(&phba->wait_4_mlo_m_q); 4402 4403 /* Initialize the wait queue head for the kernel thread */ 4404 init_waitqueue_head(&phba->work_waitq); 4405 4406 /* Initialize the scsi buffer list used by driver for scsi IO */ 4407 spin_lock_init(&phba->scsi_buf_list_lock); 4408 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 4409 4410 /* Initialize the fabric iocb list */ 4411 INIT_LIST_HEAD(&phba->fabric_iocb_list); 4412 4413 /* Initialize list to save ELS buffers */ 4414 INIT_LIST_HEAD(&phba->elsbuf); 4415 4416 /* Initialize FCF connection rec list */ 4417 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 4418 4419 return 0; 4420} 4421 4422/** 4423 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 4424 * @phba: pointer to lpfc hba data structure. 4425 * 4426 * This routine is invoked to set up the driver internal resources after the 4427 * device specific resource setup to support the HBA device it attached to. 4428 * 4429 * Return codes 4430 * 0 - successful 4431 * other values - error 4432 **/ 4433static int 4434lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 4435{ 4436 int error; 4437 4438 /* Startup the kernel thread for this host adapter. */ 4439 phba->worker_thread = kthread_run(lpfc_do_work, phba, 4440 "lpfc_worker_%d", phba->brd_no); 4441 if (IS_ERR(phba->worker_thread)) { 4442 error = PTR_ERR(phba->worker_thread); 4443 return error; 4444 } 4445 4446 return 0; 4447} 4448 4449/** 4450 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 4451 * @phba: pointer to lpfc hba data structure. 4452 * 4453 * This routine is invoked to unset the driver internal resources set up after 4454 * the device specific resource setup for supporting the HBA device it 4455 * attached to. 
4456 **/ 4457static void 4458lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 4459{ 4460 /* Stop kernel worker thread */ 4461 kthread_stop(phba->worker_thread); 4462} 4463 4464/** 4465 * lpfc_free_iocb_list - Free iocb list. 4466 * @phba: pointer to lpfc hba data structure. 4467 * 4468 * This routine is invoked to free the driver's IOCB list and memory. 4469 **/ 4470static void 4471lpfc_free_iocb_list(struct lpfc_hba *phba) 4472{ 4473 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 4474 4475 spin_lock_irq(&phba->hbalock); 4476 list_for_each_entry_safe(iocbq_entry, iocbq_next, 4477 &phba->lpfc_iocb_list, list) { 4478 list_del(&iocbq_entry->list); 4479 kfree(iocbq_entry); 4480 phba->total_iocbq_bufs--; 4481 } 4482 spin_unlock_irq(&phba->hbalock); 4483 4484 return; 4485} 4486 4487/** 4488 * lpfc_init_iocb_list - Allocate and initialize iocb list. 4489 * @phba: pointer to lpfc hba data structure. 4490 * 4491 * This routine is invoked to allocate and initizlize the driver's IOCB 4492 * list and set up the IOCB tag array accordingly. 4493 * 4494 * Return codes 4495 * 0 - successful 4496 * other values - error 4497 **/ 4498static int 4499lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 4500{ 4501 struct lpfc_iocbq *iocbq_entry = NULL; 4502 uint16_t iotag; 4503 int i; 4504 4505 /* Initialize and populate the iocb list per host. */ 4506 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 4507 for (i = 0; i < iocb_count; i++) { 4508 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 4509 if (iocbq_entry == NULL) { 4510 printk(KERN_ERR "%s: only allocated %d iocbs of " 4511 "expected %d count. Unloading driver.\n", 4512 __func__, i, LPFC_IOCB_LIST_CNT); 4513 goto out_free_iocbq; 4514 } 4515 4516 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 4517 if (iotag == 0) { 4518 kfree(iocbq_entry); 4519 printk(KERN_ERR "%s: failed to allocate IOTAG. " 4520 "Unloading driver.\n", __func__); 4521 goto out_free_iocbq; 4522 } 4523 iocbq_entry->sli4_xritag = NO_XRI; 4524 4525 spin_lock_irq(&phba->hbalock); 4526 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 4527 phba->total_iocbq_bufs++; 4528 spin_unlock_irq(&phba->hbalock); 4529 } 4530 4531 return 0; 4532 4533out_free_iocbq: 4534 lpfc_free_iocb_list(phba); 4535 4536 return -ENOMEM; 4537} 4538 4539/** 4540 * lpfc_free_sgl_list - Free sgl list. 4541 * @phba: pointer to lpfc hba data structure. 4542 * 4543 * This routine is invoked to free the driver's sgl list and memory. 4544 **/ 4545static void 4546lpfc_free_sgl_list(struct lpfc_hba *phba) 4547{ 4548 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 4549 LIST_HEAD(sglq_list); 4550 int rc = 0; 4551 4552 spin_lock_irq(&phba->hbalock); 4553 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 4554 spin_unlock_irq(&phba->hbalock); 4555 4556 list_for_each_entry_safe(sglq_entry, sglq_next, 4557 &sglq_list, list) { 4558 list_del(&sglq_entry->list); 4559 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 4560 kfree(sglq_entry); 4561 phba->sli4_hba.total_sglq_bufs--; 4562 } 4563 rc = lpfc_sli4_remove_all_sgl_pages(phba); 4564 if (rc) { 4565 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4566 "2005 Unable to deregister pages from HBA: %x\n", rc); 4567 } 4568 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4569} 4570 4571/** 4572 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 4573 * @phba: pointer to lpfc hba data structure. 4574 * 4575 * This routine is invoked to allocate the driver's active sgl memory. 
4576 * This array will hold the sglq_entry's for active IOs. 4577 **/ 4578static int 4579lpfc_init_active_sgl_array(struct lpfc_hba *phba) 4580{ 4581 int size; 4582 size = sizeof(struct lpfc_sglq *); 4583 size *= phba->sli4_hba.max_cfg_param.max_xri; 4584 4585 phba->sli4_hba.lpfc_sglq_active_list = 4586 kzalloc(size, GFP_KERNEL); 4587 if (!phba->sli4_hba.lpfc_sglq_active_list) 4588 return -ENOMEM; 4589 return 0; 4590} 4591 4592/** 4593 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 4594 * @phba: pointer to lpfc hba data structure. 4595 * 4596 * This routine is invoked to walk through the array of active sglq entries 4597 * and free all of the resources. 4598 * This is just a place holder for now. 4599 **/ 4600static void 4601lpfc_free_active_sgl(struct lpfc_hba *phba) 4602{ 4603 kfree(phba->sli4_hba.lpfc_sglq_active_list); 4604} 4605 4606/** 4607 * lpfc_init_sgl_list - Allocate and initialize sgl list. 4608 * @phba: pointer to lpfc hba data structure. 4609 * 4610 * This routine is invoked to allocate and initizlize the driver's sgl 4611 * list and set up the sgl xritag tag array accordingly. 4612 * 4613 * Return codes 4614 * 0 - successful 4615 * other values - error 4616 **/ 4617static int 4618lpfc_init_sgl_list(struct lpfc_hba *phba) 4619{ 4620 struct lpfc_sglq *sglq_entry = NULL; 4621 int i; 4622 int els_xri_cnt; 4623 4624 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4625 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4626 "2400 lpfc_init_sgl_list els %d.\n", 4627 els_xri_cnt); 4628 /* Initialize and populate the sglq list per host/VF. */ 4629 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4630 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 4631 4632 /* Sanity check on XRI management */ 4633 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 4634 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4635 "2562 No room left for SCSI XRI allocation: " 4636 "max_xri=%d, els_xri=%d\n", 4637 phba->sli4_hba.max_cfg_param.max_xri, 4638 els_xri_cnt); 4639 return -ENOMEM; 4640 } 4641 4642 /* Allocate memory for the ELS XRI management array */ 4643 phba->sli4_hba.lpfc_els_sgl_array = 4644 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), 4645 GFP_KERNEL); 4646 4647 if (!phba->sli4_hba.lpfc_els_sgl_array) { 4648 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4649 "2401 Failed to allocate memory for ELS " 4650 "XRI management array of size %d.\n", 4651 els_xri_cnt); 4652 return -ENOMEM; 4653 } 4654 4655 /* Keep the SCSI XRI into the XRI management array */ 4656 phba->sli4_hba.scsi_xri_max = 4657 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4658 phba->sli4_hba.scsi_xri_cnt = 0; 4659 4660 phba->sli4_hba.lpfc_scsi_psb_array = 4661 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4662 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4663 4664 if (!phba->sli4_hba.lpfc_scsi_psb_array) { 4665 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4666 "2563 Failed to allocate memory for SCSI " 4667 "XRI management array of size %d.\n", 4668 phba->sli4_hba.scsi_xri_max); 4669 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4670 return -ENOMEM; 4671 } 4672 4673 for (i = 0; i < els_xri_cnt; i++) { 4674 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); 4675 if (sglq_entry == NULL) { 4676 printk(KERN_ERR "%s: only allocated %d sgls of " 4677 "expected %d count. 
Unloading driver.\n", 4678 __func__, i, els_xri_cnt); 4679 goto out_free_mem; 4680 } 4681 4682 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); 4683 if (sglq_entry->sli4_xritag == NO_XRI) { 4684 kfree(sglq_entry); 4685 printk(KERN_ERR "%s: failed to allocate XRI.\n" 4686 "Unloading driver.\n", __func__); 4687 goto out_free_mem; 4688 } 4689 sglq_entry->buff_type = GEN_BUFF_TYPE; 4690 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4691 if (sglq_entry->virt == NULL) { 4692 kfree(sglq_entry); 4693 printk(KERN_ERR "%s: failed to allocate mbuf.\n" 4694 "Unloading driver.\n", __func__); 4695 goto out_free_mem; 4696 } 4697 sglq_entry->sgl = sglq_entry->virt; 4698 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4699 4700 /* The list order is used by later block SGL registraton */ 4701 spin_lock_irq(&phba->hbalock); 4702 sglq_entry->state = SGL_FREED; 4703 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4704 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4705 phba->sli4_hba.total_sglq_bufs++; 4706 spin_unlock_irq(&phba->hbalock); 4707 } 4708 return 0; 4709 4710out_free_mem: 4711 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4712 lpfc_free_sgl_list(phba); 4713 return -ENOMEM; 4714} 4715 4716/** 4717 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 4718 * @phba: pointer to lpfc hba data structure. 4719 * 4720 * This routine is invoked to post rpi header templates to the 4721 * HBA consistent with the SLI-4 interface spec. This routine 4722 * posts a PAGE_SIZE memory region to the port to hold up to 4723 * PAGE_SIZE modulo 64 rpi context headers. 4724 * No locks are held here because this is an initialization routine 4725 * called only from probe or lpfc_online when interrupts are not 4726 * enabled and the driver is reinitializing the device. 4727 * 4728 * Return codes 4729 * 0 - successful 4730 * ENOMEM - No availble memory 4731 * EIO - The mailbox failed to complete successfully. 4732 **/ 4733int 4734lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4735{ 4736 int rc = 0; 4737 int longs; 4738 uint16_t rpi_count; 4739 struct lpfc_rpi_hdr *rpi_hdr; 4740 4741 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4742 4743 /* 4744 * Provision an rpi bitmask range for discovery. The total count 4745 * is the difference between max and base + 1. 4746 */ 4747 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4748 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4749 4750 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4751 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4752 GFP_KERNEL); 4753 if (!phba->sli4_hba.rpi_bmask) 4754 return -ENOMEM; 4755 4756 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 4757 if (!rpi_hdr) { 4758 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4759 "0391 Error during rpi post operation\n"); 4760 lpfc_sli4_remove_rpis(phba); 4761 rc = -ENODEV; 4762 } 4763 4764 return rc; 4765} 4766 4767/** 4768 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 4769 * @phba: pointer to lpfc hba data structure. 4770 * 4771 * This routine is invoked to allocate a single 4KB memory region to 4772 * support rpis and stores them in the phba. This single region 4773 * provides support for up to 64 rpis. The region is used globally 4774 * by the device. 4775 * 4776 * Returns: 4777 * A valid rpi hdr on success. 4778 * A NULL pointer on any failure. 
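 * The DMA region must also be aligned to LPFC_HDR_TEMPLATE_SIZE; the
 * alignment check in the body releases the coherent memory and returns
 * NULL when that constraint is not met.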
4779 **/ 4780struct lpfc_rpi_hdr * 4781lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 4782{ 4783 uint16_t rpi_limit, curr_rpi_range; 4784 struct lpfc_dmabuf *dmabuf; 4785 struct lpfc_rpi_hdr *rpi_hdr; 4786 4787 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4788 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4789 4790 spin_lock_irq(&phba->hbalock); 4791 curr_rpi_range = phba->sli4_hba.next_rpi; 4792 spin_unlock_irq(&phba->hbalock); 4793 4794 /* 4795 * The port has a limited number of rpis. The increment here 4796 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 4797 * and to allow the full max_rpi range per port. 4798 */ 4799 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4800 return NULL; 4801 4802 /* 4803 * First allocate the protocol header region for the port. The 4804 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 4805 */ 4806 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4807 if (!dmabuf) 4808 return NULL; 4809 4810 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4811 LPFC_HDR_TEMPLATE_SIZE, 4812 &dmabuf->phys, 4813 GFP_KERNEL); 4814 if (!dmabuf->virt) { 4815 rpi_hdr = NULL; 4816 goto err_free_dmabuf; 4817 } 4818 4819 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 4820 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 4821 rpi_hdr = NULL; 4822 goto err_free_coherent; 4823 } 4824 4825 /* Save the rpi header data for cleanup later. */ 4826 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4827 if (!rpi_hdr) 4828 goto err_free_coherent; 4829 4830 rpi_hdr->dmabuf = dmabuf; 4831 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4832 rpi_hdr->page_count = 1; 4833 spin_lock_irq(&phba->hbalock); 4834 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4835 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4836 4837 /* 4838 * The next_rpi stores the next module-64 rpi value to post 4839 * in any subsequent rpi memory region postings. 4840 */ 4841 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4842 spin_unlock_irq(&phba->hbalock); 4843 return rpi_hdr; 4844 4845 err_free_coherent: 4846 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4847 dmabuf->virt, dmabuf->phys); 4848 err_free_dmabuf: 4849 kfree(dmabuf); 4850 return NULL; 4851} 4852 4853/** 4854 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4855 * @phba: pointer to lpfc hba data structure. 4856 * 4857 * This routine is invoked to remove all memory resources allocated 4858 * to support rpis. This routine presumes the caller has released all 4859 * rpis consumed by fabric or port logins and is prepared to have 4860 * the header pages removed. 4861 **/ 4862void 4863lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4864{ 4865 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4866 4867 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4868 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4869 list_del(&rpi_hdr->list); 4870 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4871 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4872 kfree(rpi_hdr->dmabuf); 4873 kfree(rpi_hdr); 4874 } 4875 4876 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4877 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4878} 4879 4880/** 4881 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4882 * @pdev: pointer to pci device data structure. 4883 * 4884 * This routine is invoked to allocate the driver hba data structure for an 4885 * HBA device. 
If the allocation is successful, the phba reference to the 4886 * PCI device data structure is set. 4887 * 4888 * Return codes 4889 * pointer to @phba - successful 4890 * NULL - error 4891 **/ 4892static struct lpfc_hba * 4893lpfc_hba_alloc(struct pci_dev *pdev) 4894{ 4895 struct lpfc_hba *phba; 4896 4897 /* Allocate memory for HBA structure */ 4898 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4899 if (!phba) { 4900 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 4901 return NULL; 4902 } 4903 4904 /* Set reference to PCI device in HBA structure */ 4905 phba->pcidev = pdev; 4906 4907 /* Assign an unused board number */ 4908 phba->brd_no = lpfc_get_instance(); 4909 if (phba->brd_no < 0) { 4910 kfree(phba); 4911 return NULL; 4912 } 4913 4914 spin_lock_init(&phba->ct_ev_lock); 4915 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4916 4917 return phba; 4918} 4919 4920/** 4921 * lpfc_hba_free - Free driver hba data structure with a device. 4922 * @phba: pointer to lpfc hba data structure. 4923 * 4924 * This routine is invoked to free the driver hba data structure with an 4925 * HBA device. 4926 **/ 4927static void 4928lpfc_hba_free(struct lpfc_hba *phba) 4929{ 4930 /* Release the driver assigned board number */ 4931 idr_remove(&lpfc_hba_index, phba->brd_no); 4932 4933 kfree(phba); 4934 return; 4935} 4936 4937/** 4938 * lpfc_create_shost - Create hba physical port with associated scsi host. 4939 * @phba: pointer to lpfc hba data structure. 4940 * 4941 * This routine is invoked to create HBA physical port and associate a SCSI 4942 * host with it. 4943 * 4944 * Return codes 4945 * 0 - successful 4946 * other values - error 4947 **/ 4948static int 4949lpfc_create_shost(struct lpfc_hba *phba) 4950{ 4951 struct lpfc_vport *vport; 4952 struct Scsi_Host *shost; 4953 4954 /* Initialize HBA FC structure */ 4955 phba->fc_edtov = FF_DEF_EDTOV; 4956 phba->fc_ratov = FF_DEF_RATOV; 4957 phba->fc_altov = FF_DEF_ALTOV; 4958 phba->fc_arbtov = FF_DEF_ARBTOV; 4959 4960 atomic_set(&phba->sdev_cnt, 0); 4961 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4962 if (!vport) 4963 return -ENODEV; 4964 4965 shost = lpfc_shost_from_vport(vport); 4966 phba->pport = vport; 4967 lpfc_debugfs_initialize(vport); 4968 /* Put reference to SCSI host to driver's device private data */ 4969 pci_set_drvdata(phba->pcidev, shost); 4970 4971 return 0; 4972} 4973 4974/** 4975 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 4976 * @phba: pointer to lpfc hba data structure. 4977 * 4978 * This routine is invoked to destroy HBA physical port and the associated 4979 * SCSI host. 4980 **/ 4981static void 4982lpfc_destroy_shost(struct lpfc_hba *phba) 4983{ 4984 struct lpfc_vport *vport = phba->pport; 4985 4986 /* Destroy physical port that associated with the SCSI host */ 4987 destroy_port(vport); 4988 4989 return; 4990} 4991 4992/** 4993 * lpfc_setup_bg - Setup Block guard structures and debug areas. 4994 * @phba: pointer to lpfc hba data structure. 4995 * @shost: the shost to be used to detect Block guard settings. 4996 * 4997 * This routine sets up the local Block guard protocol settings for @shost. 4998 * This routine also allocates memory for debugging bg buffers. 
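 * The debug buffers are driver-global; the allocation loops below start
 * at a page order of 10 and fall back to smaller orders until
 * __get_free_pages() succeeds or the order reaches zero.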
4999 **/ 5000static void 5001lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5002{ 5003 int pagecnt = 10; 5004 if (lpfc_prot_mask && lpfc_prot_guard) { 5005 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5006 "1478 Registering BlockGuard with the " 5007 "SCSI layer\n"); 5008 scsi_host_set_prot(shost, lpfc_prot_mask); 5009 scsi_host_set_guard(shost, lpfc_prot_guard); 5010 } 5011 if (!_dump_buf_data) { 5012 while (pagecnt) { 5013 spin_lock_init(&_dump_buf_lock); 5014 _dump_buf_data = 5015 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5016 if (_dump_buf_data) { 5017 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5018 "9043 BLKGRD: allocated %d pages for " 5019 "_dump_buf_data at 0x%p\n", 5020 (1 << pagecnt), _dump_buf_data); 5021 _dump_buf_data_order = pagecnt; 5022 memset(_dump_buf_data, 0, 5023 ((1 << PAGE_SHIFT) << pagecnt)); 5024 break; 5025 } else 5026 --pagecnt; 5027 } 5028 if (!_dump_buf_data_order) 5029 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5030 "9044 BLKGRD: ERROR unable to allocate " 5031 "memory for hexdump\n"); 5032 } else 5033 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5034 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 5035 "\n", _dump_buf_data); 5036 if (!_dump_buf_dif) { 5037 while (pagecnt) { 5038 _dump_buf_dif = 5039 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5040 if (_dump_buf_dif) { 5041 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5042 "9046 BLKGRD: allocated %d pages for " 5043 "_dump_buf_dif at 0x%p\n", 5044 (1 << pagecnt), _dump_buf_dif); 5045 _dump_buf_dif_order = pagecnt; 5046 memset(_dump_buf_dif, 0, 5047 ((1 << PAGE_SHIFT) << pagecnt)); 5048 break; 5049 } else 5050 --pagecnt; 5051 } 5052 if (!_dump_buf_dif_order) 5053 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5054 "9047 BLKGRD: ERROR unable to allocate " 5055 "memory for hexdump\n"); 5056 } else 5057 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5058 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 5059 _dump_buf_dif); 5060} 5061 5062/** 5063 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5064 * @phba: pointer to lpfc hba data structure. 5065 * 5066 * This routine is invoked to perform all the necessary post initialization 5067 * setup for the device. 5068 **/ 5069static void 5070lpfc_post_init_setup(struct lpfc_hba *phba) 5071{ 5072 struct Scsi_Host *shost; 5073 struct lpfc_adapter_event_header adapter_event; 5074 5075 /* Get the default values for Model Name and Description */ 5076 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5077 5078 /* 5079 * hba setup may have changed the hba_queue_depth so we need to 5080 * adjust the value of can_queue. 5081 */ 5082 shost = pci_get_drvdata(phba->pcidev); 5083 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5084 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5085 lpfc_setup_bg(phba, shost); 5086 5087 lpfc_host_attrib_init(shost); 5088 5089 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5090 spin_lock_irq(shost->host_lock); 5091 lpfc_poll_start_timer(phba); 5092 spin_unlock_irq(shost->host_lock); 5093 } 5094 5095 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5096 "0428 Perform SCSI scan\n"); 5097 /* Send board arrival event to upper layer */ 5098 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5099 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5100 fc_host_post_vendor_event(shost, fc_get_event_number(), 5101 sizeof(adapter_event), 5102 (char *) &adapter_event, 5103 LPFC_NL_VENDOR_ID); 5104 return; 5105} 5106 5107/** 5108 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 
5109 * @phba: pointer to lpfc hba data structure. 5110 * 5111 * This routine is invoked to set up the PCI device memory space for device 5112 * with SLI-3 interface spec. 5113 * 5114 * Return codes 5115 * 0 - successful 5116 * other values - error 5117 **/ 5118static int 5119lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5120{ 5121 struct pci_dev *pdev; 5122 unsigned long bar0map_len, bar2map_len; 5123 int i, hbq_count; 5124 void *ptr; 5125 int error = -ENODEV; 5126 5127 /* Obtain PCI device reference */ 5128 if (!phba->pcidev) 5129 return error; 5130 else 5131 pdev = phba->pcidev; 5132 5133 /* Set the device DMA mask size */ 5134 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5135 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5136 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5137 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5138 return error; 5139 } 5140 } 5141 5142 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5143 * required by each mapping. 5144 */ 5145 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5146 bar0map_len = pci_resource_len(pdev, 0); 5147 5148 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5149 bar2map_len = pci_resource_len(pdev, 2); 5150 5151 /* Map HBA SLIM to a kernel virtual address. */ 5152 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5153 if (!phba->slim_memmap_p) { 5154 dev_printk(KERN_ERR, &pdev->dev, 5155 "ioremap failed for SLIM memory.\n"); 5156 goto out; 5157 } 5158 5159 /* Map HBA Control Registers to a kernel virtual address. */ 5160 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5161 if (!phba->ctrl_regs_memmap_p) { 5162 dev_printk(KERN_ERR, &pdev->dev, 5163 "ioremap failed for HBA control registers.\n"); 5164 goto out_iounmap_slim; 5165 } 5166 5167 /* Allocate memory for SLI-2 structures */ 5168 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5169 SLI2_SLIM_SIZE, 5170 &phba->slim2p.phys, 5171 GFP_KERNEL); 5172 if (!phba->slim2p.virt) 5173 goto out_iounmap; 5174 5175 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5176 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5177 phba->mbox_ext = (phba->slim2p.virt + 5178 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5179 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5180 phba->IOCBs = (phba->slim2p.virt + 5181 offsetof(struct lpfc_sli2_slim, IOCBs)); 5182 5183 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5184 lpfc_sli_hbq_size(), 5185 &phba->hbqslimp.phys, 5186 GFP_KERNEL); 5187 if (!phba->hbqslimp.virt) 5188 goto out_free_slim; 5189 5190 hbq_count = lpfc_sli_hbq_count(); 5191 ptr = phba->hbqslimp.virt; 5192 for (i = 0; i < hbq_count; ++i) { 5193 phba->hbqs[i].hbq_virt = ptr; 5194 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5195 ptr += (lpfc_hbq_defs[i]->entry_count * 5196 sizeof(struct lpfc_hbq_entry)); 5197 } 5198 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5199 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5200 5201 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5202 5203 INIT_LIST_HEAD(&phba->rb_pend_list); 5204 5205 phba->MBslimaddr = phba->slim_memmap_p; 5206 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5207 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5208 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5209 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5210 5211 return 0; 5212 5213out_free_slim: 5214 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 
5215 phba->slim2p.virt, phba->slim2p.phys); 5216out_iounmap: 5217 iounmap(phba->ctrl_regs_memmap_p); 5218out_iounmap_slim: 5219 iounmap(phba->slim_memmap_p); 5220out: 5221 return error; 5222} 5223 5224/** 5225 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5226 * @phba: pointer to lpfc hba data structure. 5227 * 5228 * This routine is invoked to unset the PCI device memory space for device 5229 * with SLI-3 interface spec. 5230 **/ 5231static void 5232lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5233{ 5234 struct pci_dev *pdev; 5235 5236 /* Obtain PCI device reference */ 5237 if (!phba->pcidev) 5238 return; 5239 else 5240 pdev = phba->pcidev; 5241 5242 /* Free coherent DMA memory allocated */ 5243 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5244 phba->hbqslimp.virt, phba->hbqslimp.phys); 5245 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5246 phba->slim2p.virt, phba->slim2p.phys); 5247 5248 /* I/O memory unmap */ 5249 iounmap(phba->ctrl_regs_memmap_p); 5250 iounmap(phba->slim_memmap_p); 5251 5252 return; 5253} 5254 5255/** 5256 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5257 * @phba: pointer to lpfc hba data structure. 5258 * 5259 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5260 * done and check status. 5261 * 5262 * Return 0 if successful, otherwise -ENODEV. 5263 **/ 5264int 5265lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5266{ 5267 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg; 5268 int i, port_error = -ENODEV; 5269 5270 if (!phba->sli4_hba.STAregaddr) 5271 return -ENODEV; 5272 5273 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5274 for (i = 0; i < 3000; i++) { 5275 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); 5276 /* Encounter fatal POST error, break out */ 5277 if (bf_get(lpfc_hst_state_perr, &sta_reg)) { 5278 port_error = -ENODEV; 5279 break; 5280 } 5281 if (LPFC_POST_STAGE_ARMFW_READY == 5282 bf_get(lpfc_hst_state_port_status, &sta_reg)) { 5283 port_error = 0; 5284 break; 5285 } 5286 msleep(10); 5287 } 5288 5289 if (port_error) 5290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5291 "1408 Failure HBA POST Status: sta_reg=0x%x, " 5292 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " 5293 "dl=x%x, pstatus=x%x\n", sta_reg.word0, 5294 bf_get(lpfc_hst_state_perr, &sta_reg), 5295 bf_get(lpfc_hst_state_sfi, &sta_reg), 5296 bf_get(lpfc_hst_state_nip, &sta_reg), 5297 bf_get(lpfc_hst_state_ipc, &sta_reg), 5298 bf_get(lpfc_hst_state_xrom, &sta_reg), 5299 bf_get(lpfc_hst_state_dl, &sta_reg), 5300 bf_get(lpfc_hst_state_port_status, &sta_reg)); 5301 5302 /* Log device information */ 5303 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr); 5304 if (bf_get(lpfc_sli_intf_valid, 5305 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) { 5306 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5307 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 5308 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5309 bf_get(lpfc_sli_intf_sli_family, 5310 &phba->sli4_hba.sli_intf), 5311 bf_get(lpfc_sli_intf_slirev, 5312 &phba->sli4_hba.sli_intf), 5313 bf_get(lpfc_sli_intf_featurelevel1, 5314 &phba->sli4_hba.sli_intf), 5315 bf_get(lpfc_sli_intf_featurelevel2, 5316 &phba->sli4_hba.sli_intf)); 5317 } 5318 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); 5319 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); 5320 /* With uncoverable error, log the error message and return error */ 5321 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 5322 uerrhi_reg.word0 = 
readl(phba->sli4_hba.UERRHIregaddr); 5323 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5324 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5326 "1422 HBA Unrecoverable error: " 5327 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 5328 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", 5329 uerrlo_reg.word0, uerrhi_reg.word0, 5330 phba->sli4_hba.ue_mask_lo, 5331 phba->sli4_hba.ue_mask_hi); 5332 return -ENODEV; 5333 } 5334 5335 return port_error; 5336} 5337 5338/** 5339 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5340 * @phba: pointer to lpfc hba data structure. 5341 * 5342 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5343 * memory map. 5344 **/ 5345static void 5346lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) 5347{ 5348 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5349 LPFC_UERR_STATUS_LO; 5350 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5351 LPFC_UERR_STATUS_HI; 5352 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5353 LPFC_UE_MASK_LO; 5354 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5355 LPFC_UE_MASK_HI; 5356 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + 5357 LPFC_SLI_INTF; 5358} 5359 5360/** 5361 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5362 * @phba: pointer to lpfc hba data structure. 5363 * 5364 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5365 * memory map. 5366 **/ 5367static void 5368lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5369{ 5370 5371 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5372 LPFC_HST_STATE; 5373 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5374 LPFC_HST_ISR0; 5375 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5376 LPFC_HST_IMR0; 5377 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5378 LPFC_HST_ISCR0; 5379 return; 5380} 5381 5382/** 5383 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5384 * @phba: pointer to lpfc hba data structure. 5385 * @vf: virtual function number 5386 * 5387 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5388 * based on the given viftual function number, @vf. 5389 * 5390 * Return 0 if successful, otherwise -ENODEV. 5391 **/ 5392static int 5393lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5394{ 5395 if (vf > LPFC_VIR_FUNC_MAX) 5396 return -ENODEV; 5397 5398 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5399 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5400 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5401 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5402 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5403 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5404 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5405 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5406 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5407 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5408 return 0; 5409} 5410 5411/** 5412 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5413 * @phba: pointer to lpfc hba data structure. 5414 * 5415 * This routine is invoked to create the bootstrap mailbox 5416 * region consistent with the SLI-4 interface spec. 
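 * The region is sized as a struct lpfc_bmbx_create plus
 * (LPFC_ALIGN_16_BYTE - 1) bytes of padding so that a 16-byte aligned
 * mailbox address can always be carved out of the allocation.  The
 * aligned physical address is posted to the port as two 30-bit words,
 * roughly:
 *
 *   addr_hi = (((aphys >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI;
 *   addr_lo = (((aphys >>  4) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO;
 *
 * where aphys is the 16-byte aligned physical address of the mailbox.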
This 5417 * routine allocates all memory necessary to communicate 5418 * mailbox commands to the port and sets up all alignment 5419 * needs. No locks are expected to be held when calling 5420 * this routine. 5421 * 5422 * Return codes 5423 * 0 - successful 5424 * ENOMEM - could not allocated memory. 5425 **/ 5426static int 5427lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5428{ 5429 uint32_t bmbx_size; 5430 struct lpfc_dmabuf *dmabuf; 5431 struct dma_address *dma_address; 5432 uint32_t pa_addr; 5433 uint64_t phys_addr; 5434 5435 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5436 if (!dmabuf) 5437 return -ENOMEM; 5438 5439 /* 5440 * The bootstrap mailbox region is comprised of 2 parts 5441 * plus an alignment restriction of 16 bytes. 5442 */ 5443 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5444 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5445 bmbx_size, 5446 &dmabuf->phys, 5447 GFP_KERNEL); 5448 if (!dmabuf->virt) { 5449 kfree(dmabuf); 5450 return -ENOMEM; 5451 } 5452 memset(dmabuf->virt, 0, bmbx_size); 5453 5454 /* 5455 * Initialize the bootstrap mailbox pointers now so that the register 5456 * operations are simple later. The mailbox dma address is required 5457 * to be 16-byte aligned. Also align the virtual memory as each 5458 * maibox is copied into the bmbx mailbox region before issuing the 5459 * command to the port. 5460 */ 5461 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5462 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5463 5464 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5465 LPFC_ALIGN_16_BYTE); 5466 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5467 LPFC_ALIGN_16_BYTE); 5468 5469 /* 5470 * Set the high and low physical addresses now. The SLI4 alignment 5471 * requirement is 16 bytes and the mailbox is posted to the port 5472 * as two 30-bit addresses. The other data is a bit marking whether 5473 * the 30-bit address is the high or low address. 5474 * Upcast bmbx aphys to 64bits so shift instruction compiles 5475 * clean on 32 bit machines. 5476 */ 5477 dma_address = &phba->sli4_hba.bmbx.dma_address; 5478 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5479 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5480 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5481 LPFC_BMBX_BIT1_ADDR_HI); 5482 5483 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5484 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5485 LPFC_BMBX_BIT1_ADDR_LO); 5486 return 0; 5487} 5488 5489/** 5490 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5491 * @phba: pointer to lpfc hba data structure. 5492 * 5493 * This routine is invoked to teardown the bootstrap mailbox 5494 * region and release all host resources. This routine requires 5495 * the caller to ensure all mailbox commands recovered, no 5496 * additional mailbox comands are sent, and interrupts are disabled 5497 * before calling this routine. 5498 * 5499 **/ 5500static void 5501lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5502{ 5503 dma_free_coherent(&phba->pcidev->dev, 5504 phba->sli4_hba.bmbx.bmbx_size, 5505 phba->sli4_hba.bmbx.dmabuf->virt, 5506 phba->sli4_hba.bmbx.dmabuf->phys); 5507 5508 kfree(phba->sli4_hba.bmbx.dmabuf); 5509 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5510} 5511 5512/** 5513 * lpfc_sli4_read_config - Get the config parameters. 5514 * @phba: pointer to lpfc hba data structure. 5515 * 5516 * This routine is invoked to read the configuration parameters from the HBA. 
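 * The configuration is obtained with a READ_CONFIG mailbox command
 * issued in polling mode (MBX_POLL).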
5517 * The configuration parameters are used to set the base and maximum values 5518 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 5519 * allocation for the port. 5520 * 5521 * Return codes 5522 * 0 - successful 5523 * ENOMEM - No availble memory 5524 * EIO - The mailbox failed to complete successfully. 5525 **/ 5526static int 5527lpfc_sli4_read_config(struct lpfc_hba *phba) 5528{ 5529 LPFC_MBOXQ_t *pmb; 5530 struct lpfc_mbx_read_config *rd_config; 5531 uint32_t rc = 0; 5532 5533 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5534 if (!pmb) { 5535 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5536 "2011 Unable to allocate memory for issuing " 5537 "SLI_CONFIG_SPECIAL mailbox command\n"); 5538 return -ENOMEM; 5539 } 5540 5541 lpfc_read_config(phba, pmb); 5542 5543 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5544 if (rc != MBX_SUCCESS) { 5545 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5546 "2012 Mailbox failed , mbxCmd x%x " 5547 "READ_CONFIG, mbxStatus x%x\n", 5548 bf_get(lpfc_mqe_command, &pmb->u.mqe), 5549 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 5550 rc = -EIO; 5551 } else { 5552 rd_config = &pmb->u.mqe.un.rd_config; 5553 phba->sli4_hba.max_cfg_param.max_xri = 5554 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5555 phba->sli4_hba.max_cfg_param.xri_base = 5556 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 5557 phba->sli4_hba.max_cfg_param.max_vpi = 5558 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 5559 phba->sli4_hba.max_cfg_param.vpi_base = 5560 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 5561 phba->sli4_hba.max_cfg_param.max_rpi = 5562 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 5563 phba->sli4_hba.max_cfg_param.rpi_base = 5564 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 5565 phba->sli4_hba.max_cfg_param.max_vfi = 5566 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 5567 phba->sli4_hba.max_cfg_param.vfi_base = 5568 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5569 phba->sli4_hba.max_cfg_param.max_fcfi = 5570 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5571 phba->sli4_hba.max_cfg_param.fcfi_base = 5572 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); 5573 phba->sli4_hba.max_cfg_param.max_eq = 5574 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5575 phba->sli4_hba.max_cfg_param.max_rq = 5576 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 5577 phba->sli4_hba.max_cfg_param.max_wq = 5578 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 5579 phba->sli4_hba.max_cfg_param.max_cq = 5580 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 5581 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 5582 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 5583 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5584 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5585 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5586 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 
5587 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5588 phba->max_vports = phba->max_vpi; 5589 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5590 "2003 cfg params XRI(B:%d M:%d), " 5591 "VPI(B:%d M:%d) " 5592 "VFI(B:%d M:%d) " 5593 "RPI(B:%d M:%d) " 5594 "FCFI(B:%d M:%d)\n", 5595 phba->sli4_hba.max_cfg_param.xri_base, 5596 phba->sli4_hba.max_cfg_param.max_xri, 5597 phba->sli4_hba.max_cfg_param.vpi_base, 5598 phba->sli4_hba.max_cfg_param.max_vpi, 5599 phba->sli4_hba.max_cfg_param.vfi_base, 5600 phba->sli4_hba.max_cfg_param.max_vfi, 5601 phba->sli4_hba.max_cfg_param.rpi_base, 5602 phba->sli4_hba.max_cfg_param.max_rpi, 5603 phba->sli4_hba.max_cfg_param.fcfi_base, 5604 phba->sli4_hba.max_cfg_param.max_fcfi); 5605 } 5606 mempool_free(pmb, phba->mbox_mem_pool); 5607 5608 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5609 if (phba->cfg_hba_queue_depth > 5610 (phba->sli4_hba.max_cfg_param.max_xri - 5611 lpfc_sli4_get_els_iocb_cnt(phba))) 5612 phba->cfg_hba_queue_depth = 5613 phba->sli4_hba.max_cfg_param.max_xri - 5614 lpfc_sli4_get_els_iocb_cnt(phba); 5615 return rc; 5616} 5617 5618/** 5619 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. 5620 * @phba: pointer to lpfc hba data structure. 5621 * 5622 * This routine is invoked to setup the host-side endian order to the 5623 * HBA consistent with the SLI-4 interface spec. 5624 * 5625 * Return codes 5626 * 0 - successful 5627 * ENOMEM - No availble memory 5628 * EIO - The mailbox failed to complete successfully. 5629 **/ 5630static int 5631lpfc_setup_endian_order(struct lpfc_hba *phba) 5632{ 5633 LPFC_MBOXQ_t *mboxq; 5634 uint32_t rc = 0; 5635 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 5636 HOST_ENDIAN_HIGH_WORD1}; 5637 5638 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5639 if (!mboxq) { 5640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5641 "0492 Unable to allocate memory for issuing " 5642 "SLI_CONFIG_SPECIAL mailbox command\n"); 5643 return -ENOMEM; 5644 } 5645 5646 /* 5647 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two 5648 * words to contain special data values and no other data. 5649 */ 5650 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 5651 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 5652 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5653 if (rc != MBX_SUCCESS) { 5654 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5655 "0493 SLI_CONFIG_SPECIAL mailbox failed with " 5656 "status x%x\n", 5657 rc); 5658 rc = -EIO; 5659 } 5660 5661 mempool_free(mboxq, phba->mbox_mem_pool); 5662 return rc; 5663} 5664 5665/** 5666 * lpfc_sli4_queue_create - Create all the SLI4 queues 5667 * @phba: pointer to lpfc hba data structure. 5668 * 5669 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 5670 * operation. For each SLI4 queue type, the parameters such as queue entry 5671 * count (queue depth) shall be taken from the module parameter. For now, 5672 * we just use some constant number as place holder. 5673 * 5674 * Return codes 5675 * 0 - successful 5676 * ENOMEM - No availble memory 5677 * EIO - The mailbox failed to complete successfully. 
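 *
 * As allocated here, the queue set consists of one slow-path event
 * queue plus one fast-path event queue per configured FCP EQ, a
 * mailbox and an ELS completion queue plus one FCP completion queue
 * per fast-path EQ, a mailbox command queue, an ELS work queue plus
 * the configured number of FCP work queues, and a header/data receive
 * queue pair for unsolicited frames.  The requested FCP WQ and EQ
 * counts are first clamped against the max_wq and max_eq values
 * reported by READ_CONFIG (less the queues reserved for the slow
 * path), and the EQ count is further limited so it never exceeds the
 * WQ count.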
5678 **/ 5679static int 5680lpfc_sli4_queue_create(struct lpfc_hba *phba) 5681{ 5682 struct lpfc_queue *qdesc; 5683 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5684 int cfg_fcp_wq_count; 5685 int cfg_fcp_eq_count; 5686 5687 /* 5688 * Sanity check for confiugred queue parameters against the run-time 5689 * device parameters 5690 */ 5691 5692 /* Sanity check on FCP fast-path WQ parameters */ 5693 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 5694 if (cfg_fcp_wq_count > 5695 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 5696 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 5697 LPFC_SP_WQN_DEF; 5698 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 5699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5700 "2581 Not enough WQs (%d) from " 5701 "the pci function for supporting " 5702 "FCP WQs (%d)\n", 5703 phba->sli4_hba.max_cfg_param.max_wq, 5704 phba->cfg_fcp_wq_count); 5705 goto out_error; 5706 } 5707 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5708 "2582 Not enough WQs (%d) from the pci " 5709 "function for supporting the requested " 5710 "FCP WQs (%d), the actual FCP WQs can " 5711 "be supported: %d\n", 5712 phba->sli4_hba.max_cfg_param.max_wq, 5713 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 5714 } 5715 /* The actual number of FCP work queues adopted */ 5716 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 5717 5718 /* Sanity check on FCP fast-path EQ parameters */ 5719 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 5720 if (cfg_fcp_eq_count > 5721 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 5722 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 5723 LPFC_SP_EQN_DEF; 5724 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 5725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5726 "2574 Not enough EQs (%d) from the " 5727 "pci function for supporting FCP " 5728 "EQs (%d)\n", 5729 phba->sli4_hba.max_cfg_param.max_eq, 5730 phba->cfg_fcp_eq_count); 5731 goto out_error; 5732 } 5733 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5734 "2575 Not enough EQs (%d) from the pci " 5735 "function for supporting the requested " 5736 "FCP EQs (%d), the actual FCP EQs can " 5737 "be supported: %d\n", 5738 phba->sli4_hba.max_cfg_param.max_eq, 5739 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 5740 } 5741 /* It does not make sense to have more EQs than WQs */ 5742 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5743 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5744 "2593 The FCP EQ count(%d) cannot be greater " 5745 "than the FCP WQ count(%d), limiting the " 5746 "FCP EQ count to %d\n", cfg_fcp_eq_count, 5747 phba->cfg_fcp_wq_count, 5748 phba->cfg_fcp_wq_count); 5749 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5750 } 5751 /* The actual number of FCP event queues adopted */ 5752 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 5753 /* The overall number of event queues used */ 5754 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 5755 5756 /* 5757 * Create Event Queues (EQs) 5758 */ 5759 5760 /* Get EQ depth from module parameter, fake the default for now */ 5761 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 5762 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 5763 5764 /* Create slow path event queue */ 5765 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5766 phba->sli4_hba.eq_ecount); 5767 if (!qdesc) { 5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5769 "0496 Failed allocate slow-path EQ\n"); 5770 goto out_error; 5771 } 5772 phba->sli4_hba.sp_eq = qdesc; 5773 5774 /* Create fast-path FCP Event Queue(s) */ 5775 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 5776 
phba->cfg_fcp_eq_count), GFP_KERNEL); 5777 if (!phba->sli4_hba.fp_eq) { 5778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5779 "2576 Failed allocate memory for fast-path " 5780 "EQ record array\n"); 5781 goto out_free_sp_eq; 5782 } 5783 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5784 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5785 phba->sli4_hba.eq_ecount); 5786 if (!qdesc) { 5787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5788 "0497 Failed allocate fast-path EQ\n"); 5789 goto out_free_fp_eq; 5790 } 5791 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5792 } 5793 5794 /* 5795 * Create Complete Queues (CQs) 5796 */ 5797 5798 /* Get CQ depth from module parameter, fake the default for now */ 5799 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 5800 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 5801 5802 /* Create slow-path Mailbox Command Complete Queue */ 5803 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5804 phba->sli4_hba.cq_ecount); 5805 if (!qdesc) { 5806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5807 "0500 Failed allocate slow-path mailbox CQ\n"); 5808 goto out_free_fp_eq; 5809 } 5810 phba->sli4_hba.mbx_cq = qdesc; 5811 5812 /* Create slow-path ELS Complete Queue */ 5813 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5814 phba->sli4_hba.cq_ecount); 5815 if (!qdesc) { 5816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5817 "0501 Failed allocate slow-path ELS CQ\n"); 5818 goto out_free_mbx_cq; 5819 } 5820 phba->sli4_hba.els_cq = qdesc; 5821 5822 5823 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5824 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5825 phba->cfg_fcp_eq_count), GFP_KERNEL); 5826 if (!phba->sli4_hba.fcp_cq) { 5827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5828 "2577 Failed allocate memory for fast-path " 5829 "CQ record array\n"); 5830 goto out_free_els_cq; 5831 } 5832 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5833 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5834 phba->sli4_hba.cq_ecount); 5835 if (!qdesc) { 5836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5837 "0499 Failed allocate fast-path FCP " 5838 "CQ (%d)\n", fcp_cqidx); 5839 goto out_free_fcp_cq; 5840 } 5841 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 5842 } 5843 5844 /* Create Mailbox Command Queue */ 5845 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 5846 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 5847 5848 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 5849 phba->sli4_hba.mq_ecount); 5850 if (!qdesc) { 5851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5852 "0505 Failed allocate slow-path MQ\n"); 5853 goto out_free_fcp_cq; 5854 } 5855 phba->sli4_hba.mbx_wq = qdesc; 5856 5857 /* 5858 * Create all the Work Queues (WQs) 5859 */ 5860 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 5861 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 5862 5863 /* Create slow-path ELS Work Queue */ 5864 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5865 phba->sli4_hba.wq_ecount); 5866 if (!qdesc) { 5867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5868 "0504 Failed allocate slow-path ELS WQ\n"); 5869 goto out_free_mbx_wq; 5870 } 5871 phba->sli4_hba.els_wq = qdesc; 5872 5873 /* Create fast-path FCP Work Queue(s) */ 5874 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 5875 phba->cfg_fcp_wq_count), GFP_KERNEL); 5876 if (!phba->sli4_hba.fcp_wq) { 5877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5878 "2578 Failed allocate memory for fast-path " 5879 "WQ record array\n"); 5880 goto 
out_free_els_wq; 5881 } 5882 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5883 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5884 phba->sli4_hba.wq_ecount); 5885 if (!qdesc) { 5886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5887 "0503 Failed allocate fast-path FCP " 5888 "WQ (%d)\n", fcp_wqidx); 5889 goto out_free_fcp_wq; 5890 } 5891 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 5892 } 5893 5894 /* 5895 * Create Receive Queue (RQ) 5896 */ 5897 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 5898 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 5899 5900 /* Create Receive Queue for header */ 5901 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5902 phba->sli4_hba.rq_ecount); 5903 if (!qdesc) { 5904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5905 "0506 Failed allocate receive HRQ\n"); 5906 goto out_free_fcp_wq; 5907 } 5908 phba->sli4_hba.hdr_rq = qdesc; 5909 5910 /* Create Receive Queue for data */ 5911 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5912 phba->sli4_hba.rq_ecount); 5913 if (!qdesc) { 5914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5915 "0507 Failed allocate receive DRQ\n"); 5916 goto out_free_hdr_rq; 5917 } 5918 phba->sli4_hba.dat_rq = qdesc; 5919 5920 return 0; 5921 5922out_free_hdr_rq: 5923 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5924 phba->sli4_hba.hdr_rq = NULL; 5925out_free_fcp_wq: 5926 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 5927 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 5928 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 5929 } 5930 kfree(phba->sli4_hba.fcp_wq); 5931out_free_els_wq: 5932 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5933 phba->sli4_hba.els_wq = NULL; 5934out_free_mbx_wq: 5935 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5936 phba->sli4_hba.mbx_wq = NULL; 5937out_free_fcp_cq: 5938 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 5939 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 5940 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5941 } 5942 kfree(phba->sli4_hba.fcp_cq); 5943out_free_els_cq: 5944 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5945 phba->sli4_hba.els_cq = NULL; 5946out_free_mbx_cq: 5947 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5948 phba->sli4_hba.mbx_cq = NULL; 5949out_free_fp_eq: 5950 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 5951 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 5952 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 5953 } 5954 kfree(phba->sli4_hba.fp_eq); 5955out_free_sp_eq: 5956 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5957 phba->sli4_hba.sp_eq = NULL; 5958out_error: 5959 return -ENOMEM; 5960} 5961 5962/** 5963 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 5964 * @phba: pointer to lpfc hba data structure. 5965 * 5966 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 5967 * operation. 5968 * 5969 * Return codes 5970 * 0 - successful 5971 * ENOMEM - No availble memory 5972 * EIO - The mailbox failed to complete successfully. 
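 *
 * The queues are released in roughly the reverse order of their use:
 * the work and receive queues first, then the completion queues, and
 * the event queues last.  The fcp_wq, fcp_cq and fp_eq pointer arrays
 * are freed along with the queues they reference.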
5973 **/ 5974static void 5975lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 5976{ 5977 int fcp_qidx; 5978 5979 /* Release mailbox command work queue */ 5980 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5981 phba->sli4_hba.mbx_wq = NULL; 5982 5983 /* Release ELS work queue */ 5984 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5985 phba->sli4_hba.els_wq = NULL; 5986 5987 /* Release FCP work queue */ 5988 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5989 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 5990 kfree(phba->sli4_hba.fcp_wq); 5991 phba->sli4_hba.fcp_wq = NULL; 5992 5993 /* Release unsolicited receive queue */ 5994 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5995 phba->sli4_hba.hdr_rq = NULL; 5996 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5997 phba->sli4_hba.dat_rq = NULL; 5998 5999 /* Release ELS complete queue */ 6000 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6001 phba->sli4_hba.els_cq = NULL; 6002 6003 /* Release mailbox command complete queue */ 6004 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6005 phba->sli4_hba.mbx_cq = NULL; 6006 6007 /* Release FCP response complete queue */ 6008 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6009 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6010 kfree(phba->sli4_hba.fcp_cq); 6011 phba->sli4_hba.fcp_cq = NULL; 6012 6013 /* Release fast-path event queue */ 6014 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6015 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6016 kfree(phba->sli4_hba.fp_eq); 6017 phba->sli4_hba.fp_eq = NULL; 6018 6019 /* Release slow-path event queue */ 6020 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6021 phba->sli4_hba.sp_eq = NULL; 6022 6023 return; 6024} 6025 6026/** 6027 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 6028 * @phba: pointer to lpfc hba data structure. 6029 * 6030 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 6031 * operation. 6032 * 6033 * Return codes 6034 * 0 - successful 6035 * ENOMEM - No availble memory 6036 * EIO - The mailbox failed to complete successfully. 
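 *
 * Queues are brought up in dependency order: the event queues are
 * created first, then each completion queue is created on its parent
 * EQ (the mailbox and ELS CQs on the slow-path EQ, one FCP CQ per
 * fast-path EQ), then the mailbox, ELS and FCP work queues are bound
 * to their completion queues, and finally the header/data receive
 * queue pair is created on the ELS CQ.  FCP work queues are assigned
 * to FCP completion queues in round-robin order, i.e.
 * fcp_cq_index = (fcp_cq_index + 1) % cfg_fcp_eq_count after each WQ
 * is created.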
6037 **/ 6038int 6039lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6040{ 6041 int rc = -ENOMEM; 6042 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6043 int fcp_cq_index = 0; 6044 6045 /* 6046 * Set up Event Queues (EQs) 6047 */ 6048 6049 /* Set up slow-path event queue */ 6050 if (!phba->sli4_hba.sp_eq) { 6051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6052 "0520 Slow-path EQ not allocated\n"); 6053 goto out_error; 6054 } 6055 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6056 LPFC_SP_DEF_IMAX); 6057 if (rc) { 6058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6059 "0521 Failed setup of slow-path EQ: " 6060 "rc = 0x%x\n", rc); 6061 goto out_error; 6062 } 6063 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6064 "2583 Slow-path EQ setup: queue-id=%d\n", 6065 phba->sli4_hba.sp_eq->queue_id); 6066 6067 /* Set up fast-path event queue */ 6068 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6069 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6071 "0522 Fast-path EQ (%d) not " 6072 "allocated\n", fcp_eqidx); 6073 goto out_destroy_fp_eq; 6074 } 6075 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6076 phba->cfg_fcp_imax); 6077 if (rc) { 6078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6079 "0523 Failed setup of fast-path EQ " 6080 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6081 goto out_destroy_fp_eq; 6082 } 6083 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6084 "2584 Fast-path EQ setup: " 6085 "queue[%d]-id=%d\n", fcp_eqidx, 6086 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6087 } 6088 6089 /* 6090 * Set up Complete Queues (CQs) 6091 */ 6092 6093 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6094 if (!phba->sli4_hba.mbx_cq) { 6095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6096 "0528 Mailbox CQ not allocated\n"); 6097 goto out_destroy_fp_eq; 6098 } 6099 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6100 LPFC_MCQ, LPFC_MBOX); 6101 if (rc) { 6102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6103 "0529 Failed setup of slow-path mailbox CQ: " 6104 "rc = 0x%x\n", rc); 6105 goto out_destroy_fp_eq; 6106 } 6107 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6108 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6109 phba->sli4_hba.mbx_cq->queue_id, 6110 phba->sli4_hba.sp_eq->queue_id); 6111 6112 /* Set up slow-path ELS Complete Queue */ 6113 if (!phba->sli4_hba.els_cq) { 6114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6115 "0530 ELS CQ not allocated\n"); 6116 goto out_destroy_mbx_cq; 6117 } 6118 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6119 LPFC_WCQ, LPFC_ELS); 6120 if (rc) { 6121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6122 "0531 Failed setup of slow-path ELS CQ: " 6123 "rc = 0x%x\n", rc); 6124 goto out_destroy_mbx_cq; 6125 } 6126 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6127 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6128 phba->sli4_hba.els_cq->queue_id, 6129 phba->sli4_hba.sp_eq->queue_id); 6130 6131 /* Set up fast-path FCP Response Complete Queue */ 6132 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6133 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6135 "0526 Fast-path FCP CQ (%d) not " 6136 "allocated\n", fcp_cqidx); 6137 goto out_destroy_fcp_cq; 6138 } 6139 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6140 phba->sli4_hba.fp_eq[fcp_cqidx], 6141 LPFC_WCQ, LPFC_FCP); 6142 if (rc) { 6143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6144 "0527 Failed setup of fast-path FCP " 6145 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 6146 goto out_destroy_fcp_cq; 6147 } 6148 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6149 "2588 FCP CQ setup: cq[%d]-id=%d, " 6150 "parent eq[%d]-id=%d\n", 6151 fcp_cqidx, 6152 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6153 fcp_cqidx, 6154 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6155 } 6156 6157 /* 6158 * Set up all the Work Queues (WQs) 6159 */ 6160 6161 /* Set up Mailbox Command Queue */ 6162 if (!phba->sli4_hba.mbx_wq) { 6163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6164 "0538 Slow-path MQ not allocated\n"); 6165 goto out_destroy_fcp_cq; 6166 } 6167 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6168 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6169 if (rc) { 6170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6171 "0539 Failed setup of slow-path MQ: " 6172 "rc = 0x%x\n", rc); 6173 goto out_destroy_fcp_cq; 6174 } 6175 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6176 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6177 phba->sli4_hba.mbx_wq->queue_id, 6178 phba->sli4_hba.mbx_cq->queue_id); 6179 6180 /* Set up slow-path ELS Work Queue */ 6181 if (!phba->sli4_hba.els_wq) { 6182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6183 "0536 Slow-path ELS WQ not allocated\n"); 6184 goto out_destroy_mbx_wq; 6185 } 6186 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6187 phba->sli4_hba.els_cq, LPFC_ELS); 6188 if (rc) { 6189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6190 "0537 Failed setup of slow-path ELS WQ: " 6191 "rc = 0x%x\n", rc); 6192 goto out_destroy_mbx_wq; 6193 } 6194 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6195 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6196 phba->sli4_hba.els_wq->queue_id, 6197 phba->sli4_hba.els_cq->queue_id); 6198 6199 /* Set up fast-path FCP Work Queue */ 6200 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6201 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6203 "0534 Fast-path FCP WQ (%d) not " 6204 "allocated\n", fcp_wqidx); 6205 goto out_destroy_fcp_wq; 6206 } 6207 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6208 phba->sli4_hba.fcp_cq[fcp_cq_index], 6209 LPFC_FCP); 6210 if (rc) { 6211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6212 "0535 Failed setup of fast-path FCP " 6213 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6214 goto out_destroy_fcp_wq; 6215 } 6216 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6217 "2591 FCP WQ setup: wq[%d]-id=%d, " 6218 "parent cq[%d]-id=%d\n", 6219 fcp_wqidx, 6220 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6221 fcp_cq_index, 6222 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6223 /* Round robin FCP Work Queue's Completion Queue assignment */ 6224 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6225 } 6226 6227 /* 6228 * Create Receive Queue (RQ) 6229 */ 6230 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6232 "0540 Receive Queue not allocated\n"); 6233 goto out_destroy_fcp_wq; 6234 } 6235 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6236 phba->sli4_hba.els_cq, LPFC_USOL); 6237 if (rc) { 6238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6239 "0541 Failed setup of Receive Queue: " 6240 "rc = 0x%x\n", rc); 6241 goto out_destroy_fcp_wq; 6242 } 6243 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6244 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6245 "parent cq-id=%d\n", 6246 phba->sli4_hba.hdr_rq->queue_id, 6247 phba->sli4_hba.dat_rq->queue_id, 6248 phba->sli4_hba.els_cq->queue_id); 6249 return 0; 6250 6251out_destroy_fcp_wq: 6252 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--) 6253 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6254 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6255out_destroy_mbx_wq: 6256 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6257out_destroy_fcp_cq: 6258 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6259 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6260 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6261out_destroy_mbx_cq: 6262 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6263out_destroy_fp_eq: 6264 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6265 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6266 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6267out_error: 6268 return rc; 6269} 6270 6271/** 6272 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6273 * @phba: pointer to lpfc hba data structure. 6274 * 6275 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 6276 * operation. 6277 * 6278 * Return codes 6279 * 0 - successful 6280 * ENOMEM - No availble memory 6281 * EIO - The mailbox failed to complete successfully. 6282 **/ 6283void 6284lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6285{ 6286 int fcp_qidx; 6287 6288 /* Unset mailbox command work queue */ 6289 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6290 /* Unset ELS work queue */ 6291 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6292 /* Unset unsolicited receive queue */ 6293 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6294 /* Unset FCP work queue */ 6295 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6296 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6297 /* Unset mailbox command complete queue */ 6298 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6299 /* Unset ELS complete queue */ 6300 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6301 /* Unset FCP response complete queue */ 6302 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6303 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6304 /* Unset fast-path event queue */ 6305 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6306 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6307 /* Unset slow-path event queue */ 6308 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6309} 6310 6311/** 6312 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6313 * @phba: pointer to lpfc hba data structure. 6314 * 6315 * This routine is invoked to allocate and set up a pool of completion queue 6316 * events. The body of the completion queue event is a completion queue entry 6317 * CQE. For now, this pool is used for the interrupt service routine to queue 6318 * the following HBA completion queue events for the worker thread to process: 6319 * - Mailbox asynchronous events 6320 * - Receive queue completion unsolicited events 6321 * Later, this can be used for all the slow-path events. 
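 * The pool is sized at four times the completion queue entry count
 * (4 * cq_ecount) and the free events are kept on the
 * sp_cqe_event_pool list, from which they are allocated and released
 * under the hbalock by the lpfc_sli4_cq_event_alloc/release helpers
 * below (the __ variants are for callers that already hold the lock).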
6322 * 6323 * Return codes 6324 * 0 - successful 6325 * -ENOMEM - No availble memory 6326 **/ 6327static int 6328lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6329{ 6330 struct lpfc_cq_event *cq_event; 6331 int i; 6332 6333 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6334 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6335 if (!cq_event) 6336 goto out_pool_create_fail; 6337 list_add_tail(&cq_event->list, 6338 &phba->sli4_hba.sp_cqe_event_pool); 6339 } 6340 return 0; 6341 6342out_pool_create_fail: 6343 lpfc_sli4_cq_event_pool_destroy(phba); 6344 return -ENOMEM; 6345} 6346 6347/** 6348 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6349 * @phba: pointer to lpfc hba data structure. 6350 * 6351 * This routine is invoked to free the pool of completion queue events at 6352 * driver unload time. Note that, it is the responsibility of the driver 6353 * cleanup routine to free all the outstanding completion-queue events 6354 * allocated from this pool back into the pool before invoking this routine 6355 * to destroy the pool. 6356 **/ 6357static void 6358lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6359{ 6360 struct lpfc_cq_event *cq_event, *next_cq_event; 6361 6362 list_for_each_entry_safe(cq_event, next_cq_event, 6363 &phba->sli4_hba.sp_cqe_event_pool, list) { 6364 list_del(&cq_event->list); 6365 kfree(cq_event); 6366 } 6367} 6368 6369/** 6370 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6371 * @phba: pointer to lpfc hba data structure. 6372 * 6373 * This routine is the lock free version of the API invoked to allocate a 6374 * completion-queue event from the free pool. 6375 * 6376 * Return: Pointer to the newly allocated completion-queue event if successful 6377 * NULL otherwise. 6378 **/ 6379struct lpfc_cq_event * 6380__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6381{ 6382 struct lpfc_cq_event *cq_event = NULL; 6383 6384 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6385 struct lpfc_cq_event, list); 6386 return cq_event; 6387} 6388 6389/** 6390 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6391 * @phba: pointer to lpfc hba data structure. 6392 * 6393 * This routine is the lock version of the API invoked to allocate a 6394 * completion-queue event from the free pool. 6395 * 6396 * Return: Pointer to the newly allocated completion-queue event if successful 6397 * NULL otherwise. 6398 **/ 6399struct lpfc_cq_event * 6400lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6401{ 6402 struct lpfc_cq_event *cq_event; 6403 unsigned long iflags; 6404 6405 spin_lock_irqsave(&phba->hbalock, iflags); 6406 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6407 spin_unlock_irqrestore(&phba->hbalock, iflags); 6408 return cq_event; 6409} 6410 6411/** 6412 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6413 * @phba: pointer to lpfc hba data structure. 6414 * @cq_event: pointer to the completion queue event to be freed. 6415 * 6416 * This routine is the lock free version of the API invoked to release a 6417 * completion-queue event back into the free pool. 6418 **/ 6419void 6420__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6421 struct lpfc_cq_event *cq_event) 6422{ 6423 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6424} 6425 6426/** 6427 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6428 * @phba: pointer to lpfc hba data structure. 
6429 * @cq_event: pointer to the completion queue event to be freed. 6430 * 6431 * This routine is the lock version of the API invoked to release a 6432 * completion-queue event back into the free pool. 6433 **/ 6434void 6435lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6436 struct lpfc_cq_event *cq_event) 6437{ 6438 unsigned long iflags; 6439 spin_lock_irqsave(&phba->hbalock, iflags); 6440 __lpfc_sli4_cq_event_release(phba, cq_event); 6441 spin_unlock_irqrestore(&phba->hbalock, iflags); 6442} 6443 6444/** 6445 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 6446 * @phba: pointer to lpfc hba data structure. 6447 * 6448 * This routine is to free all the pending completion-queue events to the 6449 * back into the free pool for device reset. 6450 **/ 6451static void 6452lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 6453{ 6454 LIST_HEAD(cqelist); 6455 struct lpfc_cq_event *cqe; 6456 unsigned long iflags; 6457 6458 /* Retrieve all the pending WCQEs from pending WCQE lists */ 6459 spin_lock_irqsave(&phba->hbalock, iflags); 6460 /* Pending FCP XRI abort events */ 6461 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 6462 &cqelist); 6463 /* Pending ELS XRI abort events */ 6464 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 6465 &cqelist); 6466 /* Pending asynnc events */ 6467 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 6468 &cqelist); 6469 spin_unlock_irqrestore(&phba->hbalock, iflags); 6470 6471 while (!list_empty(&cqelist)) { 6472 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 6473 lpfc_sli4_cq_event_release(phba, cqe); 6474 } 6475} 6476 6477/** 6478 * lpfc_pci_function_reset - Reset pci function. 6479 * @phba: pointer to lpfc hba data structure. 6480 * 6481 * This routine is invoked to request a PCI function reset. It will destroys 6482 * all resources assigned to the PCI function which originates this request. 6483 * 6484 * Return codes 6485 * 0 - successful 6486 * ENOMEM - No availble memory 6487 * EIO - The mailbox failed to complete successfully. 6488 **/ 6489int 6490lpfc_pci_function_reset(struct lpfc_hba *phba) 6491{ 6492 LPFC_MBOXQ_t *mboxq; 6493 uint32_t rc = 0; 6494 uint32_t shdr_status, shdr_add_status; 6495 union lpfc_sli4_cfg_shdr *shdr; 6496 6497 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6498 if (!mboxq) { 6499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6500 "0494 Unable to allocate memory for issuing " 6501 "SLI_FUNCTION_RESET mailbox command\n"); 6502 return -ENOMEM; 6503 } 6504 6505 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 6506 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6507 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 6508 LPFC_SLI4_MBX_EMBED); 6509 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6510 shdr = (union lpfc_sli4_cfg_shdr *) 6511 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6512 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6513 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6514 if (rc != MBX_TIMEOUT) 6515 mempool_free(mboxq, phba->mbox_mem_pool); 6516 if (shdr_status || shdr_add_status || rc) { 6517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6518 "0495 SLI_FUNCTION_RESET mailbox failed with " 6519 "status x%x add_status x%x, mbx status x%x\n", 6520 shdr_status, shdr_add_status, rc); 6521 rc = -ENXIO; 6522 } 6523 return rc; 6524} 6525 6526/** 6527 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 6528 * @phba: pointer to lpfc hba data structure. 
6529 * @cnt: number of nop mailbox commands to send. 6530 * 6531 * This routine is invoked to send a number @cnt of NOP mailbox command and 6532 * wait for each command to complete. 6533 * 6534 * Return: the number of NOP mailbox command completed. 6535 **/ 6536static int 6537lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 6538{ 6539 LPFC_MBOXQ_t *mboxq; 6540 int length, cmdsent; 6541 uint32_t mbox_tmo; 6542 uint32_t rc = 0; 6543 uint32_t shdr_status, shdr_add_status; 6544 union lpfc_sli4_cfg_shdr *shdr; 6545 6546 if (cnt == 0) { 6547 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6548 "2518 Requested to send 0 NOP mailbox cmd\n"); 6549 return cnt; 6550 } 6551 6552 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6553 if (!mboxq) { 6554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6555 "2519 Unable to allocate memory for issuing " 6556 "NOP mailbox command\n"); 6557 return 0; 6558 } 6559 6560 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 6561 length = (sizeof(struct lpfc_mbx_nop) - 6562 sizeof(struct lpfc_sli4_cfg_mhdr)); 6563 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6564 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 6565 6566 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6567 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 6568 if (!phba->sli4_hba.intr_enable) 6569 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6570 else 6571 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 6572 if (rc == MBX_TIMEOUT) 6573 break; 6574 /* Check return status */ 6575 shdr = (union lpfc_sli4_cfg_shdr *) 6576 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6577 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6578 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6579 &shdr->response); 6580 if (shdr_status || shdr_add_status || rc) { 6581 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6582 "2520 NOP mailbox command failed " 6583 "status x%x add_status x%x mbx " 6584 "status x%x\n", shdr_status, 6585 shdr_add_status, rc); 6586 break; 6587 } 6588 } 6589 6590 if (rc != MBX_TIMEOUT) 6591 mempool_free(mboxq, phba->mbox_mem_pool); 6592 6593 return cmdsent; 6594} 6595 6596/** 6597 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device 6598 * @phba: pointer to lpfc hba data structure. 6599 * @fcfi: fcf index. 6600 * 6601 * This routine is invoked to unregister a FCFI from device. 6602 **/ 6603void 6604lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) 6605{ 6606 LPFC_MBOXQ_t *mbox; 6607 uint32_t mbox_tmo; 6608 int rc; 6609 unsigned long flags; 6610 6611 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6612 6613 if (!mbox) 6614 return; 6615 6616 lpfc_unreg_fcfi(mbox, fcfi); 6617 6618 if (!phba->sli4_hba.intr_enable) 6619 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6620 else { 6621 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6622 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6623 } 6624 if (rc != MBX_TIMEOUT) 6625 mempool_free(mbox, phba->mbox_mem_pool); 6626 if (rc != MBX_SUCCESS) 6627 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6628 "2517 Unregister FCFI command failed " 6629 "status %d, mbxStatus x%x\n", rc, 6630 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6631 else { 6632 spin_lock_irqsave(&phba->hbalock, flags); 6633 /* Mark the FCFI is no longer registered */ 6634 phba->fcf.fcf_flag &= 6635 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE); 6636 spin_unlock_irqrestore(&phba->hbalock, flags); 6637 } 6638} 6639 6640/** 6641 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 
6642 * @phba: pointer to lpfc hba data structure. 6643 * 6644 * This routine is invoked to set up the PCI device memory space for device 6645 * with SLI-4 interface spec. 6646 * 6647 * Return codes 6648 * 0 - successful 6649 * other values - error 6650 **/ 6651static int 6652lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6653{ 6654 struct pci_dev *pdev; 6655 unsigned long bar0map_len, bar1map_len, bar2map_len; 6656 int error = -ENODEV; 6657 6658 /* Obtain PCI device reference */ 6659 if (!phba->pcidev) 6660 return error; 6661 else 6662 pdev = phba->pcidev; 6663 6664 /* Set the device DMA mask size */ 6665 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6666 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6667 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6668 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6669 return error; 6670 } 6671 } 6672 6673 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6674 * number of bytes required by each mapping. They are actually 6675 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device. 6676 */ 6677 if (pci_resource_start(pdev, 0)) { 6678 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6679 bar0map_len = pci_resource_len(pdev, 0); 6680 } else { 6681 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6682 bar0map_len = pci_resource_len(pdev, 1); 6683 } 6684 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6685 bar1map_len = pci_resource_len(pdev, 2); 6686 6687 phba->pci_bar2_map = pci_resource_start(pdev, 4); 6688 bar2map_len = pci_resource_len(pdev, 4); 6689 6690 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6691 phba->sli4_hba.conf_regs_memmap_p = 6692 ioremap(phba->pci_bar0_map, bar0map_len); 6693 if (!phba->sli4_hba.conf_regs_memmap_p) { 6694 dev_printk(KERN_ERR, &pdev->dev, 6695 "ioremap failed for SLI4 PCI config registers.\n"); 6696 goto out; 6697 } 6698 6699 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 6700 phba->sli4_hba.ctrl_regs_memmap_p = 6701 ioremap(phba->pci_bar1_map, bar1map_len); 6702 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6703 dev_printk(KERN_ERR, &pdev->dev, 6704 "ioremap failed for SLI4 HBA control registers.\n"); 6705 goto out_iounmap_conf; 6706 } 6707 6708 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 6709 phba->sli4_hba.drbl_regs_memmap_p = 6710 ioremap(phba->pci_bar2_map, bar2map_len); 6711 if (!phba->sli4_hba.drbl_regs_memmap_p) { 6712 dev_printk(KERN_ERR, &pdev->dev, 6713 "ioremap failed for SLI4 HBA doorbell registers.\n"); 6714 goto out_iounmap_ctrl; 6715 } 6716 6717 /* Set up BAR0 PCI config space register memory map */ 6718 lpfc_sli4_bar0_register_memmap(phba); 6719 6720 /* Set up BAR1 register memory map */ 6721 lpfc_sli4_bar1_register_memmap(phba); 6722 6723 /* Set up BAR2 register memory map */ 6724 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 6725 if (error) 6726 goto out_iounmap_all; 6727 6728 return 0; 6729 6730out_iounmap_all: 6731 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6732out_iounmap_ctrl: 6733 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6734out_iounmap_conf: 6735 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6736out: 6737 return error; 6738} 6739 6740/** 6741 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 6742 * @phba: pointer to lpfc hba data structure. 6743 * 6744 * This routine is invoked to unset the PCI device memory space for device 6745 * with SLI-4 interface spec. 
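 *
 * Only the three ioremapped register regions (doorbell, control and
 * configuration space) are unmapped here; the coherent DMA memory for
 * the bootstrap mailbox is released separately by
 * lpfc_destroy_bootstrap_mbox().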
6746 **/ 6747static void 6748lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 6749{ 6750 struct pci_dev *pdev; 6751 6752 /* Obtain PCI device reference */ 6753 if (!phba->pcidev) 6754 return; 6755 else 6756 pdev = phba->pcidev; 6757 6758 /* Free coherent DMA memory allocated */ 6759 6760 /* Unmap I/O memory space */ 6761 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6762 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6763 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6764 6765 return; 6766} 6767 6768/** 6769 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6770 * @phba: pointer to lpfc hba data structure. 6771 * 6772 * This routine is invoked to enable the MSI-X interrupt vectors to device 6773 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6774 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6775 * invoked, enables either all or nothing, depending on the current 6776 * availability of PCI vector resources. The device driver is responsible 6777 * for calling the individual request_irq() to register each MSI-X vector 6778 * with a interrupt handler, which is done in this function. Note that 6779 * later when device is unloading, the driver should always call free_irq() 6780 * on all MSI-X vectors it has done request_irq() on before calling 6781 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6782 * will be left with MSI-X enabled and leaks its vectors. 6783 * 6784 * Return codes 6785 * 0 - successful 6786 * other values - error 6787 **/ 6788static int 6789lpfc_sli_enable_msix(struct lpfc_hba *phba) 6790{ 6791 int rc, i; 6792 LPFC_MBOXQ_t *pmb; 6793 6794 /* Set up MSI-X multi-message vectors */ 6795 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6796 phba->msix_entries[i].entry = i; 6797 6798 /* Configure MSI-X capability structure */ 6799 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6800 ARRAY_SIZE(phba->msix_entries)); 6801 if (rc) { 6802 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6803 "0420 PCI enable MSI-X failed (%d)\n", rc); 6804 goto msi_fail_out; 6805 } 6806 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6808 "0477 MSI-X entry[%d]: vector=x%x " 6809 "message=%d\n", i, 6810 phba->msix_entries[i].vector, 6811 phba->msix_entries[i].entry); 6812 /* 6813 * Assign MSI-X vectors to interrupt handlers 6814 */ 6815 6816 /* vector-0 is associated to slow-path handler */ 6817 rc = request_irq(phba->msix_entries[0].vector, 6818 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6819 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6820 if (rc) { 6821 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6822 "0421 MSI-X slow-path request_irq failed " 6823 "(%d)\n", rc); 6824 goto msi_fail_out; 6825 } 6826 6827 /* vector-1 is associated to fast-path handler */ 6828 rc = request_irq(phba->msix_entries[1].vector, 6829 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6830 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6831 6832 if (rc) { 6833 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6834 "0429 MSI-X fast-path request_irq failed " 6835 "(%d)\n", rc); 6836 goto irq_fail_out; 6837 } 6838 6839 /* 6840 * Configure HBA MSI-X attention conditions to messages 6841 */ 6842 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6843 6844 if (!pmb) { 6845 rc = -ENOMEM; 6846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6847 "0474 Unable to allocate memory for issuing " 6848 "MBOX_CONFIG_MSI command\n"); 6849 goto mem_fail_out; 6850 } 6851 rc = lpfc_config_msi(phba, pmb); 6852 if (rc) 6853 goto mbx_fail_out; 6854 rc = 
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6855 if (rc != MBX_SUCCESS) { 6856 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6857 "0351 Config MSI mailbox command failed, " 6858 "mbxCmd x%x, mbxStatus x%x\n", 6859 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6860 goto mbx_fail_out; 6861 } 6862 6863 /* Free memory allocated for mailbox command */ 6864 mempool_free(pmb, phba->mbox_mem_pool); 6865 return rc; 6866 6867mbx_fail_out: 6868 /* Free memory allocated for mailbox command */ 6869 mempool_free(pmb, phba->mbox_mem_pool); 6870 6871mem_fail_out: 6872 /* free the irq already requested */ 6873 free_irq(phba->msix_entries[1].vector, phba); 6874 6875irq_fail_out: 6876 /* free the irq already requested */ 6877 free_irq(phba->msix_entries[0].vector, phba); 6878 6879msi_fail_out: 6880 /* Unconfigure MSI-X capability structure */ 6881 pci_disable_msix(phba->pcidev); 6882 return rc; 6883} 6884 6885/** 6886 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6887 * @phba: pointer to lpfc hba data structure. 6888 * 6889 * This routine is invoked to release the MSI-X vectors and then disable the 6890 * MSI-X interrupt mode to device with SLI-3 interface spec. 6891 **/ 6892static void 6893lpfc_sli_disable_msix(struct lpfc_hba *phba) 6894{ 6895 int i; 6896 6897 /* Free up MSI-X multi-message vectors */ 6898 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6899 free_irq(phba->msix_entries[i].vector, phba); 6900 /* Disable MSI-X */ 6901 pci_disable_msix(phba->pcidev); 6902 6903 return; 6904} 6905 6906/** 6907 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 6908 * @phba: pointer to lpfc hba data structure. 6909 * 6910 * This routine is invoked to enable the MSI interrupt mode to device with 6911 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 6912 * enable the MSI vector. The device driver is responsible for calling the 6913 * request_irq() to register MSI vector with a interrupt the handler, which 6914 * is done in this function. 6915 * 6916 * Return codes 6917 * 0 - successful 6918 * other values - error 6919 */ 6920static int 6921lpfc_sli_enable_msi(struct lpfc_hba *phba) 6922{ 6923 int rc; 6924 6925 rc = pci_enable_msi(phba->pcidev); 6926 if (!rc) 6927 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6928 "0462 PCI enable MSI mode success.\n"); 6929 else { 6930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6931 "0471 PCI enable MSI mode failed (%d)\n", rc); 6932 return rc; 6933 } 6934 6935 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6936 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6937 if (rc) { 6938 pci_disable_msi(phba->pcidev); 6939 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6940 "0478 MSI request_irq failed (%d)\n", rc); 6941 } 6942 return rc; 6943} 6944 6945/** 6946 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 6947 * @phba: pointer to lpfc hba data structure. 6948 * 6949 * This routine is invoked to disable the MSI interrupt mode to device with 6950 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has 6951 * done request_irq() on before calling pci_disable_msi(). Failure to do so 6952 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 6953 * its vector. 6954 */ 6955static void 6956lpfc_sli_disable_msi(struct lpfc_hba *phba) 6957{ 6958 free_irq(phba->pcidev->irq, phba); 6959 pci_disable_msi(phba->pcidev); 6960 return; 6961} 6962 6963/** 6964 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 6965 * @phba: pointer to lpfc hba data structure. 
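 * @cfg_mode: requested interrupt mode; 2 attempts MSI-X first, 1 starts
 *            with MSI, and 0 falls straight through to INTx.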
6966 * 6967 * This routine is invoked to enable device interrupt and associate driver's 6968 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 6969 * spec. Depends on the interrupt mode configured to the driver, the driver 6970 * will try to fallback from the configured interrupt mode to an interrupt 6971 * mode which is supported by the platform, kernel, and device in the order 6972 * of: 6973 * MSI-X -> MSI -> IRQ. 6974 * 6975 * Return codes 6976 * 0 - successful 6977 * other values - error 6978 **/ 6979static uint32_t 6980lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6981{ 6982 uint32_t intr_mode = LPFC_INTR_ERROR; 6983 int retval; 6984 6985 if (cfg_mode == 2) { 6986 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6987 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6988 if (!retval) { 6989 /* Now, try to enable MSI-X interrupt mode */ 6990 retval = lpfc_sli_enable_msix(phba); 6991 if (!retval) { 6992 /* Indicate initialization to MSI-X mode */ 6993 phba->intr_type = MSIX; 6994 intr_mode = 2; 6995 } 6996 } 6997 } 6998 6999 /* Fallback to MSI if MSI-X initialization failed */ 7000 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7001 retval = lpfc_sli_enable_msi(phba); 7002 if (!retval) { 7003 /* Indicate initialization to MSI mode */ 7004 phba->intr_type = MSI; 7005 intr_mode = 1; 7006 } 7007 } 7008 7009 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7010 if (phba->intr_type == NONE) { 7011 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 7012 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7013 if (!retval) { 7014 /* Indicate initialization to INTx mode */ 7015 phba->intr_type = INTx; 7016 intr_mode = 0; 7017 } 7018 } 7019 return intr_mode; 7020} 7021 7022/** 7023 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 7024 * @phba: pointer to lpfc hba data structure. 7025 * 7026 * This routine is invoked to disable device interrupt and disassociate the 7027 * driver's interrupt handler(s) from interrupt vector(s) to device with 7028 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 7029 * release the interrupt vector(s) for the message signaled interrupt. 7030 **/ 7031static void 7032lpfc_sli_disable_intr(struct lpfc_hba *phba) 7033{ 7034 /* Disable the currently initialized interrupt mode */ 7035 if (phba->intr_type == MSIX) 7036 lpfc_sli_disable_msix(phba); 7037 else if (phba->intr_type == MSI) 7038 lpfc_sli_disable_msi(phba); 7039 else if (phba->intr_type == INTx) 7040 free_irq(phba->pcidev->irq, phba); 7041 7042 /* Reset interrupt management states */ 7043 phba->intr_type = NONE; 7044 phba->sli.slistat.sli_intr = 0; 7045 7046 return; 7047} 7048 7049/** 7050 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 7051 * @phba: pointer to lpfc hba data structure. 7052 * 7053 * This routine is invoked to enable the MSI-X interrupt vectors to device 7054 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 7055 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 7056 * enables either all or nothing, depending on the current availability of 7057 * PCI vector resources. The device driver is responsible for calling the 7058 * individual request_irq() to register each MSI-X vector with a interrupt 7059 * handler, which is done in this function. 
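 * The number of vectors requested is taken from
 * phba->sli4_hba.cfg_eqn (the fast-path FCP EQ count plus the
 * slow-path EQ allowance); if pci_enable_msix() reports that only a
 * smaller number of vectors is available, the request is retried with
 * that reduced count.  Vector 0 is bound to the slow-path handler and
 * the remaining vectors to the per-EQ fast-path handlers.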
Note that later when device is 7060 * unloading, the driver should always call free_irq() on all MSI-X vectors 7061 * it has done request_irq() on before calling pci_disable_msix(). Failure 7062 * to do so results in a BUG_ON() and a device will be left with MSI-X 7063 * enabled and leaks its vectors. 7064 * 7065 * Return codes 7066 * 0 - successful 7067 * other values - error 7068 **/ 7069static int 7070lpfc_sli4_enable_msix(struct lpfc_hba *phba) 7071{ 7072 int vectors, rc, index; 7073 7074 /* Set up MSI-X multi-message vectors */ 7075 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7076 phba->sli4_hba.msix_entries[index].entry = index; 7077 7078 /* Configure MSI-X capability structure */ 7079 vectors = phba->sli4_hba.cfg_eqn; 7080enable_msix_vectors: 7081 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 7082 vectors); 7083 if (rc > 1) { 7084 vectors = rc; 7085 goto enable_msix_vectors; 7086 } else if (rc) { 7087 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7088 "0484 PCI enable MSI-X failed (%d)\n", rc); 7089 goto msi_fail_out; 7090 } 7091 7092 /* Log MSI-X vector assignment */ 7093 for (index = 0; index < vectors; index++) 7094 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7095 "0489 MSI-X entry[%d]: vector=x%x " 7096 "message=%d\n", index, 7097 phba->sli4_hba.msix_entries[index].vector, 7098 phba->sli4_hba.msix_entries[index].entry); 7099 /* 7100 * Assign MSI-X vectors to interrupt handlers 7101 */ 7102 7103 /* The first vector must associated to slow-path handler for MQ */ 7104 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7105 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 7106 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7107 if (rc) { 7108 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7109 "0485 MSI-X slow-path request_irq failed " 7110 "(%d)\n", rc); 7111 goto msi_fail_out; 7112 } 7113 7114 /* The rest of the vector(s) are associated to fast-path handler(s) */ 7115 for (index = 1; index < vectors; index++) { 7116 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 7117 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 7118 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 7119 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 7120 LPFC_FP_DRIVER_HANDLER_NAME, 7121 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7122 if (rc) { 7123 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7124 "0486 MSI-X fast-path (%d) " 7125 "request_irq failed (%d)\n", index, rc); 7126 goto cfg_fail_out; 7127 } 7128 } 7129 phba->sli4_hba.msix_vec_nr = vectors; 7130 7131 return rc; 7132 7133cfg_fail_out: 7134 /* free the irq already requested */ 7135 for (--index; index >= 1; index--) 7136 free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 7137 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7138 7139 /* free the irq already requested */ 7140 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7141 7142msi_fail_out: 7143 /* Unconfigure MSI-X capability structure */ 7144 pci_disable_msix(phba->pcidev); 7145 return rc; 7146} 7147 7148/** 7149 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 7150 * @phba: pointer to lpfc hba data structure. 7151 * 7152 * This routine is invoked to release the MSI-X vectors and then disable the 7153 * MSI-X interrupt mode to device with SLI-4 interface spec. 
7154 **/
7155 static void
7156 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7157 {
7158 int index;
7159
7160 /* Free up MSI-X multi-message vectors */
7161 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7162
7163 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7164 free_irq(phba->sli4_hba.msix_entries[index].vector,
7165 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7166
7167 /* Disable MSI-X */
7168 pci_disable_msix(phba->pcidev);
7169
7170 return;
7171 }
7172
7173 /**
7174 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7175 * @phba: pointer to lpfc hba data structure.
7176 *
7177 * This routine is invoked to enable the MSI interrupt mode to device with
7178 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7179 * to enable the MSI vector. The device driver is responsible for calling
7180 * request_irq() to register the MSI vector with an interrupt handler,
7181 * which is done in this function.
7182 *
7183 * Return codes
7184 * 0 - successful
7185 * other values - error
7186 **/
7187 static int
7188 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7189 {
7190 int rc, index;
7191
7192 rc = pci_enable_msi(phba->pcidev);
7193 if (!rc)
7194 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7195 "0487 PCI enable MSI mode success.\n");
7196 else {
7197 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7198 "0488 PCI enable MSI mode failed (%d)\n", rc);
7199 return rc;
7200 }
7201
7202 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7203 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7204 if (rc) {
7205 pci_disable_msi(phba->pcidev);
7206 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7207 "0490 MSI request_irq failed (%d)\n", rc);
7208 return rc;
7209 }
7210
7211 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7212 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7213 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7214 }
7215
7216 return 0;
7217 }
7218
7219 /**
7220 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7221 * @phba: pointer to lpfc hba data structure.
7222 *
7223 * This routine is invoked to disable the MSI interrupt mode to device with
7224 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7225 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7226 * results in a BUG_ON() and the device will be left with MSI enabled and leaks
7227 * its vector.
7228 **/
7229 static void
7230 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7231 {
7232 free_irq(phba->pcidev->irq, phba);
7233 pci_disable_msi(phba->pcidev);
7234 return;
7235 }
7236
7237 /**
7238 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7239 * @phba: pointer to lpfc hba data structure.
7240 *
7241 * This routine is invoked to enable device interrupt and associate driver's
7242 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7243 * interface spec. Depending on the interrupt mode configured for the driver,
7244 * the driver will try to fall back from the configured interrupt mode to an
7245 * interrupt mode which is supported by the platform, kernel, and device in
7246 * the order of:
7247 * MSI-X -> MSI -> IRQ.
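 * The @cfg_mode argument is the requested starting mode (2 - MSI-X, 1 - MSI,
 * 0 - INTx); as seen in the callers later in this file, it is taken from
 * phba->cfg_use_msi at probe time or from the previously active
 * phba->intr_mode on resume and slot reset.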
7248 * 7249 * Return codes 7250 * 0 - successful 7251 * other values - error 7252 **/ 7253static uint32_t 7254lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 7255{ 7256 uint32_t intr_mode = LPFC_INTR_ERROR; 7257 int retval, index; 7258 7259 if (cfg_mode == 2) { 7260 /* Preparation before conf_msi mbox cmd */ 7261 retval = 0; 7262 if (!retval) { 7263 /* Now, try to enable MSI-X interrupt mode */ 7264 retval = lpfc_sli4_enable_msix(phba); 7265 if (!retval) { 7266 /* Indicate initialization to MSI-X mode */ 7267 phba->intr_type = MSIX; 7268 intr_mode = 2; 7269 } 7270 } 7271 } 7272 7273 /* Fallback to MSI if MSI-X initialization failed */ 7274 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7275 retval = lpfc_sli4_enable_msi(phba); 7276 if (!retval) { 7277 /* Indicate initialization to MSI mode */ 7278 phba->intr_type = MSI; 7279 intr_mode = 1; 7280 } 7281 } 7282 7283 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7284 if (phba->intr_type == NONE) { 7285 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7286 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7287 if (!retval) { 7288 /* Indicate initialization to INTx mode */ 7289 phba->intr_type = INTx; 7290 intr_mode = 0; 7291 for (index = 0; index < phba->cfg_fcp_eq_count; 7292 index++) { 7293 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7294 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7295 } 7296 } 7297 } 7298 return intr_mode; 7299} 7300 7301/** 7302 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 7303 * @phba: pointer to lpfc hba data structure. 7304 * 7305 * This routine is invoked to disable device interrupt and disassociate 7306 * the driver's interrupt handler(s) from interrupt vector(s) to device 7307 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 7308 * will release the interrupt vector(s) for the message signaled interrupt. 7309 **/ 7310static void 7311lpfc_sli4_disable_intr(struct lpfc_hba *phba) 7312{ 7313 /* Disable the currently initialized interrupt mode */ 7314 if (phba->intr_type == MSIX) 7315 lpfc_sli4_disable_msix(phba); 7316 else if (phba->intr_type == MSI) 7317 lpfc_sli4_disable_msi(phba); 7318 else if (phba->intr_type == INTx) 7319 free_irq(phba->pcidev->irq, phba); 7320 7321 /* Reset interrupt management states */ 7322 phba->intr_type = NONE; 7323 phba->sli.slistat.sli_intr = 0; 7324 7325 return; 7326} 7327 7328/** 7329 * lpfc_unset_hba - Unset SLI3 hba device initialization 7330 * @phba: pointer to lpfc hba data structure. 7331 * 7332 * This routine is invoked to unset the HBA device initialization steps to 7333 * a device with SLI-3 interface spec. 7334 **/ 7335static void 7336lpfc_unset_hba(struct lpfc_hba *phba) 7337{ 7338 struct lpfc_vport *vport = phba->pport; 7339 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7340 7341 spin_lock_irq(shost->host_lock); 7342 vport->load_flag |= FC_UNLOADING; 7343 spin_unlock_irq(shost->host_lock); 7344 7345 lpfc_stop_hba_timers(phba); 7346 7347 phba->pport->work_port_events = 0; 7348 7349 lpfc_sli_hba_down(phba); 7350 7351 lpfc_sli_brdrestart(phba); 7352 7353 lpfc_sli_disable_intr(phba); 7354 7355 return; 7356} 7357 7358/** 7359 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 7360 * @phba: pointer to lpfc hba data structure. 7361 * 7362 * This routine is invoked to unset the HBA device initialization steps to 7363 * a device with SLI-4 interface spec. 
7364 **/
7365 static void
7366 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7367 {
7368 struct lpfc_vport *vport = phba->pport;
7369 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7370
7371 spin_lock_irq(shost->host_lock);
7372 vport->load_flag |= FC_UNLOADING;
7373 spin_unlock_irq(shost->host_lock);
7374
7375 phba->pport->work_port_events = 0;
7376
7377 lpfc_sli4_hba_down(phba);
7378
7379 lpfc_sli4_disable_intr(phba);
7380
7381 return;
7382 }
7383
7384 /**
7385 * lpfc_sli4_hba_unset - Unset the fcoe hba
7386 * @phba: Pointer to HBA context object.
7387 *
7388 * This function is called in the SLI4 code path to reset the HBA's FCoE
7389 * function. The caller is not required to hold any lock. This routine
7390 * issues PCI function reset mailbox command to reset the FCoE function.
7391 * At the end of the function, it calls lpfc_hba_down_post function to
7392 * free any pending commands.
7393 **/
7394 static void
7395 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7396 {
7397 int wait_cnt = 0;
7398 LPFC_MBOXQ_t *mboxq;
7399
7400 lpfc_stop_hba_timers(phba);
7401 phba->sli4_hba.intr_enable = 0;
7402
7403 /*
7404 * Gracefully wait out the potential current outstanding asynchronous
7405 * mailbox command.
7406 */
7407
7408 /* First, block any pending async mailbox command from being posted */
7409 spin_lock_irq(&phba->hbalock);
7410 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7411 spin_unlock_irq(&phba->hbalock);
7412 /* Now, try to wait it out if we can */
7413 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7414 msleep(10);
7415 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7416 break;
7417 }
7418 /* Forcefully release the outstanding mailbox command if timed out */
7419 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7420 spin_lock_irq(&phba->hbalock);
7421 mboxq = phba->sli.mbox_active;
7422 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7423 __lpfc_mbox_cmpl_put(phba, mboxq);
7424 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7425 phba->sli.mbox_active = NULL;
7426 spin_unlock_irq(&phba->hbalock);
7427 }
7428
7429 /* Tear down the queues in the HBA */
7430 lpfc_sli4_queue_unset(phba);
7431
7432 /* Disable PCI subsystem interrupt */
7433 lpfc_sli4_disable_intr(phba);
7434
7435 /* Stop kthread signal shall trigger work_done one more time */
7436 kthread_stop(phba->worker_thread);
7437
7438 /* Stop the SLI4 device port */
7439 phba->pport->work_port_events = 0;
7440 }
7441
7442 /**
7443 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7444 * @phba: Pointer to HBA context object.
7445 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7446 *
7447 * This function is called in the SLI4 code path to read the port's
7448 * sli4 capabilities.
7449 *
7450 * This function may be called from any context that can block-wait
7451 * for the completion. The expectation is that this routine is called
7452 * typically from probe_one or from the online routine.
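 *
 * Return codes
 * 0 - successful
 * 1 - failed to retrieve the SLI4_PARAMS capabilities from the port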
7453 **/ 7454int 7455lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7456{ 7457 int rc; 7458 struct lpfc_mqe *mqe; 7459 struct lpfc_pc_sli4_params *sli4_params; 7460 uint32_t mbox_tmo; 7461 7462 rc = 0; 7463 mqe = &mboxq->u.mqe; 7464 7465 /* Read the port's SLI4 Parameters port capabilities */ 7466 lpfc_sli4_params(mboxq); 7467 if (!phba->sli4_hba.intr_enable) 7468 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7469 else { 7470 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); 7471 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7472 } 7473 7474 if (unlikely(rc)) 7475 return 1; 7476 7477 sli4_params = &phba->sli4_hba.pc_sli4_params; 7478 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 7479 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 7480 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 7481 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 7482 &mqe->un.sli4_params); 7483 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 7484 &mqe->un.sli4_params); 7485 sli4_params->proto_types = mqe->un.sli4_params.word3; 7486 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 7487 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 7488 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 7489 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 7490 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 7491 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 7492 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 7493 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 7494 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 7495 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 7496 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 7497 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 7498 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 7499 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 7500 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 7501 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 7502 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 7503 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 7504 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 7505 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 7506 return rc; 7507} 7508 7509/** 7510 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7511 * @pdev: pointer to PCI device 7512 * @pid: pointer to PCI device identifier 7513 * 7514 * This routine is to be called to attach a device with SLI-3 interface spec 7515 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7516 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 7517 * information of the device and driver to see if the driver state that it can 7518 * support this kind of device. If the match is successful, the driver core 7519 * invokes this routine. If this routine determines it can claim the HBA, it 7520 * does all the initialization that it needs to do to handle the HBA properly. 
7521 * 7522 * Return code 7523 * 0 - driver can claim the device 7524 * negative value - driver can not claim the device 7525 **/ 7526static int __devinit 7527lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 7528{ 7529 struct lpfc_hba *phba; 7530 struct lpfc_vport *vport = NULL; 7531 struct Scsi_Host *shost = NULL; 7532 int error; 7533 uint32_t cfg_mode, intr_mode; 7534 7535 /* Allocate memory for HBA structure */ 7536 phba = lpfc_hba_alloc(pdev); 7537 if (!phba) 7538 return -ENOMEM; 7539 7540 /* Perform generic PCI device enabling operation */ 7541 error = lpfc_enable_pci_dev(phba); 7542 if (error) { 7543 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7544 "1401 Failed to enable pci device.\n"); 7545 goto out_free_phba; 7546 } 7547 7548 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 7549 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 7550 if (error) 7551 goto out_disable_pci_dev; 7552 7553 /* Set up SLI-3 specific device PCI memory space */ 7554 error = lpfc_sli_pci_mem_setup(phba); 7555 if (error) { 7556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7557 "1402 Failed to set up pci memory space.\n"); 7558 goto out_disable_pci_dev; 7559 } 7560 7561 /* Set up phase-1 common device driver resources */ 7562 error = lpfc_setup_driver_resource_phase1(phba); 7563 if (error) { 7564 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7565 "1403 Failed to set up driver resource.\n"); 7566 goto out_unset_pci_mem_s3; 7567 } 7568 7569 /* Set up SLI-3 specific device driver resources */ 7570 error = lpfc_sli_driver_resource_setup(phba); 7571 if (error) { 7572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7573 "1404 Failed to set up driver resource.\n"); 7574 goto out_unset_pci_mem_s3; 7575 } 7576 7577 /* Initialize and populate the iocb list per host */ 7578 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 7579 if (error) { 7580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7581 "1405 Failed to initialize iocb list.\n"); 7582 goto out_unset_driver_resource_s3; 7583 } 7584 7585 /* Set up common device driver resources */ 7586 error = lpfc_setup_driver_resource_phase2(phba); 7587 if (error) { 7588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7589 "1406 Failed to set up driver resource.\n"); 7590 goto out_free_iocb_list; 7591 } 7592 7593 /* Create SCSI host to the physical port */ 7594 error = lpfc_create_shost(phba); 7595 if (error) { 7596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7597 "1407 Failed to create scsi host.\n"); 7598 goto out_unset_driver_resource; 7599 } 7600 7601 /* Configure sysfs attributes */ 7602 vport = phba->pport; 7603 error = lpfc_alloc_sysfs_attr(vport); 7604 if (error) { 7605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7606 "1476 Failed to allocate sysfs attr\n"); 7607 goto out_destroy_shost; 7608 } 7609 7610 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 7611 /* Now, trying to enable interrupt and bring up the device */ 7612 cfg_mode = phba->cfg_use_msi; 7613 while (true) { 7614 /* Put device to a known state before enabling interrupt */ 7615 lpfc_stop_port(phba); 7616 /* Configure and enable interrupt */ 7617 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 7618 if (intr_mode == LPFC_INTR_ERROR) { 7619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7620 "0431 Failed to enable interrupt.\n"); 7621 error = -ENODEV; 7622 goto out_free_sysfs_attr; 7623 } 7624 /* SLI-3 HBA setup */ 7625 if (lpfc_sli_hba_setup(phba)) { 7626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7627 "1477 Failed to set up hba\n"); 7628 error = -ENODEV; 7629 goto 
out_remove_device;
7630 }
7631
7632 /* Wait 50ms for the interrupts of previous mailbox commands */
7633 msleep(50);
7634 /* Check active interrupts on message signaled interrupts */
7635 if (intr_mode == 0 ||
7636 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7637 /* Log the current active interrupt mode */
7638 phba->intr_mode = intr_mode;
7639 lpfc_log_intr_mode(phba, intr_mode);
7640 break;
7641 } else {
7642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7643 "0447 Configure interrupt mode (%d) "
7644 "failed active interrupt test.\n",
7645 intr_mode);
7646 /* Disable the current interrupt mode */
7647 lpfc_sli_disable_intr(phba);
7648 /* Try next level of interrupt mode */
7649 cfg_mode = --intr_mode;
7650 }
7651 }
7652
7653 /* Perform post initialization setup */
7654 lpfc_post_init_setup(phba);
7655
7656 /* Check if there are static vports to be created. */
7657 lpfc_create_static_vport(phba);
7658
7659 return 0;
7660
7661 out_remove_device:
7662 lpfc_unset_hba(phba);
7663 out_free_sysfs_attr:
7664 lpfc_free_sysfs_attr(vport);
7665 out_destroy_shost:
7666 lpfc_destroy_shost(phba);
7667 out_unset_driver_resource:
7668 lpfc_unset_driver_resource_phase2(phba);
7669 out_free_iocb_list:
7670 lpfc_free_iocb_list(phba);
7671 out_unset_driver_resource_s3:
7672 lpfc_sli_driver_resource_unset(phba);
7673 out_unset_pci_mem_s3:
7674 lpfc_sli_pci_mem_unset(phba);
7675 out_disable_pci_dev:
7676 lpfc_disable_pci_dev(phba);
7677 if (shost)
7678 scsi_host_put(shost);
7679 out_free_phba:
7680 lpfc_hba_free(phba);
7681 return error;
7682 }
7683
7684 /**
7685 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7686 * @pdev: pointer to PCI device
7687 *
7688 * This routine is to be called to detach a device with SLI-3 interface
7689 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7690 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7691 * device to be removed from the PCI subsystem properly.
7692 **/
7693 static void __devexit
7694 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7695 {
7696 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7697 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7698 struct lpfc_vport **vports;
7699 struct lpfc_hba *phba = vport->phba;
7700 int i;
7701 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7702
7703 spin_lock_irq(&phba->hbalock);
7704 vport->load_flag |= FC_UNLOADING;
7705 spin_unlock_irq(&phba->hbalock);
7706
7707 lpfc_free_sysfs_attr(vport);
7708
7709 /* Release all the vports against this physical port */
7710 vports = lpfc_create_vport_work_array(phba);
7711 if (vports != NULL)
7712 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7713 fc_vport_terminate(vports[i]->fc_vport);
7714 lpfc_destroy_vport_work_array(phba, vports);
7715
7716 /* Remove FC host and then SCSI host with the physical port */
7717 fc_remove_host(shost);
7718 scsi_remove_host(shost);
7719 lpfc_cleanup(vport);
7720
7721 /*
7722 * Bring down the SLI Layer. This step disables all interrupts,
7723 * clears the rings, discards all mailbox commands, and resets
7724 * the HBA.
7725 */
7726
7727 /* HBA interrupt will be disabled after this call */
7728 lpfc_sli_hba_down(phba);
7729 /* Stop kthread signal shall trigger work_done one more time */
7730 kthread_stop(phba->worker_thread);
7731 /* Final cleanup of txcmplq and reset the HBA */
7732 lpfc_sli_brdrestart(phba);
7733
7734 lpfc_stop_hba_timers(phba);
7735 spin_lock_irq(&phba->hbalock);
7736 list_del_init(&vport->listentry);
7737 spin_unlock_irq(&phba->hbalock);
7738
7739 lpfc_debugfs_terminate(vport);
7740
7741 /* Disable interrupt */
7742 lpfc_sli_disable_intr(phba);
7743
7744 pci_set_drvdata(pdev, NULL);
7745 scsi_host_put(shost);
7746
7747 /*
7748 * Call scsi_free before mem_free since scsi bufs are released to their
7749 * corresponding pools here.
7750 */
7751 lpfc_scsi_free(phba);
7752 lpfc_mem_free_all(phba);
7753
7754 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7755 phba->hbqslimp.virt, phba->hbqslimp.phys);
7756
7757 /* Free resources associated with SLI2 interface */
7758 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7759 phba->slim2p.virt, phba->slim2p.phys);
7760
7761 /* unmap adapter SLIM and Control Registers */
7762 iounmap(phba->ctrl_regs_memmap_p);
7763 iounmap(phba->slim_memmap_p);
7764
7765 lpfc_hba_free(phba);
7766
7767 pci_release_selected_regions(pdev, bars);
7768 pci_disable_device(pdev);
7769 }
7770
7771 /**
7772 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7773 * @pdev: pointer to PCI device
7774 * @msg: power management message
7775 *
7776 * This routine is to be called from the kernel's PCI subsystem to support
7777 * system Power Management (PM) to device with SLI-3 interface spec. When
7778 * PM invokes this method, it quiesces the device by stopping the driver's
7779 * worker thread for the device, turning off device's interrupt and DMA,
7780 * and bringing the device offline. Note that as the driver implements the
7781 * minimum PM requirements to a power-aware driver's PM support for the
7782 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7783 * to the suspend() method call will be treated as SUSPEND and the driver will
7784 * fully reinitialize its device during resume() method call, the driver will
7785 * set device to PCI_D3hot state in PCI config space instead of setting it
7786 * according to the @msg provided by the PM.
7787 *
7788 * Return code
7789 * 0 - driver suspended the device
7790 * Error otherwise
7791 **/
7792 static int
7793 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7794 {
7795 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7796 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7797
7798 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7799 "0473 PCI device Power Management suspend.\n");
7800
7801 /* Bring down the device */
7802 lpfc_offline_prep(phba);
7803 lpfc_offline(phba);
7804 kthread_stop(phba->worker_thread);
7805
7806 /* Disable interrupt from device */
7807 lpfc_sli_disable_intr(phba);
7808
7809 /* Save device state to PCI config space */
7810 pci_save_state(pdev);
7811 pci_set_power_state(pdev, PCI_D3hot);
7812
7813 return 0;
7814 }
7815
7816 /**
7817 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7818 * @pdev: pointer to PCI device
7819 *
7820 * This routine is to be called from the kernel's PCI subsystem to support
7821 * system Power Management (PM) to device with SLI-3 interface spec.
When PM
7822 * invokes this method, it restores the device's PCI config space state and
7823 * fully reinitializes the device and brings it online. Note that as the
7824 * driver implements the minimum PM requirements to a power-aware driver's
7825 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
7826 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
7827 * driver will fully reinitialize its device during resume() method call,
7828 * the device will be set to PCI_D0 directly in PCI config space before
7829 * restoring the state.
7830 *
7831 * Return code
7832 * 0 - driver resumed the device
7833 * Error otherwise
7834 **/
7835 static int
7836 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7837 {
7838 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7839 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7840 uint32_t intr_mode;
7841 int error;
7842
7843 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7844 "0452 PCI device Power Management resume.\n");
7845
7846 /* Restore device state from PCI config space */
7847 pci_set_power_state(pdev, PCI_D0);
7848 pci_restore_state(pdev);
7849
7850 /*
7851 * As the new kernel behavior of pci_restore_state() API call clears
7852 * device saved_state flag, need to save the restored state again.
7853 */
7854 pci_save_state(pdev);
7855
7856 if (pdev->is_busmaster)
7857 pci_set_master(pdev);
7858
7859 /* Startup the kernel thread for this host adapter. */
7860 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7861 "lpfc_worker_%d", phba->brd_no);
7862 if (IS_ERR(phba->worker_thread)) {
7863 error = PTR_ERR(phba->worker_thread);
7864 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7865 "0434 PM resume failed to start worker "
7866 "thread: error=x%x.\n", error);
7867 return error;
7868 }
7869
7870 /* Configure and enable interrupt */
7871 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7872 if (intr_mode == LPFC_INTR_ERROR) {
7873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7874 "0430 PM resume Failed to enable interrupt\n");
7875 return -EIO;
7876 } else
7877 phba->intr_mode = intr_mode;
7878
7879 /* Restart HBA and bring it online */
7880 lpfc_sli_brdrestart(phba);
7881 lpfc_online(phba);
7882
7883 /* Log the current active interrupt mode */
7884 lpfc_log_intr_mode(phba, phba->intr_mode);
7885
7886 return 0;
7887 }
7888
7889 /**
7890 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7891 * @phba: pointer to lpfc hba data structure.
7892 *
7893 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7894 * aborts all the outstanding SCSI I/Os to the pci device.
7895 **/
7896 static void
7897 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7898 {
7899 struct lpfc_sli *psli = &phba->sli;
7900 struct lpfc_sli_ring *pring;
7901
7902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7903 "2723 PCI channel I/O abort preparing for recovery\n");
7904
7905 /*
7906 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
7907 * and let the SCSI mid-layer retry them to recover.
7908 */
7909 pring = &psli->ring[psli->fcp_ring];
7910 lpfc_sli_abort_iocb_ring(phba, pring);
7911 }
7912
7913 /**
7914 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7915 * @phba: pointer to lpfc hba data structure.
7916 *
7917 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7918 * disables the device interrupt and pci device, and aborts the internal FCP
7919 * pending I/Os.
7920 **/ 7921static void 7922lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 7923{ 7924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7925 "2710 PCI channel disable preparing for reset\n"); 7926 7927 /* Block any management I/Os to the device */ 7928 lpfc_block_mgmt_io(phba); 7929 7930 /* Block all SCSI devices' I/Os on the host */ 7931 lpfc_scsi_dev_block(phba); 7932 7933 /* stop all timers */ 7934 lpfc_stop_hba_timers(phba); 7935 7936 /* Disable interrupt and pci device */ 7937 lpfc_sli_disable_intr(phba); 7938 pci_disable_device(phba->pcidev); 7939 7940 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 7941 lpfc_sli_flush_fcp_rings(phba); 7942} 7943 7944/** 7945 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 7946 * @phba: pointer to lpfc hba data structure. 7947 * 7948 * This routine is called to prepare the SLI3 device for PCI slot permanently 7949 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 7950 * pending I/Os. 7951 **/ 7952static void 7953lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 7954{ 7955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7956 "2711 PCI channel permanent disable for failure\n"); 7957 /* Block all SCSI devices' I/Os on the host */ 7958 lpfc_scsi_dev_block(phba); 7959 7960 /* stop all timers */ 7961 lpfc_stop_hba_timers(phba); 7962 7963 /* Clean up all driver's outstanding SCSI I/Os */ 7964 lpfc_sli_flush_fcp_rings(phba); 7965} 7966 7967/** 7968 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7969 * @pdev: pointer to PCI device. 7970 * @state: the current PCI connection state. 7971 * 7972 * This routine is called from the PCI subsystem for I/O error handling to 7973 * device with SLI-3 interface spec. This function is called by the PCI 7974 * subsystem after a PCI bus error affecting this device has been detected. 7975 * When this function is invoked, it will need to stop all the I/Os and 7976 * interrupt(s) to the device. Once that is done, it will return 7977 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 7978 * as desired. 7979 * 7980 * Return codes 7981 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 7982 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7983 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7984 **/ 7985static pci_ers_result_t 7986lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 7987{ 7988 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7989 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7990 7991 switch (state) { 7992 case pci_channel_io_normal: 7993 /* Non-fatal error, prepare for recovery */ 7994 lpfc_sli_prep_dev_for_recover(phba); 7995 return PCI_ERS_RESULT_CAN_RECOVER; 7996 case pci_channel_io_frozen: 7997 /* Fatal error, prepare for slot reset */ 7998 lpfc_sli_prep_dev_for_reset(phba); 7999 return PCI_ERS_RESULT_NEED_RESET; 8000 case pci_channel_io_perm_failure: 8001 /* Permanent failure, prepare for device down */ 8002 lpfc_sli_prep_dev_for_perm_failure(phba); 8003 return PCI_ERS_RESULT_DISCONNECT; 8004 default: 8005 /* Unknown state, prepare and request slot reset */ 8006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8007 "0472 Unknown PCI error state: x%x\n", state); 8008 lpfc_sli_prep_dev_for_reset(phba); 8009 return PCI_ERS_RESULT_NEED_RESET; 8010 } 8011} 8012 8013/** 8014 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8015 * @pdev: pointer to PCI device. 
8016 * 8017 * This routine is called from the PCI subsystem for error handling to 8018 * device with SLI-3 interface spec. This is called after PCI bus has been 8019 * reset to restart the PCI card from scratch, as if from a cold-boot. 8020 * During the PCI subsystem error recovery, after driver returns 8021 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8022 * recovery and then call this routine before calling the .resume method 8023 * to recover the device. This function will initialize the HBA device, 8024 * enable the interrupt, but it will just put the HBA to offline state 8025 * without passing any I/O traffic. 8026 * 8027 * Return codes 8028 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8029 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8030 */ 8031static pci_ers_result_t 8032lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8033{ 8034 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8035 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8036 struct lpfc_sli *psli = &phba->sli; 8037 uint32_t intr_mode; 8038 8039 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8040 if (pci_enable_device_mem(pdev)) { 8041 printk(KERN_ERR "lpfc: Cannot re-enable " 8042 "PCI device after reset.\n"); 8043 return PCI_ERS_RESULT_DISCONNECT; 8044 } 8045 8046 pci_restore_state(pdev); 8047 8048 /* 8049 * As the new kernel behavior of pci_restore_state() API call clears 8050 * device saved_state flag, need to save the restored state again. 8051 */ 8052 pci_save_state(pdev); 8053 8054 if (pdev->is_busmaster) 8055 pci_set_master(pdev); 8056 8057 spin_lock_irq(&phba->hbalock); 8058 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8059 spin_unlock_irq(&phba->hbalock); 8060 8061 /* Configure and enable interrupt */ 8062 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8063 if (intr_mode == LPFC_INTR_ERROR) { 8064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8065 "0427 Cannot re-enable interrupt after " 8066 "slot reset.\n"); 8067 return PCI_ERS_RESULT_DISCONNECT; 8068 } else 8069 phba->intr_mode = intr_mode; 8070 8071 /* Take device offline, it will perform cleanup */ 8072 lpfc_offline_prep(phba); 8073 lpfc_offline(phba); 8074 lpfc_sli_brdrestart(phba); 8075 8076 /* Log the current active interrupt mode */ 8077 lpfc_log_intr_mode(phba, phba->intr_mode); 8078 8079 return PCI_ERS_RESULT_RECOVERED; 8080} 8081 8082/** 8083 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 8084 * @pdev: pointer to PCI device 8085 * 8086 * This routine is called from the PCI subsystem for error handling to device 8087 * with SLI-3 interface spec. It is called when kernel error recovery tells 8088 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8089 * error recovery. After this call, traffic can start to flow from this device 8090 * again. 8091 */ 8092static void 8093lpfc_io_resume_s3(struct pci_dev *pdev) 8094{ 8095 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8096 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8097 8098 /* Bring device online, it will be no-op for non-fatal error resume */ 8099 lpfc_online(phba); 8100 8101 /* Clean up Advanced Error Reporting (AER) if needed */ 8102 if (phba->hba_flag & HBA_AER_ENABLED) 8103 pci_cleanup_aer_uncorrect_error_status(pdev); 8104} 8105 8106/** 8107 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 8108 * @phba: pointer to lpfc hba data structure. 
8109 * 8110 * returns the number of ELS/CT IOCBs to reserve 8111 **/ 8112int 8113lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8114{ 8115 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8116 8117 if (phba->sli_rev == LPFC_SLI_REV4) { 8118 if (max_xri <= 100) 8119 return 10; 8120 else if (max_xri <= 256) 8121 return 25; 8122 else if (max_xri <= 512) 8123 return 50; 8124 else if (max_xri <= 1024) 8125 return 100; 8126 else 8127 return 150; 8128 } else 8129 return 0; 8130} 8131 8132/** 8133 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8134 * @pdev: pointer to PCI device 8135 * @pid: pointer to PCI device identifier 8136 * 8137 * This routine is called from the kernel's PCI subsystem to device with 8138 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8139 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8140 * information of the device and driver to see if the driver state that it 8141 * can support this kind of device. If the match is successful, the driver 8142 * core invokes this routine. If this routine determines it can claim the HBA, 8143 * it does all the initialization that it needs to do to handle the HBA 8144 * properly. 8145 * 8146 * Return code 8147 * 0 - driver can claim the device 8148 * negative value - driver can not claim the device 8149 **/ 8150static int __devinit 8151lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 8152{ 8153 struct lpfc_hba *phba; 8154 struct lpfc_vport *vport = NULL; 8155 struct Scsi_Host *shost = NULL; 8156 int error; 8157 uint32_t cfg_mode, intr_mode; 8158 int mcnt; 8159 8160 /* Allocate memory for HBA structure */ 8161 phba = lpfc_hba_alloc(pdev); 8162 if (!phba) 8163 return -ENOMEM; 8164 8165 /* Perform generic PCI device enabling operation */ 8166 error = lpfc_enable_pci_dev(phba); 8167 if (error) { 8168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8169 "1409 Failed to enable pci device.\n"); 8170 goto out_free_phba; 8171 } 8172 8173 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 8174 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 8175 if (error) 8176 goto out_disable_pci_dev; 8177 8178 /* Set up SLI-4 specific device PCI memory space */ 8179 error = lpfc_sli4_pci_mem_setup(phba); 8180 if (error) { 8181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8182 "1410 Failed to set up pci memory space.\n"); 8183 goto out_disable_pci_dev; 8184 } 8185 8186 /* Set up phase-1 common device driver resources */ 8187 error = lpfc_setup_driver_resource_phase1(phba); 8188 if (error) { 8189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8190 "1411 Failed to set up driver resource.\n"); 8191 goto out_unset_pci_mem_s4; 8192 } 8193 8194 /* Set up SLI-4 Specific device driver resources */ 8195 error = lpfc_sli4_driver_resource_setup(phba); 8196 if (error) { 8197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8198 "1412 Failed to set up driver resource.\n"); 8199 goto out_unset_pci_mem_s4; 8200 } 8201 8202 /* Initialize and populate the iocb list per host */ 8203 8204 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8205 "2821 initialize iocb list %d.\n", 8206 phba->cfg_iocb_cnt*1024); 8207 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 8208 8209 if (error) { 8210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8211 "1413 Failed to initialize iocb list.\n"); 8212 goto out_unset_driver_resource_s4; 8213 } 8214 8215 /* Set up common device driver resources */ 8216 error = lpfc_setup_driver_resource_phase2(phba); 8217 if (error) { 8218 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8219 "1414 Failed to set up driver resource.\n"); 8220 goto out_free_iocb_list; 8221 } 8222 8223 /* Create SCSI host to the physical port */ 8224 error = lpfc_create_shost(phba); 8225 if (error) { 8226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8227 "1415 Failed to create scsi host.\n"); 8228 goto out_unset_driver_resource; 8229 } 8230 8231 /* Configure sysfs attributes */ 8232 vport = phba->pport; 8233 error = lpfc_alloc_sysfs_attr(vport); 8234 if (error) { 8235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8236 "1416 Failed to allocate sysfs attr\n"); 8237 goto out_destroy_shost; 8238 } 8239 8240 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8241 /* Now, trying to enable interrupt and bring up the device */ 8242 cfg_mode = phba->cfg_use_msi; 8243 while (true) { 8244 /* Put device to a known state before enabling interrupt */ 8245 lpfc_stop_port(phba); 8246 /* Configure and enable interrupt */ 8247 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 8248 if (intr_mode == LPFC_INTR_ERROR) { 8249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8250 "0426 Failed to enable interrupt.\n"); 8251 error = -ENODEV; 8252 goto out_free_sysfs_attr; 8253 } 8254 /* Default to single FCP EQ for non-MSI-X */ 8255 if (phba->intr_type != MSIX) 8256 phba->cfg_fcp_eq_count = 1; 8257 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 8258 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 8259 /* Set up SLI-4 HBA */ 8260 if (lpfc_sli4_hba_setup(phba)) { 8261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8262 "1421 Failed to set up hba\n"); 8263 error = -ENODEV; 8264 goto out_disable_intr; 8265 } 8266 8267 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 8268 if (intr_mode != 0) 8269 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 8270 LPFC_ACT_INTR_CNT); 8271 8272 /* Check active interrupts received only for MSI/MSI-X */ 8273 if (intr_mode == 0 || 8274 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 8275 /* Log the current active interrupt mode */ 8276 phba->intr_mode = intr_mode; 8277 lpfc_log_intr_mode(phba, intr_mode); 8278 break; 8279 } 8280 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8281 "0451 Configure interrupt mode (%d) " 8282 "failed active interrupt test.\n", 8283 intr_mode); 8284 /* Unset the preivous SLI-4 HBA setup */ 8285 lpfc_sli4_unset_hba(phba); 8286 /* Try next level of interrupt mode */ 8287 cfg_mode = --intr_mode; 8288 } 8289 8290 /* Perform post initialization setup */ 8291 lpfc_post_init_setup(phba); 8292 8293 /* Check if there are static vports to be created. */ 8294 lpfc_create_static_vport(phba); 8295 8296 return 0; 8297 8298out_disable_intr: 8299 lpfc_sli4_disable_intr(phba); 8300out_free_sysfs_attr: 8301 lpfc_free_sysfs_attr(vport); 8302out_destroy_shost: 8303 lpfc_destroy_shost(phba); 8304out_unset_driver_resource: 8305 lpfc_unset_driver_resource_phase2(phba); 8306out_free_iocb_list: 8307 lpfc_free_iocb_list(phba); 8308out_unset_driver_resource_s4: 8309 lpfc_sli4_driver_resource_unset(phba); 8310out_unset_pci_mem_s4: 8311 lpfc_sli4_pci_mem_unset(phba); 8312out_disable_pci_dev: 8313 lpfc_disable_pci_dev(phba); 8314 if (shost) 8315 scsi_host_put(shost); 8316out_free_phba: 8317 lpfc_hba_free(phba); 8318 return error; 8319} 8320 8321/** 8322 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 8323 * @pdev: pointer to PCI device 8324 * 8325 * This routine is called from the kernel's PCI subsystem to device with 8326 * SLI-4 interface spec. 
When an Emulex HBA with SLI-4 interface spec is 8327 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8328 * device to be removed from the PCI subsystem properly. 8329 **/ 8330static void __devexit 8331lpfc_pci_remove_one_s4(struct pci_dev *pdev) 8332{ 8333 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8334 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8335 struct lpfc_vport **vports; 8336 struct lpfc_hba *phba = vport->phba; 8337 int i; 8338 8339 /* Mark the device unloading flag */ 8340 spin_lock_irq(&phba->hbalock); 8341 vport->load_flag |= FC_UNLOADING; 8342 spin_unlock_irq(&phba->hbalock); 8343 8344 /* Free the HBA sysfs attributes */ 8345 lpfc_free_sysfs_attr(vport); 8346 8347 /* Release all the vports against this physical port */ 8348 vports = lpfc_create_vport_work_array(phba); 8349 if (vports != NULL) 8350 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8351 fc_vport_terminate(vports[i]->fc_vport); 8352 lpfc_destroy_vport_work_array(phba, vports); 8353 8354 /* Remove FC host and then SCSI host with the physical port */ 8355 fc_remove_host(shost); 8356 scsi_remove_host(shost); 8357 8358 /* Perform cleanup on the physical port */ 8359 lpfc_cleanup(vport); 8360 8361 /* 8362 * Bring down the SLI Layer. This step disables all interrupts, 8363 * clears the rings, discards all mailbox commands, and resets 8364 * the HBA FCoE function. 8365 */ 8366 lpfc_debugfs_terminate(vport); 8367 lpfc_sli4_hba_unset(phba); 8368 8369 spin_lock_irq(&phba->hbalock); 8370 list_del_init(&vport->listentry); 8371 spin_unlock_irq(&phba->hbalock); 8372 8373 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi 8374 * buffers are released to their corresponding pools here. 8375 */ 8376 lpfc_scsi_free(phba); 8377 lpfc_sli4_driver_resource_unset(phba); 8378 8379 /* Unmap adapter Control and Doorbell registers */ 8380 lpfc_sli4_pci_mem_unset(phba); 8381 8382 /* Release PCI resources and disable device's PCI function */ 8383 scsi_host_put(shost); 8384 lpfc_disable_pci_dev(phba); 8385 8386 /* Finally, free the driver's device data structure */ 8387 lpfc_hba_free(phba); 8388 8389 return; 8390} 8391 8392/** 8393 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 8394 * @pdev: pointer to PCI device 8395 * @msg: power management message 8396 * 8397 * This routine is called from the kernel's PCI subsystem to support system 8398 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 8399 * this method, it quiesces the device by stopping the driver's worker 8400 * thread for the device, turning off device's interrupt and DMA, and bring 8401 * the device offline. Note that as the driver implements the minimum PM 8402 * requirements to a power-aware driver's PM support for suspend/resume -- all 8403 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 8404 * method call will be treated as SUSPEND and the driver will fully 8405 * reinitialize its device during resume() method call, the driver will set 8406 * device to PCI_D3hot state in PCI config space instead of setting it 8407 * according to the @msg provided by the PM. 
8408 *
8409 * Return code
8410 * 0 - driver suspended the device
8411 * Error otherwise
8412 **/
8413 static int
8414 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8415 {
8416 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8417 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8418
8419 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8420 "2843 PCI device Power Management suspend.\n");
8421
8422 /* Bring down the device */
8423 lpfc_offline_prep(phba);
8424 lpfc_offline(phba);
8425 kthread_stop(phba->worker_thread);
8426
8427 /* Disable interrupt from device */
8428 lpfc_sli4_disable_intr(phba);
8429
8430 /* Save device state to PCI config space */
8431 pci_save_state(pdev);
8432 pci_set_power_state(pdev, PCI_D3hot);
8433
8434 return 0;
8435 }
8436
8437 /**
8438 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8439 * @pdev: pointer to PCI device
8440 *
8441 * This routine is called from the kernel's PCI subsystem to support system
8442 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8443 * this method, it restores the device's PCI config space state and fully
8444 * reinitializes the device and brings it online. Note that as the driver
8445 * implements the minimum PM requirements to a power-aware driver's PM for
8446 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8447 * to the suspend() method call will be treated as SUSPEND and the driver
8448 * will fully reinitialize its device during resume() method call, the device
8449 * will be set to PCI_D0 directly in PCI config space before restoring the
8450 * state.
8451 *
8452 * Return code
8453 * 0 - driver resumed the device
8454 * Error otherwise
8455 **/
8456 static int
8457 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8458 {
8459 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8460 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8461 uint32_t intr_mode;
8462 int error;
8463
8464 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8465 "0292 PCI device Power Management resume.\n");
8466
8467 /* Restore device state from PCI config space */
8468 pci_set_power_state(pdev, PCI_D0);
8469 pci_restore_state(pdev);
8470
8471 /*
8472 * As the new kernel behavior of pci_restore_state() API call clears
8473 * device saved_state flag, need to save the restored state again.
8474 */
8475 pci_save_state(pdev);
8476
8477 if (pdev->is_busmaster)
8478 pci_set_master(pdev);
8479
8480 /* Startup the kernel thread for this host adapter.
*/ 8481 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8482 "lpfc_worker_%d", phba->brd_no); 8483 if (IS_ERR(phba->worker_thread)) { 8484 error = PTR_ERR(phba->worker_thread); 8485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8486 "0293 PM resume failed to start worker " 8487 "thread: error=x%x.\n", error); 8488 return error; 8489 } 8490 8491 /* Configure and enable interrupt */ 8492 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8493 if (intr_mode == LPFC_INTR_ERROR) { 8494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8495 "0294 PM resume Failed to enable interrupt\n"); 8496 return -EIO; 8497 } else 8498 phba->intr_mode = intr_mode; 8499 8500 /* Restart HBA and bring it online */ 8501 lpfc_sli_brdrestart(phba); 8502 lpfc_online(phba); 8503 8504 /* Log the current active interrupt mode */ 8505 lpfc_log_intr_mode(phba, phba->intr_mode); 8506 8507 return 0; 8508} 8509 8510/** 8511 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 8512 * @phba: pointer to lpfc hba data structure. 8513 * 8514 * This routine is called to prepare the SLI4 device for PCI slot recover. It 8515 * aborts all the outstanding SCSI I/Os to the pci device. 8516 **/ 8517static void 8518lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 8519{ 8520 struct lpfc_sli *psli = &phba->sli; 8521 struct lpfc_sli_ring *pring; 8522 8523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8524 "2828 PCI channel I/O abort preparing for recovery\n"); 8525 /* 8526 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 8527 * and let the SCSI mid-layer to retry them to recover. 8528 */ 8529 pring = &psli->ring[psli->fcp_ring]; 8530 lpfc_sli_abort_iocb_ring(phba, pring); 8531} 8532 8533/** 8534 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 8535 * @phba: pointer to lpfc hba data structure. 8536 * 8537 * This routine is called to prepare the SLI4 device for PCI slot reset. It 8538 * disables the device interrupt and pci device, and aborts the internal FCP 8539 * pending I/Os. 8540 **/ 8541static void 8542lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 8543{ 8544 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8545 "2826 PCI channel disable preparing for reset\n"); 8546 8547 /* Block any management I/Os to the device */ 8548 lpfc_block_mgmt_io(phba); 8549 8550 /* Block all SCSI devices' I/Os on the host */ 8551 lpfc_scsi_dev_block(phba); 8552 8553 /* stop all timers */ 8554 lpfc_stop_hba_timers(phba); 8555 8556 /* Disable interrupt and pci device */ 8557 lpfc_sli4_disable_intr(phba); 8558 pci_disable_device(phba->pcidev); 8559 8560 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8561 lpfc_sli_flush_fcp_rings(phba); 8562} 8563 8564/** 8565 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 8566 * @phba: pointer to lpfc hba data structure. 8567 * 8568 * This routine is called to prepare the SLI4 device for PCI slot permanently 8569 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8570 * pending I/Os. 
8571 **/ 8572static void 8573lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8574{ 8575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8576 "2827 PCI channel permanent disable for failure\n"); 8577 8578 /* Block all SCSI devices' I/Os on the host */ 8579 lpfc_scsi_dev_block(phba); 8580 8581 /* stop all timers */ 8582 lpfc_stop_hba_timers(phba); 8583 8584 /* Clean up all driver's outstanding SCSI I/Os */ 8585 lpfc_sli_flush_fcp_rings(phba); 8586} 8587 8588/** 8589 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 8590 * @pdev: pointer to PCI device. 8591 * @state: the current PCI connection state. 8592 * 8593 * This routine is called from the PCI subsystem for error handling to device 8594 * with SLI-4 interface spec. This function is called by the PCI subsystem 8595 * after a PCI bus error affecting this device has been detected. When this 8596 * function is invoked, it will need to stop all the I/Os and interrupt(s) 8597 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 8598 * for the PCI subsystem to perform proper recovery as desired. 8599 * 8600 * Return codes 8601 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8602 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8603 **/ 8604static pci_ers_result_t 8605lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 8606{ 8607 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8608 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8609 8610 switch (state) { 8611 case pci_channel_io_normal: 8612 /* Non-fatal error, prepare for recovery */ 8613 lpfc_sli4_prep_dev_for_recover(phba); 8614 return PCI_ERS_RESULT_CAN_RECOVER; 8615 case pci_channel_io_frozen: 8616 /* Fatal error, prepare for slot reset */ 8617 lpfc_sli4_prep_dev_for_reset(phba); 8618 return PCI_ERS_RESULT_NEED_RESET; 8619 case pci_channel_io_perm_failure: 8620 /* Permanent failure, prepare for device down */ 8621 lpfc_sli4_prep_dev_for_perm_failure(phba); 8622 return PCI_ERS_RESULT_DISCONNECT; 8623 default: 8624 /* Unknown state, prepare and request slot reset */ 8625 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8626 "2825 Unknown PCI error state: x%x\n", state); 8627 lpfc_sli4_prep_dev_for_reset(phba); 8628 return PCI_ERS_RESULT_NEED_RESET; 8629 } 8630} 8631 8632/** 8633 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 8634 * @pdev: pointer to PCI device. 8635 * 8636 * This routine is called from the PCI subsystem for error handling to device 8637 * with SLI-4 interface spec. It is called after PCI bus has been reset to 8638 * restart the PCI card from scratch, as if from a cold-boot. During the 8639 * PCI subsystem error recovery, after the driver returns 8640 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8641 * recovery and then call this routine before calling the .resume method to 8642 * recover the device. This function will initialize the HBA device, enable 8643 * the interrupt, but it will just put the HBA to offline state without 8644 * passing any I/O traffic. 
8645 * 8646 * Return codes 8647 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8648 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8649 */ 8650static pci_ers_result_t 8651lpfc_io_slot_reset_s4(struct pci_dev *pdev) 8652{ 8653 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8654 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8655 struct lpfc_sli *psli = &phba->sli; 8656 uint32_t intr_mode; 8657 8658 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8659 if (pci_enable_device_mem(pdev)) { 8660 printk(KERN_ERR "lpfc: Cannot re-enable " 8661 "PCI device after reset.\n"); 8662 return PCI_ERS_RESULT_DISCONNECT; 8663 } 8664 8665 pci_restore_state(pdev); 8666 if (pdev->is_busmaster) 8667 pci_set_master(pdev); 8668 8669 spin_lock_irq(&phba->hbalock); 8670 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8671 spin_unlock_irq(&phba->hbalock); 8672 8673 /* Configure and enable interrupt */ 8674 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8675 if (intr_mode == LPFC_INTR_ERROR) { 8676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8677 "2824 Cannot re-enable interrupt after " 8678 "slot reset.\n"); 8679 return PCI_ERS_RESULT_DISCONNECT; 8680 } else 8681 phba->intr_mode = intr_mode; 8682 8683 /* Log the current active interrupt mode */ 8684 lpfc_log_intr_mode(phba, phba->intr_mode); 8685 8686 return PCI_ERS_RESULT_RECOVERED; 8687} 8688 8689/** 8690 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 8691 * @pdev: pointer to PCI device 8692 * 8693 * This routine is called from the PCI subsystem for error handling to device 8694 * with SLI-4 interface spec. It is called when kernel error recovery tells 8695 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8696 * error recovery. After this call, traffic can start to flow from this device 8697 * again. 8698 **/ 8699static void 8700lpfc_io_resume_s4(struct pci_dev *pdev) 8701{ 8702 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8703 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8704 8705 /* 8706 * In case of slot reset, as function reset is performed through 8707 * mailbox command which needs DMA to be enabled, this operation 8708 * has to be moved to the io resume phase. Taking device offline 8709 * will perform the necessary cleanup. 8710 */ 8711 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 8712 /* Perform device reset */ 8713 lpfc_offline_prep(phba); 8714 lpfc_offline(phba); 8715 lpfc_sli_brdrestart(phba); 8716 /* Bring the device back online */ 8717 lpfc_online(phba); 8718 } 8719 8720 /* Clean up Advanced Error Reporting (AER) if needed */ 8721 if (phba->hba_flag & HBA_AER_ENABLED) 8722 pci_cleanup_aer_uncorrect_error_status(pdev); 8723} 8724 8725/** 8726 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 8727 * @pdev: pointer to PCI device 8728 * @pid: pointer to PCI device identifier 8729 * 8730 * This routine is to be registered to the kernel's PCI subsystem. When an 8731 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 8732 * at PCI device-specific information of the device and driver to see if the 8733 * driver state that it can support this kind of device. If the match is 8734 * successful, the driver core invokes this routine. This routine dispatches 8735 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 8736 * do all the initialization that it needs to do to handle the HBA device 8737 * properly. 
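 * The dispatch decision is made by reading the LPFC_SLI_INTF register from
 * PCI config space: adapters reporting a valid SLI-4 interface are handed to
 * the SLI-4 probe routine, all others to the SLI-3 probe routine.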
8738 * 8739 * Return code 8740 * 0 - driver can claim the device 8741 * negative value - driver can not claim the device 8742 **/ 8743static int __devinit 8744lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 8745{ 8746 int rc; 8747 struct lpfc_sli_intf intf; 8748 8749 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 8750 return -ENODEV; 8751 8752 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8753 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 8754 rc = lpfc_pci_probe_one_s4(pdev, pid); 8755 else 8756 rc = lpfc_pci_probe_one_s3(pdev, pid); 8757 8758 return rc; 8759} 8760 8761/** 8762 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 8763 * @pdev: pointer to PCI device 8764 * 8765 * This routine is to be registered to the kernel's PCI subsystem. When an 8766 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 8767 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 8768 * remove routine, which will perform all the necessary cleanup for the 8769 * device to be removed from the PCI subsystem properly. 8770 **/ 8771static void __devexit 8772lpfc_pci_remove_one(struct pci_dev *pdev) 8773{ 8774 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8775 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8776 8777 switch (phba->pci_dev_grp) { 8778 case LPFC_PCI_DEV_LP: 8779 lpfc_pci_remove_one_s3(pdev); 8780 break; 8781 case LPFC_PCI_DEV_OC: 8782 lpfc_pci_remove_one_s4(pdev); 8783 break; 8784 default: 8785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8786 "1424 Invalid PCI device group: 0x%x\n", 8787 phba->pci_dev_grp); 8788 break; 8789 } 8790 return; 8791} 8792 8793/** 8794 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 8795 * @pdev: pointer to PCI device 8796 * @msg: power management message 8797 * 8798 * This routine is to be registered to the kernel's PCI subsystem to support 8799 * system Power Management (PM). When PM invokes this method, it dispatches 8800 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 8801 * suspend the device. 8802 * 8803 * Return code 8804 * 0 - driver suspended the device 8805 * Error otherwise 8806 **/ 8807static int 8808lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 8809{ 8810 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8811 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8812 int rc = -ENODEV; 8813 8814 switch (phba->pci_dev_grp) { 8815 case LPFC_PCI_DEV_LP: 8816 rc = lpfc_pci_suspend_one_s3(pdev, msg); 8817 break; 8818 case LPFC_PCI_DEV_OC: 8819 rc = lpfc_pci_suspend_one_s4(pdev, msg); 8820 break; 8821 default: 8822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8823 "1425 Invalid PCI device group: 0x%x\n", 8824 phba->pci_dev_grp); 8825 break; 8826 } 8827 return rc; 8828} 8829 8830/** 8831 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 8832 * @pdev: pointer to PCI device 8833 * 8834 * This routine is to be registered to the kernel's PCI subsystem to support 8835 * system Power Management (PM). When PM invokes this method, it dispatches 8836 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 8837 * resume the device. 
8838 * 8839 * Return code 8840 * 0 - driver resumed the device 8841 * Error otherwise 8842 **/ 8843static int 8844lpfc_pci_resume_one(struct pci_dev *pdev) 8845{ 8846 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8847 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8848 int rc = -ENODEV; 8849 8850 switch (phba->pci_dev_grp) { 8851 case LPFC_PCI_DEV_LP: 8852 rc = lpfc_pci_resume_one_s3(pdev); 8853 break; 8854 case LPFC_PCI_DEV_OC: 8855 rc = lpfc_pci_resume_one_s4(pdev); 8856 break; 8857 default: 8858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8859 "1426 Invalid PCI device group: 0x%x\n", 8860 phba->pci_dev_grp); 8861 break; 8862 } 8863 return rc; 8864} 8865 8866/** 8867 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 8868 * @pdev: pointer to PCI device. 8869 * @state: the current PCI connection state. 8870 * 8871 * This routine is registered to the PCI subsystem for error handling. This 8872 * function is called by the PCI subsystem after a PCI bus error affecting 8873 * this device has been detected. When this routine is invoked, it dispatches 8874 * the action to the proper SLI-3 or SLI-4 device error detected handling 8875 * routine, which will perform the proper error detected operation. 8876 * 8877 * Return codes 8878 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8879 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8880 **/ 8881static pci_ers_result_t 8882lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 8883{ 8884 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8885 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8886 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8887 8888 switch (phba->pci_dev_grp) { 8889 case LPFC_PCI_DEV_LP: 8890 rc = lpfc_io_error_detected_s3(pdev, state); 8891 break; 8892 case LPFC_PCI_DEV_OC: 8893 rc = lpfc_io_error_detected_s4(pdev, state); 8894 break; 8895 default: 8896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8897 "1427 Invalid PCI device group: 0x%x\n", 8898 phba->pci_dev_grp); 8899 break; 8900 } 8901 return rc; 8902} 8903 8904/** 8905 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch 8906 * @pdev: pointer to PCI device. 8907 * 8908 * This routine is registered to the PCI subsystem for error handling. This 8909 * function is called after the PCI bus has been reset to restart the PCI card 8910 * from scratch, as if from a cold-boot. When this routine is invoked, it 8911 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 8912 * routine, which will perform the proper device reset.
8913 * 8914 * Return codes 8915 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8916 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8917 **/ 8918static pci_ers_result_t 8919lpfc_io_slot_reset(struct pci_dev *pdev) 8920{ 8921 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8922 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8923 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8924 8925 switch (phba->pci_dev_grp) { 8926 case LPFC_PCI_DEV_LP: 8927 rc = lpfc_io_slot_reset_s3(pdev); 8928 break; 8929 case LPFC_PCI_DEV_OC: 8930 rc = lpfc_io_slot_reset_s4(pdev); 8931 break; 8932 default: 8933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8934 "1428 Invalid PCI device group: 0x%x\n", 8935 phba->pci_dev_grp); 8936 break; 8937 } 8938 return rc; 8939} 8940 8941/** 8942 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 8943 * @pdev: pointer to PCI device 8944 * 8945 * This routine is registered to the PCI subsystem for error handling. It 8946 * is called when kernel error recovery tells the lpfc driver that it is 8947 * OK to resume normal PCI operation after PCI bus error recovery. When 8948 * this routine is invoked, it dispatches the action to the proper SLI-3 8949 * or SLI-4 device io_resume routine, which will resume the device operation. 8950 **/ 8951static void 8952lpfc_io_resume(struct pci_dev *pdev) 8953{ 8954 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8955 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8956 8957 switch (phba->pci_dev_grp) { 8958 case LPFC_PCI_DEV_LP: 8959 lpfc_io_resume_s3(pdev); 8960 break; 8961 case LPFC_PCI_DEV_OC: 8962 lpfc_io_resume_s4(pdev); 8963 break; 8964 default: 8965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8966 "1429 Invalid PCI device group: 0x%x\n", 8967 phba->pci_dev_grp); 8968 break; 8969 } 8970 return; 8971} 8972 8973static struct pci_device_id lpfc_id_table[] = { 8974 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 8975 PCI_ANY_ID, PCI_ANY_ID, }, 8976 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 8977 PCI_ANY_ID, PCI_ANY_ID, }, 8978 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 8979 PCI_ANY_ID, PCI_ANY_ID, }, 8980 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 8981 PCI_ANY_ID, PCI_ANY_ID, }, 8982 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 8983 PCI_ANY_ID, PCI_ANY_ID, }, 8984 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 8985 PCI_ANY_ID, PCI_ANY_ID, }, 8986 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 8987 PCI_ANY_ID, PCI_ANY_ID, }, 8988 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 8989 PCI_ANY_ID, PCI_ANY_ID, }, 8990 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 8991 PCI_ANY_ID, PCI_ANY_ID, }, 8992 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 8993 PCI_ANY_ID, PCI_ANY_ID, }, 8994 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 8995 PCI_ANY_ID, PCI_ANY_ID, }, 8996 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 8997 PCI_ANY_ID, PCI_ANY_ID, }, 8998 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 8999 PCI_ANY_ID, PCI_ANY_ID, }, 9000 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 9001 PCI_ANY_ID, PCI_ANY_ID, }, 9002 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 9003 PCI_ANY_ID, PCI_ANY_ID, }, 9004 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 9005 PCI_ANY_ID, PCI_ANY_ID, }, 9006 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 9007 PCI_ANY_ID, PCI_ANY_ID, }, 9008 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 9009 PCI_ANY_ID, PCI_ANY_ID, }, 9010 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 9011 PCI_ANY_ID, PCI_ANY_ID, }, 9012 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 9013 
PCI_ANY_ID, PCI_ANY_ID, }, 9014 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 9015 PCI_ANY_ID, PCI_ANY_ID, }, 9016 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 9017 PCI_ANY_ID, PCI_ANY_ID, }, 9018 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 9019 PCI_ANY_ID, PCI_ANY_ID, }, 9020 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 9021 PCI_ANY_ID, PCI_ANY_ID, }, 9022 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 9023 PCI_ANY_ID, PCI_ANY_ID, }, 9024 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 9025 PCI_ANY_ID, PCI_ANY_ID, }, 9026 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 9027 PCI_ANY_ID, PCI_ANY_ID, }, 9028 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 9029 PCI_ANY_ID, PCI_ANY_ID, }, 9030 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 9031 PCI_ANY_ID, PCI_ANY_ID, }, 9032 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 9033 PCI_ANY_ID, PCI_ANY_ID, }, 9034 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 9035 PCI_ANY_ID, PCI_ANY_ID, }, 9036 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 9037 PCI_ANY_ID, PCI_ANY_ID, }, 9038 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 9039 PCI_ANY_ID, PCI_ANY_ID, }, 9040 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 9041 PCI_ANY_ID, PCI_ANY_ID, }, 9042 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 9043 PCI_ANY_ID, PCI_ANY_ID, }, 9044 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 9045 PCI_ANY_ID, PCI_ANY_ID, }, 9046 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 9047 PCI_ANY_ID, PCI_ANY_ID, }, 9048 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 9049 PCI_ANY_ID, PCI_ANY_ID, }, 9050 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 9051 PCI_ANY_ID, PCI_ANY_ID, }, 9052 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9053 PCI_ANY_ID, PCI_ANY_ID, }, 9054 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 9055 PCI_ANY_ID, PCI_ANY_ID, }, 9056 { 0 } 9057}; 9058 9059MODULE_DEVICE_TABLE(pci, lpfc_id_table); 9060 9061static struct pci_error_handlers lpfc_err_handler = { 9062 .error_detected = lpfc_io_error_detected, 9063 .slot_reset = lpfc_io_slot_reset, 9064 .resume = lpfc_io_resume, 9065}; 9066 9067static struct pci_driver lpfc_driver = { 9068 .name = LPFC_DRIVER_NAME, 9069 .id_table = lpfc_id_table, 9070 .probe = lpfc_pci_probe_one, 9071 .remove = __devexit_p(lpfc_pci_remove_one), 9072 .suspend = lpfc_pci_suspend_one, 9073 .resume = lpfc_pci_resume_one, 9074 .err_handler = &lpfc_err_handler, 9075}; 9076 9077/** 9078 * lpfc_init - lpfc module initialization routine 9079 * 9080 * This routine is to be invoked when the lpfc module is loaded into the 9081 * kernel. The special kernel macro module_init() is used to indicate the 9082 * role of this routine to the kernel as lpfc module entry point. 
9083 * 9084 * Return codes 9085 * 0 - successful 9086 * -ENOMEM - FC attach transport failed 9087 * all others - failed 9088 */ 9089static int __init 9090lpfc_init(void) 9091{ 9092 int error = 0; 9093 9094 printk(LPFC_MODULE_DESC "\n"); 9095 printk(LPFC_COPYRIGHT "\n"); 9096 9097 if (lpfc_enable_npiv) { 9098 lpfc_transport_functions.vport_create = lpfc_vport_create; 9099 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 9100 } 9101 lpfc_transport_template = 9102 fc_attach_transport(&lpfc_transport_functions); 9103 if (lpfc_transport_template == NULL) 9104 return -ENOMEM; 9105 if (lpfc_enable_npiv) { 9106 lpfc_vport_transport_template = 9107 fc_attach_transport(&lpfc_vport_transport_functions); 9108 if (lpfc_vport_transport_template == NULL) { 9109 fc_release_transport(lpfc_transport_template); 9110 return -ENOMEM; 9111 } 9112 } 9113 error = pci_register_driver(&lpfc_driver); 9114 if (error) { 9115 fc_release_transport(lpfc_transport_template); 9116 if (lpfc_enable_npiv) 9117 fc_release_transport(lpfc_vport_transport_template); 9118 } 9119 9120 return error; 9121} 9122 9123/** 9124 * lpfc_exit - lpfc module removal routine 9125 * 9126 * This routine is invoked when the lpfc module is removed from the kernel. 9127 * The special kernel macro module_exit() is used to indicate the role of 9128 * this routine to the kernel as lpfc module exit point. 9129 */ 9130static void __exit 9131lpfc_exit(void) 9132{ 9133 pci_unregister_driver(&lpfc_driver); 9134 fc_release_transport(lpfc_transport_template); 9135 if (lpfc_enable_npiv) 9136 fc_release_transport(lpfc_vport_transport_template); 9137 if (_dump_buf_data) { 9138 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 9139 "_dump_buf_data at 0x%p\n", 9140 (1L << _dump_buf_data_order), _dump_buf_data); 9141 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 9142 } 9143 9144 if (_dump_buf_dif) { 9145 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 9146 "_dump_buf_dif at 0x%p\n", 9147 (1L << _dump_buf_dif_order), _dump_buf_dif); 9148 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 9149 } 9150} 9151 9152module_init(lpfc_init); 9153module_exit(lpfc_exit); 9154MODULE_LICENSE("GPL"); 9155MODULE_DESCRIPTION(LPFC_MODULE_DESC); 9156MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 9157MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 9158