lpfc_init.c revision 3772a99175f5378b5001e8da364341a8b8226a4a
1/******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. 
* 20 *******************************************************************/ 21 22#include <linux/blkdev.h> 23#include <linux/delay.h> 24#include <linux/dma-mapping.h> 25#include <linux/idr.h> 26#include <linux/interrupt.h> 27#include <linux/kthread.h> 28#include <linux/pci.h> 29#include <linux/spinlock.h> 30#include <linux/ctype.h> 31 32#include <scsi/scsi.h> 33#include <scsi/scsi_device.h> 34#include <scsi/scsi_host.h> 35#include <scsi/scsi_transport_fc.h> 36 37#include "lpfc_hw.h" 38#include "lpfc_sli.h" 39#include "lpfc_nl.h" 40#include "lpfc_disc.h" 41#include "lpfc_scsi.h" 42#include "lpfc.h" 43#include "lpfc_logmsg.h" 44#include "lpfc_crtn.h" 45#include "lpfc_vport.h" 46#include "lpfc_version.h" 47 48char *_dump_buf_data; 49unsigned long _dump_buf_data_order; 50char *_dump_buf_dif; 51unsigned long _dump_buf_dif_order; 52spinlock_t _dump_buf_lock; 53 54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57 58static struct scsi_transport_template *lpfc_transport_template = NULL; 59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 60static DEFINE_IDR(lpfc_hba_index); 61 62/** 63 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 64 * @phba: pointer to lpfc hba data structure. 65 * 66 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 67 * mailbox command. It retrieves the revision information from the HBA and 68 * collects the Vital Product Data (VPD) about the HBA for preparing the 69 * configuration of the HBA. 70 * 71 * Return codes: 72 * 0 - success. 73 * -ERESTART - requests the SLI layer to reset the HBA and try again. 74 * Any other value - indicates an error. 
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		/* Light-Channel HBAs need the license key written once,
		 * byte-swapped to big-endian, via READ_NVPARM's rsvd3 field.
		 */
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* SLI-3 was requested but firmware gave no SLI-3 response */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	/* Pull the VPD out of the adapter in DMP_RSP-sized chunks; a
	 * failed DUMP mailbox (word_cnt forced to 0) terminates the loop.
	 */
	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	/* Record whether the adapter accepted async event configuration;
	 * the temperature sensor events depend on this support flag.
	 */
	if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for dump mailbox command for getting
 * wake up parameters. When this command complete, the response contain
 * Option rom version of the HBA. This function translate the version number
 * into a human readable string and store it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	/* Overlay struct prog_id on the raw response word to pick apart
	 * the version bit-fields.
	 */
	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
291 * 292 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 293 * command call. It performs all internal resource and state setups on the 294 * port: post IOCB buffers, enable appropriate host interrupt attentions, 295 * ELS ring timers, etc. 296 * 297 * Return codes 298 * 0 - success. 299 * Any other value - error. 300 **/ 301int 302lpfc_config_port_post(struct lpfc_hba *phba) 303{ 304 struct lpfc_vport *vport = phba->pport; 305 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 306 LPFC_MBOXQ_t *pmb; 307 MAILBOX_t *mb; 308 struct lpfc_dmabuf *mp; 309 struct lpfc_sli *psli = &phba->sli; 310 uint32_t status, timeout; 311 int i, j; 312 int rc; 313 314 spin_lock_irq(&phba->hbalock); 315 /* 316 * If the Config port completed correctly the HBA is not 317 * over heated any more. 318 */ 319 if (phba->over_temp_state == HBA_OVER_TEMP) 320 phba->over_temp_state = HBA_NORMAL_TEMP; 321 spin_unlock_irq(&phba->hbalock); 322 323 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 324 if (!pmb) { 325 phba->link_state = LPFC_HBA_ERROR; 326 return -ENOMEM; 327 } 328 mb = &pmb->mb; 329 330 /* Get login parameters for NID. 
*/ 331 lpfc_read_sparam(phba, pmb, 0); 332 pmb->vport = vport; 333 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 335 "0448 Adapter failed init, mbxCmd x%x " 336 "READ_SPARM mbxStatus x%x\n", 337 mb->mbxCommand, mb->mbxStatus); 338 phba->link_state = LPFC_HBA_ERROR; 339 mp = (struct lpfc_dmabuf *) pmb->context1; 340 mempool_free( pmb, phba->mbox_mem_pool); 341 lpfc_mbuf_free(phba, mp->virt, mp->phys); 342 kfree(mp); 343 return -EIO; 344 } 345 346 mp = (struct lpfc_dmabuf *) pmb->context1; 347 348 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 349 lpfc_mbuf_free(phba, mp->virt, mp->phys); 350 kfree(mp); 351 pmb->context1 = NULL; 352 353 if (phba->cfg_soft_wwnn) 354 u64_to_wwn(phba->cfg_soft_wwnn, 355 vport->fc_sparam.nodeName.u.wwn); 356 if (phba->cfg_soft_wwpn) 357 u64_to_wwn(phba->cfg_soft_wwpn, 358 vport->fc_sparam.portName.u.wwn); 359 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 360 sizeof (struct lpfc_name)); 361 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 362 sizeof (struct lpfc_name)); 363 364 /* Update the fc_host data structures with new wwn. */ 365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 367 368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 369 /* This should be consolidated into parse_vpd ? 
- mr */ 370 if (phba->SerialNumber[0] == 0) { 371 uint8_t *outptr; 372 373 outptr = &vport->fc_nodename.u.s.IEEE[0]; 374 for (i = 0; i < 12; i++) { 375 status = *outptr++; 376 j = ((status & 0xf0) >> 4); 377 if (j <= 9) 378 phba->SerialNumber[i] = 379 (char)((uint8_t) 0x30 + (uint8_t) j); 380 else 381 phba->SerialNumber[i] = 382 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 383 i++; 384 j = (status & 0xf); 385 if (j <= 9) 386 phba->SerialNumber[i] = 387 (char)((uint8_t) 0x30 + (uint8_t) j); 388 else 389 phba->SerialNumber[i] = 390 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 391 } 392 } 393 394 lpfc_read_config(phba, pmb); 395 pmb->vport = vport; 396 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 398 "0453 Adapter failed to init, mbxCmd x%x " 399 "READ_CONFIG, mbxStatus x%x\n", 400 mb->mbxCommand, mb->mbxStatus); 401 phba->link_state = LPFC_HBA_ERROR; 402 mempool_free( pmb, phba->mbox_mem_pool); 403 return -EIO; 404 } 405 406 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 407 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 408 phba->cfg_hba_queue_depth = 409 mb->un.varRdConfig.max_xri + 1; 410 411 phba->lmt = mb->un.varRdConfig.lmt; 412 413 /* Get the default values for Model Name and Description */ 414 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 415 416 if ((phba->cfg_link_speed > LINK_SPEED_10G) 417 || ((phba->cfg_link_speed == LINK_SPEED_1G) 418 && !(phba->lmt & LMT_1Gb)) 419 || ((phba->cfg_link_speed == LINK_SPEED_2G) 420 && !(phba->lmt & LMT_2Gb)) 421 || ((phba->cfg_link_speed == LINK_SPEED_4G) 422 && !(phba->lmt & LMT_4Gb)) 423 || ((phba->cfg_link_speed == LINK_SPEED_8G) 424 && !(phba->lmt & LMT_8Gb)) 425 || ((phba->cfg_link_speed == LINK_SPEED_10G) 426 && !(phba->lmt & LMT_10Gb))) { 427 /* Reset link speed to auto */ 428 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 429 "1302 Invalid speed for this board: " 430 "Reset link speed to auto: x%x\n", 431 
phba->cfg_link_speed); 432 phba->cfg_link_speed = LINK_SPEED_AUTO; 433 } 434 435 phba->link_state = LPFC_LINK_DOWN; 436 437 /* Only process IOCBs on ELS ring till hba_state is READY */ 438 if (psli->ring[psli->extra_ring].cmdringaddr) 439 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 440 if (psli->ring[psli->fcp_ring].cmdringaddr) 441 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 442 if (psli->ring[psli->next_ring].cmdringaddr) 443 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 444 445 /* Post receive buffers for desired rings */ 446 if (phba->sli_rev != 3) 447 lpfc_post_rcv_buf(phba); 448 449 /* 450 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 451 */ 452 if (phba->intr_type == MSIX) { 453 rc = lpfc_config_msi(phba, pmb); 454 if (rc) { 455 mempool_free(pmb, phba->mbox_mem_pool); 456 return -EIO; 457 } 458 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 459 if (rc != MBX_SUCCESS) { 460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 461 "0352 Config MSI mailbox command " 462 "failed, mbxCmd x%x, mbxStatus x%x\n", 463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 464 mempool_free(pmb, phba->mbox_mem_pool); 465 return -EIO; 466 } 467 } 468 469 /* Initialize ERATT handling flag */ 470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 471 472 /* Enable appropriate host interrupts */ 473 spin_lock_irq(&phba->hbalock); 474 status = readl(phba->HCregaddr); 475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 476 if (psli->num_rings > 0) 477 status |= HC_R0INT_ENA; 478 if (psli->num_rings > 1) 479 status |= HC_R1INT_ENA; 480 if (psli->num_rings > 2) 481 status |= HC_R2INT_ENA; 482 if (psli->num_rings > 3) 483 status |= HC_R3INT_ENA; 484 485 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 486 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 487 status &= ~(HC_R0INT_ENA); 488 489 writel(status, phba->HCregaddr); 490 readl(phba->HCregaddr); /* flush */ 491 spin_unlock_irq(&phba->hbalock); 492 493 /* Set up ring-0 (ELS) timer */ 494 timeout = 
phba->fc_ratov * 2; 495 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 496 /* Set up heart beat (HB) timer */ 497 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 498 phba->hb_outstanding = 0; 499 phba->last_completion_time = jiffies; 500 /* Set up error attention (ERATT) polling timer */ 501 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 502 503 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 504 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 505 lpfc_set_loopback_flag(phba); 506 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 507 if (rc != MBX_SUCCESS) { 508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 509 "0454 Adapter failed to init, mbxCmd x%x " 510 "INIT_LINK, mbxStatus x%x\n", 511 mb->mbxCommand, mb->mbxStatus); 512 513 /* Clear all interrupt enable conditions */ 514 writel(0, phba->HCregaddr); 515 readl(phba->HCregaddr); /* flush */ 516 /* Clear all pending interrupts */ 517 writel(0xffffffff, phba->HAregaddr); 518 readl(phba->HAregaddr); /* flush */ 519 520 phba->link_state = LPFC_HBA_ERROR; 521 if (rc != MBX_BUSY) 522 mempool_free(pmb, phba->mbox_mem_pool); 523 return -EIO; 524 } 525 /* MBOX buffer will be freed in mbox compl */ 526 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 527 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 528 pmb->mbox_cmpl = lpfc_config_async_cmpl; 529 pmb->vport = phba->pport; 530 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 531 532 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 533 lpfc_printf_log(phba, 534 KERN_ERR, 535 LOG_INIT, 536 "0456 Adapter failed to issue " 537 "ASYNCEVT_ENABLE mbox status x%x \n.", 538 rc); 539 mempool_free(pmb, phba->mbox_mem_pool); 540 } 541 542 /* Get Option rom version */ 543 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 544 lpfc_dump_wakeup_param(phba, pmb); 545 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 546 pmb->vport = phba->pport; 547 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 548 549 if ((rc != MBX_BUSY) 
&& (rc != MBX_SUCCESS)) { 550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 551 "to get Option ROM version status x%x\n.", rc); 552 mempool_free(pmb, phba->mbox_mem_pool); 553 } 554 555 return 0; 556} 557 558/** 559 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 560 * @phba: pointer to lpfc HBA data structure. 561 * 562 * This routine will do LPFC uninitialization before the HBA is reset when 563 * bringing down the SLI Layer. 564 * 565 * Return codes 566 * 0 - success. 567 * Any other value - error. 568 **/ 569int 570lpfc_hba_down_prep(struct lpfc_hba *phba) 571{ 572 struct lpfc_vport **vports; 573 int i; 574 575 if (phba->sli_rev <= LPFC_SLI_REV3) { 576 /* Disable interrupts */ 577 writel(0, phba->HCregaddr); 578 readl(phba->HCregaddr); /* flush */ 579 } 580 581 if (phba->pport->load_flag & FC_UNLOADING) 582 lpfc_cleanup_discovery_resources(phba->pport); 583 else { 584 vports = lpfc_create_vport_work_array(phba); 585 if (vports != NULL) 586 for (i = 0; i <= phba->max_vports && 587 vports[i] != NULL; i++) 588 lpfc_cleanup_discovery_resources(vports[i]); 589 lpfc_destroy_vport_work_array(phba, vports); 590 } 591 return 0; 592} 593 594/** 595 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 596 * @phba: pointer to lpfc HBA data structure. 597 * 598 * This routine will do uninitialization after the HBA is reset when bring 599 * down the SLI Layer. 600 * 601 * Return codes 602 * 0 - sucess. 603 * Any other value - error. 
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		/* Drop the lock: cancelling/aborting IOCBs invokes
		 * completion callbacks that must not run under hbalock.
		 */
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	/* Only wake the worker if we were the one to post the event;
	 * an already-posted event means a wake-up is pending.
	 */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer is necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	/* Re-arm the heart-beat timer unless the port is offline,
	 * in error state, or the driver is unloading.
	 */
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	/* A completion arrived inside the last interval, so the HBA is
	 * demonstrably alive; just re-arm the timer and return.
	 */
	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* If the ELS buffer count has not changed over a full interval,
	 * the deferred buffers are stale - release them now.
	 */
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			/* Heartbeat issued: arm the longer timeout and mark
			 * it outstanding; the completion clears the flag.
			 */
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	lpfc_sli_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to management app
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts an FC_REG_BOARD_EVENT vendor event with the LPFC_EVENT_PORTINTERR
 * subcategory through the FC transport so that management applications
 * are notified of the board error.
 **/
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
961 */ 962 if (pci_channel_offline(phba->pcidev)) { 963 spin_lock_irq(&phba->hbalock); 964 phba->hba_flag &= ~DEFER_ERATT; 965 spin_unlock_irq(&phba->hbalock); 966 return; 967 } 968 969 /* If resets are disabled then leave the HBA alone and return */ 970 if (!phba->cfg_enable_hba_reset) 971 return; 972 973 /* Send an internal error event to mgmt application */ 974 lpfc_board_errevt_to_mgmt(phba); 975 976 if (phba->hba_flag & DEFER_ERATT) 977 lpfc_handle_deferred_eratt(phba); 978 979 if (phba->work_hs & HS_FFER6) { 980 /* Re-establishing Link */ 981 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 982 "1301 Re-establishing Link " 983 "Data: x%x x%x x%x\n", 984 phba->work_hs, 985 phba->work_status[0], phba->work_status[1]); 986 987 spin_lock_irq(&phba->hbalock); 988 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 989 spin_unlock_irq(&phba->hbalock); 990 991 /* 992 * Firmware stops when it triggled erratt with HS_FFER6. 993 * That could cause the I/Os dropped by the firmware. 994 * Error iocb (I/O) on txcmplq and let the SCSI layer 995 * retry it after re-establishing link. 996 */ 997 pring = &psli->ring[psli->fcp_ring]; 998 lpfc_sli_abort_iocb_ring(phba, pring); 999 1000 /* 1001 * There was a firmware error. Take the hba offline and then 1002 * attempt to restart it. 
1003 */ 1004 lpfc_offline_prep(phba); 1005 lpfc_offline(phba); 1006 lpfc_sli_brdrestart(phba); 1007 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1008 lpfc_unblock_mgmt_io(phba); 1009 return; 1010 } 1011 lpfc_unblock_mgmt_io(phba); 1012 } else if (phba->work_hs & HS_CRIT_TEMP) { 1013 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1014 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1015 temp_event_data.event_code = LPFC_CRIT_TEMP; 1016 temp_event_data.data = (uint32_t)temperature; 1017 1018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1019 "0406 Adapter maximum temperature exceeded " 1020 "(%ld), taking this port offline " 1021 "Data: x%x x%x x%x\n", 1022 temperature, phba->work_hs, 1023 phba->work_status[0], phba->work_status[1]); 1024 1025 shost = lpfc_shost_from_vport(phba->pport); 1026 fc_host_post_vendor_event(shost, fc_get_event_number(), 1027 sizeof(temp_event_data), 1028 (char *) &temp_event_data, 1029 SCSI_NL_VID_TYPE_PCI 1030 | PCI_VENDOR_ID_EMULEX); 1031 1032 spin_lock_irq(&phba->hbalock); 1033 phba->over_temp_state = HBA_OVER_TEMP; 1034 spin_unlock_irq(&phba->hbalock); 1035 lpfc_offline_eratt(phba); 1036 1037 } else { 1038 /* The if clause above forces this code path when the status 1039 * failure is a value other than FFER6. Do not call the offline 1040 * twice. This is the adapter hardware error path. 1041 */ 1042 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1043 "0457 Adapter Hardware Error " 1044 "Data: x%x x%x x%x\n", 1045 phba->work_hs, 1046 phba->work_status[0], phba->work_status[1]); 1047 1048 event_data = FC_REG_DUMP_EVENT; 1049 shost = lpfc_shost_from_vport(vport); 1050 fc_host_post_vendor_event(shost, fc_get_event_number(), 1051 sizeof(event_data), (char *) &event_data, 1052 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1053 1054 lpfc_offline_eratt(phba); 1055 } 1056 return; 1057} 1058 1059/** 1060 * lpfc_handle_latt - The HBA link event handler 1061 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;	/* doubles as the error code reported on failure */

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

	/* Error unwind: release resources in reverse order of acquisition */
lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
/*
 * NOTE(review): this function is forward-declared 'static' near the top of
 * the file, but the definition omits the keyword. Linkage remains internal,
 * yet the two declarations should agree.
 */
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			/* Skip the tag's 16-bit little-endian length field
			 * and its payload (presumably the PCI VPD large
			 * resource tags for identifier string / read-write
			 * data -- confirm against the PCI spec).
			 */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			/* Read-only VPD data tag: walk its keyword fields */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			/* Clamp to the number of bytes actually supplied */
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] = vpd[index++];
						/* leave room for the NUL */
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				}
				else {
					/* Unrecognized keyword: skip it */
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			/* End tag: stop parsing */
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

/**
 *
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;		/* set for FCoE (gigabit Ethernet) models */
	struct {
		char * name;
		int max_speed;
		char * bus;
	} m = {"<Unknown>", 0, ""};

	/* Both strings already populated (e.g. from VPD): nothing to do */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* Derive the maximum link speed from the link-media-type bits */
	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed,
			"PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed,
			"PCIe"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
		break;
	default:
		/* Unknown device ID: m.name stays NULL. The kernel's
		 * vsnprintf renders a NULL "%s" argument safely, but the
		 * resulting strings will not carry a model name.
		 */
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	if (descp && descp[0] == '\0')
		snprintf(descp, 255,
			"Emulex %s %d%s %s %s",
			m.name, m.max_speed,
			(GE) ? "GE" : "Gb",
			m.bus,
			(GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Include buffers we failed to post on a previous call */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			/* kfree(NULL) is a no-op, so this covers both
			 * failure cases.
			 */
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Describe one or two buffers in the command's BDEs */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			/* Issue failed: undo and record how many remain */
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

/* 32-bit left-rotate of V by N bits, used by the SHA-1 helpers below */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
1609 **/ 1610static void 1611lpfc_sha_init(uint32_t * HashResultPointer) 1612{ 1613 HashResultPointer[0] = 0x67452301; 1614 HashResultPointer[1] = 0xEFCDAB89; 1615 HashResultPointer[2] = 0x98BADCFE; 1616 HashResultPointer[3] = 0x10325476; 1617 HashResultPointer[4] = 0xC3D2E1F0; 1618} 1619 1620/** 1621 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 1622 * @HashResultPointer: pointer to an initial/result hash table. 1623 * @HashWorkingPointer: pointer to an working hash table. 1624 * 1625 * This routine iterates an initial hash table pointed by @HashResultPointer 1626 * with the values from the working hash table pointeed by @HashWorkingPointer. 1627 * The results are putting back to the initial hash table, returned through 1628 * the @HashResultPointer as the result hash table. 1629 **/ 1630static void 1631lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1632{ 1633 int t; 1634 uint32_t TEMP; 1635 uint32_t A, B, C, D, E; 1636 t = 16; 1637 do { 1638 HashWorkingPointer[t] = 1639 S(1, 1640 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1641 8] ^ 1642 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1643 } while (++t <= 79); 1644 t = 0; 1645 A = HashResultPointer[0]; 1646 B = HashResultPointer[1]; 1647 C = HashResultPointer[2]; 1648 D = HashResultPointer[3]; 1649 E = HashResultPointer[4]; 1650 1651 do { 1652 if (t < 20) { 1653 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 1654 } else if (t < 40) { 1655 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 1656 } else if (t < 60) { 1657 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 1658 } else { 1659 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 1660 } 1661 TEMP += S(5, A) + E + HashWorkingPointer[t]; 1662 E = D; 1663 D = C; 1664 C = S(30, B); 1665 B = A; 1666 A = TEMP; 1667 } while (++t <= 79); 1668 1669 HashResultPointer[0] += A; 1670 HashResultPointer[1] += B; 1671 HashResultPointer[2] += C; 1672 HashResultPointer[3] += D; 1673 HashResultPointer[4] += E; 1674 1675} 1676 1677/** 
1678 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 1679 * @RandomChallenge: pointer to the entry of host challenge random number array. 1680 * @HashWorking: pointer to the entry of the working hash array. 1681 * 1682 * This routine calculates the working hash array referred by @HashWorking 1683 * from the challenge random numbers associated with the host, referred by 1684 * @RandomChallenge. The result is put into the entry of the working hash 1685 * array and returned by reference through @HashWorking. 1686 **/ 1687static void 1688lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1689{ 1690 *HashWorking = (*RandomChallenge ^ *HashWorking); 1691} 1692 1693/** 1694 * lpfc_hba_init - Perform special handling for LC HBA initialization 1695 * @phba: pointer to lpfc hba data structure. 1696 * @hbainit: pointer to an array of unsigned 32-bit integers. 1697 * 1698 * This routine performs the special handling for LC HBA initialization. 1699 **/ 1700void 1701lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1702{ 1703 int t; 1704 uint32_t *HashWorking; 1705 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 1706 1707 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 1708 if (!HashWorking) 1709 return; 1710 1711 HashWorking[0] = HashWorking[78] = *pwwnn++; 1712 HashWorking[1] = HashWorking[79] = *pwwnn; 1713 1714 for (t = 0; t < 7; t++) 1715 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1716 1717 lpfc_sha_init(hbainit); 1718 lpfc_sha_iterate(hbainit, HashWorking); 1719 kfree(HashWorking); 1720} 1721 1722/** 1723 * lpfc_cleanup - Performs vport cleanups before deleting a vport 1724 * @vport: pointer to a virtual N_Port data structure. 1725 * 1726 * This routine performs the necessary cleanups before deleting the @vport. 1727 * It invokes the discovery state machine to perform necessary state 1728 * transitions and to release the ndlps associated with the @vport. 
 * Note, the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			/* Inactive node: re-enable it just enough so its
			 * memory can be released through the kref path.
			 */
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					     NLP_EVT_DEVICE_RM);

	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		/* Give up after ~30 seconds (3000 * 10ms) and dump the
		 * remaining nodes for debugging.
		 */
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
}

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	/* Cancel the discovery timeout as well */
	lpfc_can_disctmo(vport);
	return;
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
1830 **/ 1831void 1832lpfc_stop_hba_timers(struct lpfc_hba *phba) 1833{ 1834 lpfc_stop_vport_timers(phba->pport); 1835 del_timer_sync(&phba->sli.mbox_tmo); 1836 del_timer_sync(&phba->fabric_block_timer); 1837 del_timer_sync(&phba->eratt_poll); 1838 del_timer_sync(&phba->hb_tmofunc); 1839 phba->hb_outstanding = 0; 1840 1841 switch (phba->pci_dev_grp) { 1842 case LPFC_PCI_DEV_LP: 1843 /* Stop any LightPulse device specific driver timers */ 1844 del_timer_sync(&phba->fcp_poll_timer); 1845 break; 1846 case LPFC_PCI_DEV_OC: 1847 /* Stop any OneConnect device sepcific driver timers */ 1848 break; 1849 default: 1850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1851 "0297 Invalid device group (x%x)\n", 1852 phba->pci_dev_grp); 1853 break; 1854 } 1855 return; 1856} 1857 1858/** 1859 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 1860 * @phba: pointer to lpfc hba data structure. 1861 * 1862 * This routine marks a HBA's management interface as blocked. Once the HBA's 1863 * management interface is marked as blocked, all the user space access to 1864 * the HBA, whether they are from sysfs interface or libdfc interface will 1865 * all be blocked. The HBA is set to block the management interface when the 1866 * driver prepares the HBA interface for online or offline. 1867 **/ 1868static void 1869lpfc_block_mgmt_io(struct lpfc_hba * phba) 1870{ 1871 unsigned long iflag; 1872 1873 spin_lock_irqsave(&phba->hbalock, iflag); 1874 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 1875 spin_unlock_irqrestore(&phba->hbalock, iflag); 1876} 1877 1878/** 1879 * lpfc_online - Initialize and bring a HBA online 1880 * @phba: pointer to lpfc hba data structure. 1881 * 1882 * This routine initializes the HBA and brings a HBA online. During this 1883 * process, the management interface is blocked to prevent user space access 1884 * to the HBA interfering with the driver initialization. 
1885 * 1886 * Return codes 1887 * 0 - successful 1888 * 1 - failed 1889 **/ 1890int 1891lpfc_online(struct lpfc_hba *phba) 1892{ 1893 struct lpfc_vport *vport; 1894 struct lpfc_vport **vports; 1895 int i; 1896 1897 if (!phba) 1898 return 0; 1899 vport = phba->pport; 1900 1901 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 1902 return 0; 1903 1904 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1905 "0458 Bring Adapter online\n"); 1906 1907 lpfc_block_mgmt_io(phba); 1908 1909 if (!lpfc_sli_queue_setup(phba)) { 1910 lpfc_unblock_mgmt_io(phba); 1911 return 1; 1912 } 1913 1914 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 1915 lpfc_unblock_mgmt_io(phba); 1916 return 1; 1917 } 1918 1919 vports = lpfc_create_vport_work_array(phba); 1920 if (vports != NULL) 1921 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1922 struct Scsi_Host *shost; 1923 shost = lpfc_shost_from_vport(vports[i]); 1924 spin_lock_irq(shost->host_lock); 1925 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 1926 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 1927 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1928 spin_unlock_irq(shost->host_lock); 1929 } 1930 lpfc_destroy_vport_work_array(phba, vports); 1931 1932 lpfc_unblock_mgmt_io(phba); 1933 return 0; 1934} 1935 1936/** 1937 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 1938 * @phba: pointer to lpfc hba data structure. 1939 * 1940 * This routine marks a HBA's management interface as not blocked. Once the 1941 * HBA's management interface is marked as not blocked, all the user space 1942 * access to the HBA, whether they are from sysfs interface or libdfc 1943 * interface will be allowed. The HBA is set to block the management interface 1944 * when the driver prepares the HBA interface for online or offline and then 1945 * set to unblock the management interface afterwards. 
1946 **/ 1947void 1948lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 1949{ 1950 unsigned long iflag; 1951 1952 spin_lock_irqsave(&phba->hbalock, iflag); 1953 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 1954 spin_unlock_irqrestore(&phba->hbalock, iflag); 1955} 1956 1957/** 1958 * lpfc_offline_prep - Prepare a HBA to be brought offline 1959 * @phba: pointer to lpfc hba data structure. 1960 * 1961 * This routine is invoked to prepare a HBA to be brought offline. It performs 1962 * unregistration login to all the nodes on all vports and flushes the mailbox 1963 * queue to make it ready to be brought offline. 1964 **/ 1965void 1966lpfc_offline_prep(struct lpfc_hba * phba) 1967{ 1968 struct lpfc_vport *vport = phba->pport; 1969 struct lpfc_nodelist *ndlp, *next_ndlp; 1970 struct lpfc_vport **vports; 1971 int i; 1972 1973 if (vport->fc_flag & FC_OFFLINE_MODE) 1974 return; 1975 1976 lpfc_block_mgmt_io(phba); 1977 1978 lpfc_linkdown(phba); 1979 1980 /* Issue an unreg_login to all nodes on all vports */ 1981 vports = lpfc_create_vport_work_array(phba); 1982 if (vports != NULL) { 1983 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1984 struct Scsi_Host *shost; 1985 1986 if (vports[i]->load_flag & FC_UNLOADING) 1987 continue; 1988 shost = lpfc_shost_from_vport(vports[i]); 1989 list_for_each_entry_safe(ndlp, next_ndlp, 1990 &vports[i]->fc_nodes, 1991 nlp_listp) { 1992 if (!NLP_CHK_NODE_ACT(ndlp)) 1993 continue; 1994 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1995 continue; 1996 if (ndlp->nlp_type & NLP_FABRIC) { 1997 lpfc_disc_state_machine(vports[i], ndlp, 1998 NULL, NLP_EVT_DEVICE_RECOVERY); 1999 lpfc_disc_state_machine(vports[i], ndlp, 2000 NULL, NLP_EVT_DEVICE_RM); 2001 } 2002 spin_lock_irq(shost->host_lock); 2003 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2004 spin_unlock_irq(shost->host_lock); 2005 lpfc_unreg_rpi(vports[i], ndlp); 2006 } 2007 } 2008 } 2009 lpfc_destroy_vport_work_array(phba, vports); 2010 2011 lpfc_sli_flush_mbox_queue(phba); 2012} 2013 2014/** 2015 * 
lpfc_offline - Bring a HBA offline 2016 * @phba: pointer to lpfc hba data structure. 2017 * 2018 * This routine actually brings a HBA offline. It stops all the timers 2019 * associated with the HBA, brings down the SLI layer, and eventually 2020 * marks the HBA as in offline state for the upper layer protocol. 2021 **/ 2022void 2023lpfc_offline(struct lpfc_hba *phba) 2024{ 2025 struct Scsi_Host *shost; 2026 struct lpfc_vport **vports; 2027 int i; 2028 2029 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2030 return; 2031 2032 /* stop all timers associated with this hba */ 2033 lpfc_stop_phba_timers(phba); 2034 vports = lpfc_create_vport_work_array(phba); 2035 if (vports != NULL) 2036 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2037 lpfc_stop_vport_timers(vports[i]); 2038 lpfc_destroy_vport_work_array(phba, vports); 2039 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2040 "0460 Bring Adapter offline\n"); 2041 /* Bring down the SLI Layer and cleanup. The HBA is offline 2042 now. */ 2043 lpfc_sli_hba_down(phba); 2044 spin_lock_irq(&phba->hbalock); 2045 phba->work_ha = 0; 2046 spin_unlock_irq(&phba->hbalock); 2047 vports = lpfc_create_vport_work_array(phba); 2048 if (vports != NULL) 2049 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2050 shost = lpfc_shost_from_vport(vports[i]); 2051 spin_lock_irq(shost->host_lock); 2052 vports[i]->work_port_events = 0; 2053 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2054 spin_unlock_irq(shost->host_lock); 2055 } 2056 lpfc_destroy_vport_work_array(phba, vports); 2057} 2058 2059/** 2060 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2061 * @phba: pointer to lpfc hba data structure. 2062 * 2063 * This routine is to free all the SCSI buffers and IOCBs from the driver 2064 * list back to kernel. It is called from lpfc_pci_remove_one to free 2065 * the internal resources before the device is removed from the system. 
2066 * 2067 * Return codes 2068 * 0 - successful (for now, it always returns 0) 2069 **/ 2070static int 2071lpfc_scsi_free(struct lpfc_hba *phba) 2072{ 2073 struct lpfc_scsi_buf *sb, *sb_next; 2074 struct lpfc_iocbq *io, *io_next; 2075 2076 spin_lock_irq(&phba->hbalock); 2077 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2078 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2079 list_del(&sb->list); 2080 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2081 sb->dma_handle); 2082 kfree(sb); 2083 phba->total_scsi_bufs--; 2084 } 2085 2086 /* Release all the lpfc_iocbq entries maintained by this host. */ 2087 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2088 list_del(&io->list); 2089 kfree(io); 2090 phba->total_iocbq_bufs--; 2091 } 2092 2093 spin_unlock_irq(&phba->hbalock); 2094 2095 return 0; 2096} 2097 2098/** 2099 * lpfc_create_port - Create an FC port 2100 * @phba: pointer to lpfc hba data structure. 2101 * @instance: a unique integer ID to this FC port. 2102 * @dev: pointer to the device data structure. 2103 * 2104 * This routine creates a FC port for the upper layer protocol. The FC port 2105 * can be created on top of either a physical port or a virtual port provided 2106 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2107 * and associates the FC port created before adding the shost into the SCSI 2108 * layer. 2109 * 2110 * Return codes 2111 * @vport - pointer to the virtual N_Port data structure. 2112 * NULL - port create failed. 
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;
	int error = 0;

	/* NPIV vports are handed a different template than the physical
	 * port (dev == &pcidev->dev means physical). */
	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	/* The vport lives in the shost's hostdata area. */
	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	spin_lock_init(&vport->work_port_lock);

	/* Timers must be initialized before scsi_add_host() can trigger
	 * any activity that might arm them. */
	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	error = scsi_add_host(shost, dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	/* Drops the only reference; frees shost and the embedded vport. */
	scsi_host_put(shost);
out:
	return NULL;
}

/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Detach from debugfs and the FC/SCSI midlayers before unlinking
	 * the vport from the HBA's port list. */
	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	/* Legacy two-step idr API: preload, then allocate. */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	/* Hard cap: give up waiting after 30 seconds of scanning. */
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	/* Shorter cap when the link never came up at all. */
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	/* Scan is not done until the port is READY, discovery is idle,
	 * at least one mapped node exists (or 2s elapsed), and no mailbox
	 * command is in flight. */
	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	/* FC-4 type bitmap: bytes 2 and 7 are set — presumably marking the
	 * SCSI-FCP type words; confirm against FC-GS type codes. */
	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	/* Max frame size from the service parameters read off the HBA. */
	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}

/**
 * lpfc_enable_msix - Enable MSI-X interrupt mode
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
 * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
 * pci_enable_msix(), once invoked, enables either all or nothing, depending
 * on the current availability of PCI vector resources. The device driver is
 * responsible for calling the individual request_irq() to register each MSI-X
 * vector with a interrupt handler, which is done in this function. Note that
 * later when device is unloading, the driver should always call free_irq()
 * on all MSI-X vectors it has done request_irq() on before calling
 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
 * will be left with MSI-X enabled and leaks its vectors.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	} else
		for (i = 0; i < LPFC_MSIX_VECTORS; i++)
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0477 MSI-X entry[%d]: vector=x%x "
					"message=%d\n", i,
					phba->msix_entries[i].vector,
					phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
			 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
			 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->mb.mbxCommand, pmb->mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

	/* Error labels unwind in strict reverse order of acquisition. */
mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_disable_msix - Disable MSI-X interrupt mode
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode.
2469 **/ 2470static void 2471lpfc_disable_msix(struct lpfc_hba *phba) 2472{ 2473 int i; 2474 2475 /* Free up MSI-X multi-message vectors */ 2476 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 2477 free_irq(phba->msix_entries[i].vector, phba); 2478 /* Disable MSI-X */ 2479 pci_disable_msix(phba->pcidev); 2480} 2481 2482/** 2483 * lpfc_enable_msi - Enable MSI interrupt mode 2484 * @phba: pointer to lpfc hba data structure. 2485 * 2486 * This routine is invoked to enable the MSI interrupt mode. The kernel 2487 * function pci_enable_msi() is called to enable the MSI vector. The 2488 * device driver is responsible for calling the request_irq() to register 2489 * MSI vector with a interrupt the handler, which is done in this function. 2490 * 2491 * Return codes 2492 * 0 - sucessful 2493 * other values - error 2494 */ 2495static int 2496lpfc_enable_msi(struct lpfc_hba *phba) 2497{ 2498 int rc; 2499 2500 rc = pci_enable_msi(phba->pcidev); 2501 if (!rc) 2502 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2503 "0462 PCI enable MSI mode success.\n"); 2504 else { 2505 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2506 "0471 PCI enable MSI mode failed (%d)\n", rc); 2507 return rc; 2508 } 2509 2510 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2511 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2512 if (rc) { 2513 pci_disable_msi(phba->pcidev); 2514 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2515 "0478 MSI request_irq failed (%d)\n", rc); 2516 } 2517 return rc; 2518} 2519 2520/** 2521 * lpfc_disable_msi - Disable MSI interrupt mode 2522 * @phba: pointer to lpfc hba data structure. 2523 * 2524 * This routine is invoked to disable the MSI interrupt mode. The driver 2525 * calls free_irq() on MSI vector it has done request_irq() on before 2526 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and 2527 * a device will be left with MSI enabled and leaks its vector. 
2528 */ 2529 2530static void 2531lpfc_disable_msi(struct lpfc_hba *phba) 2532{ 2533 free_irq(phba->pcidev->irq, phba); 2534 pci_disable_msi(phba->pcidev); 2535 return; 2536} 2537 2538/** 2539 * lpfc_log_intr_mode - Log the active interrupt mode 2540 * @phba: pointer to lpfc hba data structure. 2541 * @intr_mode: active interrupt mode adopted. 2542 * 2543 * This routine it invoked to log the currently used active interrupt mode 2544 * to the device. 2545 **/ 2546static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 2547{ 2548 switch (intr_mode) { 2549 case 0: 2550 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2551 "0470 Enable INTx interrupt mode.\n"); 2552 break; 2553 case 1: 2554 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2555 "0481 Enabled MSI interrupt mode.\n"); 2556 break; 2557 case 2: 2558 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2559 "0480 Enabled MSI-X interrupt mode.\n"); 2560 break; 2561 default: 2562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2563 "0482 Illegal interrupt mode.\n"); 2564 break; 2565 } 2566 return; 2567} 2568 2569/** 2570 * lpfc_enable_pci_dev - Enable a generic PCI device. 2571 * @phba: pointer to lpfc hba data structure. 2572 * 2573 * This routine is invoked to enable the PCI device that is common to all 2574 * PCI devices. 
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	/* MWI is best-effort; failure is deliberately ignored. */
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
	/* Null out PCI private reference to driver */
	pci_set_drvdata(pdev, NULL);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	/* Offline/restart/online sequence; mgmt I/O is blocked by
	 * lpfc_offline_prep() and re-enabled at the end. */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 */
	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp) +
		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

	/* BlockGuard needs room for the protection-data scatter segments. */
	if (phba->cfg_enable_bg) {
		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
		phba->cfg_sg_dma_buf_size +=
			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
	}

	/* Also reinitialize the host templates with new values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
2756 **/ 2757static void 2758lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 2759{ 2760 /* Free device driver memory allocated */ 2761 lpfc_mem_free_all(phba); 2762 2763 return; 2764} 2765 2766/** 2767 * lpfc_init_api_table_setup - Set up init api fucntion jump table 2768 * @phba: The hba struct for which this call is being executed. 2769 * @dev_grp: The HBA PCI-Device group number. 2770 * 2771 * This routine sets up the device INIT interface API function jump table 2772 * in @phba struct. 2773 * 2774 * Returns: 0 - success, -ENODEV - failure. 2775 **/ 2776int 2777lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 2778{ 2779 switch (dev_grp) { 2780 case LPFC_PCI_DEV_LP: 2781 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 2782 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 2783 phba->lpfc_stop_port = lpfc_stop_port_s3; 2784 break; 2785 default: 2786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2787 "1431 Invalid HBA PCI-device group: 0x%x\n", 2788 dev_grp); 2789 return -ENODEV; 2790 break; 2791 } 2792 return 0; 2793} 2794 2795/** 2796 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 2797 * @phba: pointer to lpfc hba data structure. 2798 * 2799 * This routine is invoked to set up the driver internal resources before the 2800 * device specific resource setup to support the HBA device it attached to. 
2801 * 2802 * Return codes 2803 * 0 - sucessful 2804 * other values - error 2805 **/ 2806static int 2807lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 2808{ 2809 /* 2810 * Driver resources common to all SLI revisions 2811 */ 2812 atomic_set(&phba->fast_event_count, 0); 2813 spin_lock_init(&phba->hbalock); 2814 2815 /* Initialize ndlp management spinlock */ 2816 spin_lock_init(&phba->ndlp_lock); 2817 2818 INIT_LIST_HEAD(&phba->port_list); 2819 INIT_LIST_HEAD(&phba->work_list); 2820 init_waitqueue_head(&phba->wait_4_mlo_m_q); 2821 2822 /* Initialize the wait queue head for the kernel thread */ 2823 init_waitqueue_head(&phba->work_waitq); 2824 2825 /* Initialize the scsi buffer list used by driver for scsi IO */ 2826 spin_lock_init(&phba->scsi_buf_list_lock); 2827 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 2828 2829 /* Initialize the fabric iocb list */ 2830 INIT_LIST_HEAD(&phba->fabric_iocb_list); 2831 2832 /* Initialize list to save ELS buffers */ 2833 INIT_LIST_HEAD(&phba->elsbuf); 2834 2835 /* Initialize FCF connection rec list */ 2836 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 2837 2838 return 0; 2839} 2840 2841/** 2842 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 2843 * @phba: pointer to lpfc hba data structure. 2844 * 2845 * This routine is invoked to set up the driver internal resources after the 2846 * device specific resource setup to support the HBA device it attached to. 2847 * 2848 * Return codes 2849 * 0 - sucessful 2850 * other values - error 2851 **/ 2852static int 2853lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 2854{ 2855 int error; 2856 2857 /* Startup the kernel thread for this host adapter. 
*/ 2858 phba->worker_thread = kthread_run(lpfc_do_work, phba, 2859 "lpfc_worker_%d", phba->brd_no); 2860 if (IS_ERR(phba->worker_thread)) { 2861 error = PTR_ERR(phba->worker_thread); 2862 return error; 2863 } 2864 2865 return 0; 2866} 2867 2868/** 2869 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 2870 * @phba: pointer to lpfc hba data structure. 2871 * 2872 * This routine is invoked to unset the driver internal resources set up after 2873 * the device specific resource setup for supporting the HBA device it 2874 * attached to. 2875 **/ 2876static void 2877lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 2878{ 2879 /* Stop kernel worker thread */ 2880 kthread_stop(phba->worker_thread); 2881} 2882 2883/** 2884 * lpfc_free_iocb_list - Free iocb list. 2885 * @phba: pointer to lpfc hba data structure. 2886 * 2887 * This routine is invoked to free the driver's IOCB list and memory. 2888 **/ 2889static void 2890lpfc_free_iocb_list(struct lpfc_hba *phba) 2891{ 2892 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 2893 2894 spin_lock_irq(&phba->hbalock); 2895 list_for_each_entry_safe(iocbq_entry, iocbq_next, 2896 &phba->lpfc_iocb_list, list) { 2897 list_del(&iocbq_entry->list); 2898 kfree(iocbq_entry); 2899 phba->total_iocbq_bufs--; 2900 } 2901 spin_unlock_irq(&phba->hbalock); 2902 2903 return; 2904} 2905 2906/** 2907 * lpfc_init_iocb_list - Allocate and initialize iocb list. 2908 * @phba: pointer to lpfc hba data structure. 2909 * 2910 * This routine is invoked to allocate and initizlize the driver's IOCB 2911 * list and set up the IOCB tag array accordingly. 2912 * 2913 * Return codes 2914 * 0 - sucessful 2915 * other values - error 2916 **/ 2917static int 2918lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 2919{ 2920 struct lpfc_iocbq *iocbq_entry = NULL; 2921 uint16_t iotag; 2922 int i; 2923 2924 /* Initialize and populate the iocb list per host. 
*/ 2925 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 2926 for (i = 0; i < iocb_count; i++) { 2927 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 2928 if (iocbq_entry == NULL) { 2929 printk(KERN_ERR "%s: only allocated %d iocbs of " 2930 "expected %d count. Unloading driver.\n", 2931 __func__, i, LPFC_IOCB_LIST_CNT); 2932 goto out_free_iocbq; 2933 } 2934 2935 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 2936 if (iotag == 0) { 2937 kfree(iocbq_entry); 2938 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2939 "Unloading driver.\n", __func__); 2940 goto out_free_iocbq; 2941 } 2942 iocbq_entry->sli4_xritag = NO_XRI; 2943 2944 spin_lock_irq(&phba->hbalock); 2945 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 2946 phba->total_iocbq_bufs++; 2947 spin_unlock_irq(&phba->hbalock); 2948 } 2949 2950 return 0; 2951 2952out_free_iocbq: 2953 lpfc_free_iocb_list(phba); 2954 2955 return -ENOMEM; 2956} 2957 2958/** 2959 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 2960 * @pdev: pointer to pci device data structure. 2961 * 2962 * This routine is invoked to allocate the driver hba data structure for an 2963 * HBA device. If the allocation is successful, the phba reference to the 2964 * PCI device data structure is set. 
2965 * 2966 * Return codes 2967 * pointer to @phba - sucessful 2968 * NULL - error 2969 **/ 2970static struct lpfc_hba * 2971lpfc_hba_alloc(struct pci_dev *pdev) 2972{ 2973 struct lpfc_hba *phba; 2974 2975 /* Allocate memory for HBA structure */ 2976 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 2977 if (!phba) { 2978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2979 "1417 Failed to allocate hba struct.\n"); 2980 return NULL; 2981 } 2982 2983 /* Set reference to PCI device in HBA structure */ 2984 phba->pcidev = pdev; 2985 2986 /* Assign an unused board number */ 2987 phba->brd_no = lpfc_get_instance(); 2988 if (phba->brd_no < 0) { 2989 kfree(phba); 2990 return NULL; 2991 } 2992 2993 return phba; 2994} 2995 2996/** 2997 * lpfc_hba_free - Free driver hba data structure with a device. 2998 * @phba: pointer to lpfc hba data structure. 2999 * 3000 * This routine is invoked to free the driver hba data structure with an 3001 * HBA device. 3002 **/ 3003static void 3004lpfc_hba_free(struct lpfc_hba *phba) 3005{ 3006 /* Release the driver assigned board number */ 3007 idr_remove(&lpfc_hba_index, phba->brd_no); 3008 3009 kfree(phba); 3010 return; 3011} 3012 3013/** 3014 * lpfc_create_shost - Create hba physical port with associated scsi host. 3015 * @phba: pointer to lpfc hba data structure. 3016 * 3017 * This routine is invoked to create HBA physical port and associate a SCSI 3018 * host with it. 
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	/* Default fibre channel timeout values used until the fabric
	 * provides real ones. */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	/* Passing the pcidev's device marks this as the physical port. */
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
3074 **/ 3075static void 3076lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 3077{ 3078 int pagecnt = 10; 3079 if (lpfc_prot_mask && lpfc_prot_guard) { 3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3081 "1478 Registering BlockGuard with the " 3082 "SCSI layer\n"); 3083 scsi_host_set_prot(shost, lpfc_prot_mask); 3084 scsi_host_set_guard(shost, lpfc_prot_guard); 3085 } 3086 if (!_dump_buf_data) { 3087 while (pagecnt) { 3088 spin_lock_init(&_dump_buf_lock); 3089 _dump_buf_data = 3090 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 3091 if (_dump_buf_data) { 3092 printk(KERN_ERR "BLKGRD allocated %d pages for " 3093 "_dump_buf_data at 0x%p\n", 3094 (1 << pagecnt), _dump_buf_data); 3095 _dump_buf_data_order = pagecnt; 3096 memset(_dump_buf_data, 0, 3097 ((1 << PAGE_SHIFT) << pagecnt)); 3098 break; 3099 } else 3100 --pagecnt; 3101 } 3102 if (!_dump_buf_data_order) 3103 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 3104 "memory for hexdump\n"); 3105 } else 3106 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" 3107 "\n", _dump_buf_data); 3108 if (!_dump_buf_dif) { 3109 while (pagecnt) { 3110 _dump_buf_dif = 3111 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 3112 if (_dump_buf_dif) { 3113 printk(KERN_ERR "BLKGRD allocated %d pages for " 3114 "_dump_buf_dif at 0x%p\n", 3115 (1 << pagecnt), _dump_buf_dif); 3116 _dump_buf_dif_order = pagecnt; 3117 memset(_dump_buf_dif, 0, 3118 ((1 << PAGE_SHIFT) << pagecnt)); 3119 break; 3120 } else 3121 --pagecnt; 3122 } 3123 if (!_dump_buf_dif_order) 3124 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 3125 "memory for hexdump\n"); 3126 } else 3127 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", 3128 _dump_buf_dif); 3129} 3130 3131/** 3132 * lpfc_post_init_setup - Perform necessary device post initialization setup. 3133 * @phba: pointer to lpfc hba data structure. 3134 * 3135 * This routine is invoked to perform all the necessary post initialization 3136 * setup for the device. 
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	/* NOTE(review): 10 slots are held back from can_queue, presumably
	 * for driver-internal commands -- confirm against SLI queue usage.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	/* When FCP ring interrupts are disabled, I/O completions are polled */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size: try 64-bit first, fall back to
	 * 32-bit if the platform cannot do 64-bit DMA.
	 */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
			return error;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	/* Carve mailbox, port control block and IOCB areas out of slim2p */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Assign each host buffer queue its slice of the HBQ slim area */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	/* Derive the individual register addresses from the two mappings */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with a interrupt handler, which is done in this function. Note that
 * later when device is unloading, the driver should always call free_irq()
 * on all MSI-X vectors it has done request_irq() on before calling
 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
 * will be left with MSI-X enabled and leaks its vectors.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure (all-or-nothing enable) */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

	/* Error labels unwind in reverse acquisition order: mailbox,
	 * fast-path irq, slow-path irq, then the MSI-X capability itself.
	 */
mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector.
The device driver is responsible for calling the 3463 * request_irq() to register MSI vector with a interrupt the handler, which 3464 * is done in this function. 3465 * 3466 * Return codes 3467 * 0 - sucessful 3468 * other values - error 3469 */ 3470static int 3471lpfc_sli_enable_msi(struct lpfc_hba *phba) 3472{ 3473 int rc; 3474 3475 rc = pci_enable_msi(phba->pcidev); 3476 if (!rc) 3477 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3478 "0462 PCI enable MSI mode success.\n"); 3479 else { 3480 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3481 "0471 PCI enable MSI mode failed (%d)\n", rc); 3482 return rc; 3483 } 3484 3485 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 3486 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 3487 if (rc) { 3488 pci_disable_msi(phba->pcidev); 3489 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3490 "0478 MSI request_irq failed (%d)\n", rc); 3491 } 3492 return rc; 3493} 3494 3495/** 3496 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 3497 * @phba: pointer to lpfc hba data structure. 3498 * 3499 * This routine is invoked to disable the MSI interrupt mode to device with 3500 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has 3501 * done request_irq() on before calling pci_disable_msi(). Failure to do so 3502 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 3503 * its vector. 3504 */ 3505static void 3506lpfc_sli_disable_msi(struct lpfc_hba *phba) 3507{ 3508 free_irq(phba->pcidev->irq, phba); 3509 pci_disable_msi(phba->pcidev); 3510 return; 3511} 3512 3513/** 3514 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 3515 * @phba: pointer to lpfc hba data structure. 3516 * 3517 * This routine is invoked to enable device interrupt and associate driver's 3518 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 3519 * spec. 
 * Depends on the interrupt mode configured to the driver, the driver
 * will try to fallback from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device in the order
 * of:
 *	MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	/* cfg_mode: 2 = MSI-X, 1 = MSI, 0 = INTx */
	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec.
 * Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	/* Mark the port unloading so in-flight work bails out early */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it can
 * support this kind of device.
 * If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1401 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Now, trying to enable interrupt and bring up the device.
	 * Each pass of the loop tries one interrupt mode and verifies
	 * that interrupts are actually delivered; on failure the mode
	 * is stepped down: 2 (MSI-X) -> 1 (MSI) -> 0 (INTx).
	 */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts;
		 * INTx (mode 0) needs no test since it cannot be misrouted.
		 */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Mark the port unloading so in-flight work bails out early */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bring the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space; always go to D3hot
	 * regardless of @msg since resume fully reinitializes anyway.
	 */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt, reusing the pre-suspend mode */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Permanent failure: no recovery possible, give the device up */
	if (state == pci_channel_io_perm_failure) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 PCI channel I/O permanent failure\n");
		/* Block all SCSI devices' I/Os on the host */
		lpfc_scsi_dev_block(phba);
		/* Clean up all driver's outstanding SCSI I/Os */
		lpfc_sli_flush_fcp_rings(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);
	/*
	 * There may be I/Os dropped by the firmware.
	 * Error iocb (I/O) on txcmplq and let the SCSI layer
	 * retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark the SLI layer inactive until the HBA is brought up again */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt, reusing the pre-error mode */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming
PCI I/O operation on SLI-3 device. 4112 * @pdev: pointer to PCI device 4113 * 4114 * This routine is called from the PCI subsystem for error handling to device 4115 * with SLI-3 interface spec. It is called when kernel error recovery tells 4116 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 4117 * error recovery. After this call, traffic can start to flow from this device 4118 * again. 4119 */ 4120static void 4121lpfc_io_resume_s3(struct pci_dev *pdev) 4122{ 4123 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4124 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 4125 4126 lpfc_online(phba); 4127} 4128 4129/** 4130 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 4131 * @pdev: pointer to PCI device 4132 * @pid: pointer to PCI device identifier 4133 * 4134 * This routine is to be registered to the kernel's PCI subsystem. When an 4135 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 4136 * at PCI device-specific information of the device and driver to see if the 4137 * driver state that it can support this kind of device. If the match is 4138 * successful, the driver core invokes this routine. This routine dispatches 4139 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 4140 * do all the initialization that it needs to do to handle the HBA device 4141 * properly. 
4142 * 4143 * Return code 4144 * 0 - driver can claim the device 4145 * negative value - driver can not claim the device 4146 **/ 4147static int __devinit 4148lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 4149{ 4150 int rc; 4151 uint16_t dev_id; 4152 4153 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) 4154 return -ENODEV; 4155 4156 switch (dev_id) { 4157 default: 4158 rc = lpfc_pci_probe_one_s3(pdev, pid); 4159 break; 4160 } 4161 return rc; 4162} 4163 4164/** 4165 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 4166 * @pdev: pointer to PCI device 4167 * 4168 * This routine is to be registered to the kernel's PCI subsystem. When an 4169 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 4170 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 4171 * remove routine, which will perform all the necessary cleanup for the 4172 * device to be removed from the PCI subsystem properly. 4173 **/ 4174static void __devexit 4175lpfc_pci_remove_one(struct pci_dev *pdev) 4176{ 4177 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4178 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 4179 4180 switch (phba->pci_dev_grp) { 4181 case LPFC_PCI_DEV_LP: 4182 lpfc_pci_remove_one_s3(pdev); 4183 break; 4184 default: 4185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4186 "1424 Invalid PCI device group: 0x%x\n", 4187 phba->pci_dev_grp); 4188 break; 4189 } 4190 return; 4191} 4192 4193/** 4194 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 4195 * @pdev: pointer to PCI device 4196 * @msg: power management message 4197 * 4198 * This routine is to be registered to the kernel's PCI subsystem to support 4199 * system Power Management (PM). When PM invokes this method, it dispatches 4200 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 4201 * suspend the device. 
4202 * 4203 * Return code 4204 * 0 - driver suspended the device 4205 * Error otherwise 4206 **/ 4207static int 4208lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 4209{ 4210 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4211 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 4212 int rc = -ENODEV; 4213 4214 switch (phba->pci_dev_grp) { 4215 case LPFC_PCI_DEV_LP: 4216 rc = lpfc_pci_suspend_one_s3(pdev, msg); 4217 break; 4218 default: 4219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4220 "1425 Invalid PCI device group: 0x%x\n", 4221 phba->pci_dev_grp); 4222 break; 4223 } 4224 return rc; 4225} 4226 4227/** 4228 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 4229 * @pdev: pointer to PCI device 4230 * 4231 * This routine is to be registered to the kernel's PCI subsystem to support 4232 * system Power Management (PM). When PM invokes this method, it dispatches 4233 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 4234 * resume the device. 4235 * 4236 * Return code 4237 * 0 - driver suspended the device 4238 * Error otherwise 4239 **/ 4240static int 4241lpfc_pci_resume_one(struct pci_dev *pdev) 4242{ 4243 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4244 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 4245 int rc = -ENODEV; 4246 4247 switch (phba->pci_dev_grp) { 4248 case LPFC_PCI_DEV_LP: 4249 rc = lpfc_pci_resume_one_s3(pdev); 4250 break; 4251 default: 4252 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4253 "1426 Invalid PCI device group: 0x%x\n", 4254 phba->pci_dev_grp); 4255 break; 4256 } 4257 return rc; 4258} 4259 4260/** 4261 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 4262 * @pdev: pointer to PCI device. 4263 * @state: the current PCI connection state. 4264 * 4265 * This routine is registered to the PCI subsystem for error handling. 
This 4266 * function is called by the PCI subsystem after a PCI bus error affecting 4267 * this device has been detected. When this routine is invoked, it dispatches 4268 * the action to the proper SLI-3 or SLI-4 device error detected handling 4269 * routine, which will perform the proper error detected operation. 4270 * 4271 * Return codes 4272 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 4273 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 4274 **/ 4275static pci_ers_result_t 4276lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 4277{ 4278 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4279 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 4280 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 4281 4282 switch (phba->pci_dev_grp) { 4283 case LPFC_PCI_DEV_LP: 4284 rc = lpfc_io_error_detected_s3(pdev, state); 4285 break; 4286 default: 4287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4288 "1427 Invalid PCI device group: 0x%x\n", 4289 phba->pci_dev_grp); 4290 break; 4291 } 4292 return rc; 4293} 4294 4295/** 4296 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 4297 * @pdev: pointer to PCI device. 4298 * 4299 * This routine is registered to the PCI subsystem for error handling. This 4300 * function is called after PCI bus has been reset to restart the PCI card 4301 * from scratch, as if from a cold-boot. When this routine is invoked, it 4302 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 4303 * routine, which will perform the proper device reset. 
4304 * 4305 * Return codes 4306 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 4307 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 4308 **/ 4309static pci_ers_result_t 4310lpfc_io_slot_reset(struct pci_dev *pdev) 4311{ 4312 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4313 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 4314 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 4315 4316 switch (phba->pci_dev_grp) { 4317 case LPFC_PCI_DEV_LP: 4318 rc = lpfc_io_slot_reset_s3(pdev); 4319 break; 4320 default: 4321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4322 "1428 Invalid PCI device group: 0x%x\n", 4323 phba->pci_dev_grp); 4324 break; 4325 } 4326 return rc; 4327} 4328 4329/** 4330 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 4331 * @pdev: pointer to PCI device 4332 * 4333 * This routine is registered to the PCI subsystem for error handling. It 4334 * is called when kernel error recovery tells the lpfc driver that it is 4335 * OK to resume normal PCI operation after PCI bus error recovery. When 4336 * this routine is invoked, it dispatches the action to the proper SLI-3 4337 * or SLI-4 device io_resume routine, which will resume the device operation. 
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Dispatch resume to the device-group specific handler. */
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/* PCI device IDs claimed by this driver; every entry matches any
 * subvendor/subdevice (PCI_ANY_ID).
 */
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

/* PCI error-recovery entry points registered with the PCI core. */
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	/* With NPIV enabled, hook vport create/delete into the FC
	 * transport template before attaching it.
	 */
	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	/* On registration failure, release the transport template(s)
	 * attached above so nothing leaks.
	 */
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	/* Tear down in reverse order of lpfc_init(): unregister the PCI
	 * driver first, then release the FC transport template(s).
	 */
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	/* Free the BlockGuard (BLKGRD) dump buffers, if any were allocated. */
	if (_dump_buf_data) {
		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
				"at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
				"at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);