lpfc_init.c revision 9795724476860069ce183ead59d0a5958f882037
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

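	/*
	 * The VPD region is fetched in chunks with repeated DUMP mailbox
	 * commands into the buffer just allocated; a returned word_cnt of
	 * zero (end of data or a mailbox error) ends the loop below.
	 */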
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	lpfc_read_sparam(phba, pmb, 0);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
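	/*
	 * Each of the low 6 bytes of the WWNN below yields two serial-number
	 * characters, one per nibble: nibble values 0-9 map to '0'-'9'
	 * (0x30 + j) and values 10-15 map to 'a'-'f' (0x61 + (j - 10)).
	 */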
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

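	/*
	 * Validate the requested link speed against the link module type
	 * (lmt) bits just read from READ_CONFIG; any speed the module
	 * cannot support falls back to autonegotiation.
	 */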
457 "Reset link speed to auto: x%x\n", 458 phba->cfg_link_speed); 459 phba->cfg_link_speed = LINK_SPEED_AUTO; 460 } 461 462 phba->link_state = LPFC_LINK_DOWN; 463 464 /* Only process IOCBs on ELS ring till hba_state is READY */ 465 if (psli->ring[psli->extra_ring].cmdringaddr) 466 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 467 if (psli->ring[psli->fcp_ring].cmdringaddr) 468 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 469 if (psli->ring[psli->next_ring].cmdringaddr) 470 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 471 472 /* Post receive buffers for desired rings */ 473 if (phba->sli_rev != 3) 474 lpfc_post_rcv_buf(phba); 475 476 /* 477 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 478 */ 479 if (phba->intr_type == MSIX) { 480 rc = lpfc_config_msi(phba, pmb); 481 if (rc) { 482 mempool_free(pmb, phba->mbox_mem_pool); 483 return -EIO; 484 } 485 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 486 if (rc != MBX_SUCCESS) { 487 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 488 "0352 Config MSI mailbox command " 489 "failed, mbxCmd x%x, mbxStatus x%x\n", 490 pmb->u.mb.mbxCommand, 491 pmb->u.mb.mbxStatus); 492 mempool_free(pmb, phba->mbox_mem_pool); 493 return -EIO; 494 } 495 } 496 497 spin_lock_irq(&phba->hbalock); 498 /* Initialize ERATT handling flag */ 499 phba->hba_flag &= ~HBA_ERATT_HANDLED; 500 501 /* Enable appropriate host interrupts */ 502 status = readl(phba->HCregaddr); 503 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 504 if (psli->num_rings > 0) 505 status |= HC_R0INT_ENA; 506 if (psli->num_rings > 1) 507 status |= HC_R1INT_ENA; 508 if (psli->num_rings > 2) 509 status |= HC_R2INT_ENA; 510 if (psli->num_rings > 3) 511 status |= HC_R3INT_ENA; 512 513 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 514 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 515 status &= ~(HC_R0INT_ENA); 516 517 writel(status, phba->HCregaddr); 518 readl(phba->HCregaddr); /* flush */ 519 spin_unlock_irq(&phba->hbalock); 520 521 /* Set up ring-0 (ELS) timer */ 522 timeout = phba->fc_ratov * 2; 523 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 524 /* Set up heart beat (HB) timer */ 525 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 526 phba->hb_outstanding = 0; 527 phba->last_completion_time = jiffies; 528 /* Set up error attention (ERATT) polling timer */ 529 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 530 531 if (phba->hba_flag & LINK_DISABLED) { 532 lpfc_printf_log(phba, 533 KERN_ERR, LOG_INIT, 534 "2598 Adapter Link is disabled.\n"); 535 lpfc_down_link(phba, pmb); 536 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 537 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 538 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 539 lpfc_printf_log(phba, 540 KERN_ERR, LOG_INIT, 541 "2599 Adapter failed to issue DOWN_LINK" 542 " mbox command rc 0x%x\n", rc); 543 544 mempool_free(pmb, phba->mbox_mem_pool); 545 return -EIO; 546 } 547 } else { 548 lpfc_init_link(phba, pmb, phba->cfg_topology, 549 phba->cfg_link_speed); 550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 551 lpfc_set_loopback_flag(phba); 552 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 553 if (rc != MBX_SUCCESS) { 554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 555 "0454 Adapter failed to init, mbxCmd x%x " 556 "INIT_LINK, mbxStatus x%x\n", 557 mb->mbxCommand, mb->mbxStatus); 558 559 /* Clear all interrupt enable conditions */ 560 writel(0, phba->HCregaddr); 561 readl(phba->HCregaddr); /* flush */ 562 /* Clear all pending interrupts */ 563 
	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			 phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

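/*
 * Note on the timer routines below: lpfc_hb_timeout() runs in timer
 * (softirq) context, so it only posts an event bit under work_port_lock and
 * wakes the worker thread; the real work runs later, in process context, in
 * lpfc_hb_timeout_handler().
 */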
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA shall
 * be taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

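	/*
	 * If any completion arrived within the last heart-beat interval
	 * the adapter is demonstrably alive, so this round only rearms
	 * the timer.
	 */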
	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it has triggered an error attention. That
	 * could cause I/Os to be dropped by the firmware. Error out the
	 * iocbs (I/Os) on the txcmplq and let the SCSI layer retry them
	 * after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

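/**
 * lpfc_board_errevt_to_mgmt - Post a board error event for a mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event
 * (LPFC_EVENT_PORTINTERR) through the fc transport so that a management
 * application can be notified of the board error.
 **/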
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an error attention
		 * with HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

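/*
 * The SLI3/SLI4 handlers above are reached only through the API jump table
 * function pointers kept in struct lpfc_hba, so the wrapper below stays
 * SLI-revision agnostic.
 */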
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
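	/*
	 * The buffer is a sequence of PCI VPD resource descriptors: tags
	 * 0x82 (identifier string) and 0x91 (read/write VPD) are skipped
	 * using their two-byte little-endian length, tag 0x90 (read-only
	 * VPD) holds the SN and V1-V4 keywords parsed below, and tag 0x78
	 * marks the end of the data.
	 */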
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars.
 * It shall be returned with the model name, maximum speed, and the host bus
 * type. The @mdp passed into this function points to an array of 80 chars.
 * When the function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
Adapter"}; 1634 break; 1635 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1636 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1637 GE = 1; 1638 break; 1639 case PCI_DEVICE_ID_ZMID: 1640 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1641 break; 1642 case PCI_DEVICE_ID_ZSMB: 1643 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1644 break; 1645 case PCI_DEVICE_ID_LP101: 1646 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1647 break; 1648 case PCI_DEVICE_ID_LP10000S: 1649 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1650 break; 1651 case PCI_DEVICE_ID_LP11000S: 1652 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1653 break; 1654 case PCI_DEVICE_ID_LPE11000S: 1655 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1656 break; 1657 case PCI_DEVICE_ID_SAT: 1658 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1659 break; 1660 case PCI_DEVICE_ID_SAT_MID: 1661 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1662 break; 1663 case PCI_DEVICE_ID_SAT_SMB: 1664 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1665 break; 1666 case PCI_DEVICE_ID_SAT_DCSP: 1667 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1668 break; 1669 case PCI_DEVICE_ID_SAT_SCSP: 1670 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1671 break; 1672 case PCI_DEVICE_ID_SAT_S: 1673 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1674 break; 1675 case PCI_DEVICE_ID_HORNET: 1676 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1677 GE = 1; 1678 break; 1679 case PCI_DEVICE_ID_PROTEUS_VF: 1680 m = (typeof(m)){"LPev12000", "PCIe IOV", 1681 "Fibre Channel Adapter"}; 1682 break; 1683 case PCI_DEVICE_ID_PROTEUS_PF: 1684 m = (typeof(m)){"LPev12000", "PCIe IOV", 1685 "Fibre Channel Adapter"}; 1686 break; 1687 case PCI_DEVICE_ID_PROTEUS_S: 1688 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1689 "Fibre Channel Adapter"}; 1690 break; 1691 case PCI_DEVICE_ID_TIGERSHARK: 1692 oneConnect = 1; 1693 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1694 break; 1695 case PCI_DEVICE_ID_TOMCAT: 1696 oneConnect = 1; 1697 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1698 break; 1699 case PCI_DEVICE_ID_FALCON: 1700 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1701 "EmulexSecure Fibre"}; 1702 break; 1703 default: 1704 m = (typeof(m)){"Unknown", "", ""}; 1705 break; 1706 } 1707 1708 if (mdp && mdp[0] == '\0') 1709 snprintf(mdp, 79,"%s", m.name); 1710 /* oneConnect hba requires special processing, they are all initiators 1711 * and we put the port number on the end 1712 */ 1713 if (descp && descp[0] == '\0') { 1714 if (oneConnect) 1715 snprintf(descp, 255, 1716 "Emulex OneConnect %s, %s Initiator, Port %s", 1717 m.name, m.function, 1718 phba->Port); 1719 else 1720 snprintf(descp, 255, 1721 "Emulex %s %d%s %s %s", 1722 m.name, max_speed, (GE) ? "GE" : "Gb", 1723 m.bus, m.function); 1724 } 1725} 1726 1727/** 1728 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1729 * @phba: pointer to lpfc hba data structure. 1730 * @pring: pointer to a IOCB ring. 1731 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1732 * 1733 * This routine posts a given number of IOCBs with the associated DMA buffer 1734 * descriptors specified by the cnt argument to the given IOCB ring. 1735 * 1736 * Return codes 1737 * The number of IOCBs NOT able to be posted to the IOCB ring. 
	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
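/*
 * S(N, V) is a 32-bit rotate-left of V by N bits, the ROTL primitive of
 * SHA-1: lpfc_sha_init() seeds the five standard SHA-1 chaining values
 * (H0-H4) and lpfc_sha_iterate() runs the 80-round compression function.
 */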
1860 **/
1861static void
1862lpfc_sha_init(uint32_t * HashResultPointer)
1863{
1864	HashResultPointer[0] = 0x67452301;
1865	HashResultPointer[1] = 0xEFCDAB89;
1866	HashResultPointer[2] = 0x98BADCFE;
1867	HashResultPointer[3] = 0x10325476;
1868	HashResultPointer[4] = 0xC3D2E1F0;
1869}
1870
1871/**
1872 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1873 * @HashResultPointer: pointer to an initial/result hash table.
1874 * @HashWorkingPointer: pointer to a working hash table.
1875 *
1876 * This routine iterates an initial hash table pointed to by
1877 * @HashResultPointer with the values from the working hash table pointed to
1878 * by @HashWorkingPointer. The results are put back into the initial hash
1879 * table and returned through @HashResultPointer as the result hash table.
1880 **/
1881static void
1882lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1883{
1884	int t;
1885	uint32_t TEMP;
1886	uint32_t A, B, C, D, E;
1887	t = 16;
1888	do {
1889		HashWorkingPointer[t] =
1890		    S(1,
1891		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1892								     8] ^
1893		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1894	} while (++t <= 79);
1895	t = 0;
1896	A = HashResultPointer[0];
1897	B = HashResultPointer[1];
1898	C = HashResultPointer[2];
1899	D = HashResultPointer[3];
1900	E = HashResultPointer[4];
1901
1902	do {
1903		if (t < 20) {
1904			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1905		} else if (t < 40) {
1906			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1907		} else if (t < 60) {
1908			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1909		} else {
1910			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1911		}
1912		TEMP += S(5, A) + E + HashWorkingPointer[t];
1913		E = D;
1914		D = C;
1915		C = S(30, B);
1916		B = A;
1917		A = TEMP;
1918	} while (++t <= 79);
1919
1920	HashResultPointer[0] += A;
1921	HashResultPointer[1] += B;
1922	HashResultPointer[2] += C;
1923	HashResultPointer[3] += D;
1924	HashResultPointer[4] += E;
1925
1926}
1927
1928/**
1929 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1930 * @RandomChallenge: pointer to the entry of host challenge random number array.
1931 * @HashWorking: pointer to the entry of the working hash array.
1932 *
1933 * This routine calculates the working hash array referred by @HashWorking
1934 * from the challenge random numbers associated with the host, referred by
1935 * @RandomChallenge. The result is put into the entry of the working hash
1936 * array and returned by reference through @HashWorking.
1937 **/
1938static void
1939lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1940{
1941	*HashWorking = (*RandomChallenge ^ *HashWorking);
1942}
1943
1944/**
1945 * lpfc_hba_init - Perform special handling for LC HBA initialization
1946 * @phba: pointer to lpfc hba data structure.
1947 * @hbainit: pointer to an array of unsigned 32-bit integers.
1948 *
1949 * This routine performs the special handling for LC HBA initialization.
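 *
 * The working hash is seeded with the adapter WWNN, mixed with the host
 * RandomData challenge words via lpfc_challenge_key(), and then run
 * through the SHA-1 rounds above; the five resulting words are returned
 * to the caller through @hbainit.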
1950 **/
1951void
1952lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1953{
1954	int t;
1955	uint32_t *HashWorking;
1956	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1957
1958	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1959	if (!HashWorking)
1960		return;
1961
1962	HashWorking[0] = HashWorking[78] = *pwwnn++;
1963	HashWorking[1] = HashWorking[79] = *pwwnn;
1964
1965	for (t = 0; t < 7; t++)
1966		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1967
1968	lpfc_sha_init(hbainit);
1969	lpfc_sha_iterate(hbainit, HashWorking);
1970	kfree(HashWorking);
1971}
1972
1973/**
1974 * lpfc_cleanup - Performs vport cleanups before deleting a vport
1975 * @vport: pointer to a virtual N_Port data structure.
1976 *
1977 * This routine performs the necessary cleanups before deleting the @vport.
1978 * It invokes the discovery state machine to perform necessary state
1979 * transitions and to release the ndlps associated with the @vport. Note,
1980 * the physical port is treated as @vport 0.
1981 **/
1982void
1983lpfc_cleanup(struct lpfc_vport *vport)
1984{
1985	struct lpfc_hba   *phba = vport->phba;
1986	struct lpfc_nodelist *ndlp, *next_ndlp;
1987	int i = 0;
1988
1989	if (phba->link_state > LPFC_LINK_DOWN)
1990		lpfc_port_link_failure(vport);
1991
1992	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1993		if (!NLP_CHK_NODE_ACT(ndlp)) {
1994			ndlp = lpfc_enable_node(vport, ndlp,
1995						NLP_STE_UNUSED_NODE);
1996			if (!ndlp)
1997				continue;
1998			spin_lock_irq(&phba->ndlp_lock);
1999			NLP_SET_FREE_REQ(ndlp);
2000			spin_unlock_irq(&phba->ndlp_lock);
2001			/* Trigger the release of the ndlp memory */
2002			lpfc_nlp_put(ndlp);
2003			continue;
2004		}
2005		spin_lock_irq(&phba->ndlp_lock);
2006		if (NLP_CHK_FREE_REQ(ndlp)) {
2007			/* The ndlp should not be in memory free mode already */
2008			spin_unlock_irq(&phba->ndlp_lock);
2009			continue;
2010		} else
2011			/* Indicate request for freeing ndlp memory */
2012			NLP_SET_FREE_REQ(ndlp);
2013		spin_unlock_irq(&phba->ndlp_lock);
2014
2015		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2016		    ndlp->nlp_DID == Fabric_DID) {
2017			/* Just free up ndlp with Fabric_DID for vports */
2018			lpfc_nlp_put(ndlp);
2019			continue;
2020		}
2021
2022		if (ndlp->nlp_type & NLP_FABRIC)
2023			lpfc_disc_state_machine(vport, ndlp, NULL,
2024					NLP_EVT_DEVICE_RECOVERY);
2025
2026		lpfc_disc_state_machine(vport, ndlp, NULL,
2027					     NLP_EVT_DEVICE_RM);
2028
2029	}
2030
2031	/* At this point, ALL ndlps should be gone
2032	 * because of the previous NLP_EVT_DEVICE_RM.
2033	 * Let's wait for this to happen, if needed.
2034	 */
2035	while (!list_empty(&vport->fc_nodes)) {
2036		if (i++ > 3000) {
2037			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2038				"0233 Nodelist not empty\n");
2039			list_for_each_entry_safe(ndlp, next_ndlp,
2040						&vport->fc_nodes, nlp_listp) {
2041				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2042						LOG_NODE,
2043						"0282 did:x%x ndlp:x%p "
2044						"usgmap:x%x refcnt:%d\n",
2045						ndlp->nlp_DID, (void *)ndlp,
2046						ndlp->nlp_usg_map,
2047						atomic_read(
2048							&ndlp->kref.refcount));
2049			}
2050			break;
2051		}
2052
2053		/* Wait for any activity on ndlps to settle */
2054		msleep(10);
2055	}
2056}
2057
2058/**
2059 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2060 * @vport: pointer to a virtual N_Port data structure.
2061 *
2062 * This routine stops all the timers associated with a @vport. This function
2063 * is invoked before disabling or deleting a @vport. Note that the physical
2064 * port is treated as @vport 0.
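 *
 * Note: del_timer_sync() is used here, so this must not be called from
 * interrupt context; it waits for a running timer handler to finish
 * before returning.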
2065 **/
2066void
2067lpfc_stop_vport_timers(struct lpfc_vport *vport)
2068{
2069	del_timer_sync(&vport->els_tmofunc);
2070	del_timer_sync(&vport->fc_fdmitmo);
2071	lpfc_can_disctmo(vport);
2072	return;
2073}
2074
2075/**
2076 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2077 * @phba: pointer to lpfc hba data structure.
2078 *
2079 * This routine stops all the timers associated with a HBA. This function is
2080 * invoked before either putting a HBA offline or unloading the driver.
2081 **/
2082void
2083lpfc_stop_hba_timers(struct lpfc_hba *phba)
2084{
2085	lpfc_stop_vport_timers(phba->pport);
2086	del_timer_sync(&phba->sli.mbox_tmo);
2087	del_timer_sync(&phba->fabric_block_timer);
2088	del_timer_sync(&phba->eratt_poll);
2089	del_timer_sync(&phba->hb_tmofunc);
2090	phba->hb_outstanding = 0;
2091
2092	switch (phba->pci_dev_grp) {
2093	case LPFC_PCI_DEV_LP:
2094		/* Stop any LightPulse device specific driver timers */
2095		del_timer_sync(&phba->fcp_poll_timer);
2096		break;
2097	case LPFC_PCI_DEV_OC:
2098		/* Stop any OneConnect device specific driver timers */
2099		break;
2100	default:
2101		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2102				"0297 Invalid device group (x%x)\n",
2103				phba->pci_dev_grp);
2104		break;
2105	}
2106	return;
2107}
2108
2109/**
2110 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2111 * @phba: pointer to lpfc hba data structure.
2112 *
2113 * This routine marks a HBA's management interface as blocked. Once the HBA's
2114 * management interface is marked as blocked, all the user space access to
2115 * the HBA, whether from the sysfs interface or the libdfc interface, will
2116 * be blocked. The HBA is set to block the management interface when the
2117 * driver prepares the HBA interface for online or offline.
2118 **/
2119static void
2120lpfc_block_mgmt_io(struct lpfc_hba * phba)
2121{
2122	unsigned long iflag;
2123
2124	spin_lock_irqsave(&phba->hbalock, iflag);
2125	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2126	spin_unlock_irqrestore(&phba->hbalock, iflag);
2127}
2128
2129/**
2130 * lpfc_online - Initialize and bring a HBA online
2131 * @phba: pointer to lpfc hba data structure.
2132 *
2133 * This routine initializes the HBA and brings a HBA online. During this
2134 * process, the management interface is blocked to prevent user space access
2135 * to the HBA interfering with the driver initialization.
2136 * 2137 * Return codes 2138 * 0 - successful 2139 * 1 - failed 2140 **/ 2141int 2142lpfc_online(struct lpfc_hba *phba) 2143{ 2144 struct lpfc_vport *vport; 2145 struct lpfc_vport **vports; 2146 int i; 2147 2148 if (!phba) 2149 return 0; 2150 vport = phba->pport; 2151 2152 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2153 return 0; 2154 2155 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2156 "0458 Bring Adapter online\n"); 2157 2158 lpfc_block_mgmt_io(phba); 2159 2160 if (!lpfc_sli_queue_setup(phba)) { 2161 lpfc_unblock_mgmt_io(phba); 2162 return 1; 2163 } 2164 2165 if (phba->sli_rev == LPFC_SLI_REV4) { 2166 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2167 lpfc_unblock_mgmt_io(phba); 2168 return 1; 2169 } 2170 } else { 2171 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2172 lpfc_unblock_mgmt_io(phba); 2173 return 1; 2174 } 2175 } 2176 2177 vports = lpfc_create_vport_work_array(phba); 2178 if (vports != NULL) 2179 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2180 struct Scsi_Host *shost; 2181 shost = lpfc_shost_from_vport(vports[i]); 2182 spin_lock_irq(shost->host_lock); 2183 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2184 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2185 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2186 if (phba->sli_rev == LPFC_SLI_REV4) 2187 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2188 spin_unlock_irq(shost->host_lock); 2189 } 2190 lpfc_destroy_vport_work_array(phba, vports); 2191 2192 lpfc_unblock_mgmt_io(phba); 2193 return 0; 2194} 2195 2196/** 2197 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2198 * @phba: pointer to lpfc hba data structure. 2199 * 2200 * This routine marks a HBA's management interface as not blocked. Once the 2201 * HBA's management interface is marked as not blocked, all the user space 2202 * access to the HBA, whether they are from sysfs interface or libdfc 2203 * interface will be allowed. The HBA is set to block the management interface 2204 * when the driver prepares the HBA interface for online or offline and then 2205 * set to unblock the management interface afterwards. 2206 **/ 2207void 2208lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2209{ 2210 unsigned long iflag; 2211 2212 spin_lock_irqsave(&phba->hbalock, iflag); 2213 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2214 spin_unlock_irqrestore(&phba->hbalock, iflag); 2215} 2216 2217/** 2218 * lpfc_offline_prep - Prepare a HBA to be brought offline 2219 * @phba: pointer to lpfc hba data structure. 2220 * 2221 * This routine is invoked to prepare a HBA to be brought offline. It performs 2222 * unregistration login to all the nodes on all vports and flushes the mailbox 2223 * queue to make it ready to be brought offline. 
2224 **/ 2225void 2226lpfc_offline_prep(struct lpfc_hba * phba) 2227{ 2228 struct lpfc_vport *vport = phba->pport; 2229 struct lpfc_nodelist *ndlp, *next_ndlp; 2230 struct lpfc_vport **vports; 2231 int i; 2232 2233 if (vport->fc_flag & FC_OFFLINE_MODE) 2234 return; 2235 2236 lpfc_block_mgmt_io(phba); 2237 2238 lpfc_linkdown(phba); 2239 2240 /* Issue an unreg_login to all nodes on all vports */ 2241 vports = lpfc_create_vport_work_array(phba); 2242 if (vports != NULL) { 2243 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2244 struct Scsi_Host *shost; 2245 2246 if (vports[i]->load_flag & FC_UNLOADING) 2247 continue; 2248 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2249 shost = lpfc_shost_from_vport(vports[i]); 2250 list_for_each_entry_safe(ndlp, next_ndlp, 2251 &vports[i]->fc_nodes, 2252 nlp_listp) { 2253 if (!NLP_CHK_NODE_ACT(ndlp)) 2254 continue; 2255 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2256 continue; 2257 if (ndlp->nlp_type & NLP_FABRIC) { 2258 lpfc_disc_state_machine(vports[i], ndlp, 2259 NULL, NLP_EVT_DEVICE_RECOVERY); 2260 lpfc_disc_state_machine(vports[i], ndlp, 2261 NULL, NLP_EVT_DEVICE_RM); 2262 } 2263 spin_lock_irq(shost->host_lock); 2264 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2265 spin_unlock_irq(shost->host_lock); 2266 lpfc_unreg_rpi(vports[i], ndlp); 2267 } 2268 } 2269 } 2270 lpfc_destroy_vport_work_array(phba, vports); 2271 2272 lpfc_sli_mbox_sys_shutdown(phba); 2273} 2274 2275/** 2276 * lpfc_offline - Bring a HBA offline 2277 * @phba: pointer to lpfc hba data structure. 2278 * 2279 * This routine actually brings a HBA offline. It stops all the timers 2280 * associated with the HBA, brings down the SLI layer, and eventually 2281 * marks the HBA as in offline state for the upper layer protocol. 2282 **/ 2283void 2284lpfc_offline(struct lpfc_hba *phba) 2285{ 2286 struct Scsi_Host *shost; 2287 struct lpfc_vport **vports; 2288 int i; 2289 2290 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2291 return; 2292 2293 /* stop port and all timers associated with this hba */ 2294 lpfc_stop_port(phba); 2295 vports = lpfc_create_vport_work_array(phba); 2296 if (vports != NULL) 2297 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2298 lpfc_stop_vport_timers(vports[i]); 2299 lpfc_destroy_vport_work_array(phba, vports); 2300 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2301 "0460 Bring Adapter offline\n"); 2302 /* Bring down the SLI Layer and cleanup. The HBA is offline 2303 now. */ 2304 lpfc_sli_hba_down(phba); 2305 spin_lock_irq(&phba->hbalock); 2306 phba->work_ha = 0; 2307 spin_unlock_irq(&phba->hbalock); 2308 vports = lpfc_create_vport_work_array(phba); 2309 if (vports != NULL) 2310 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2311 shost = lpfc_shost_from_vport(vports[i]); 2312 spin_lock_irq(shost->host_lock); 2313 vports[i]->work_port_events = 0; 2314 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2315 spin_unlock_irq(shost->host_lock); 2316 } 2317 lpfc_destroy_vport_work_array(phba, vports); 2318} 2319 2320/** 2321 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2322 * @phba: pointer to lpfc hba data structure. 2323 * 2324 * This routine is to free all the SCSI buffers and IOCBs from the driver 2325 * list back to kernel. It is called from lpfc_pci_remove_one to free 2326 * the internal resources before the device is removed from the system. 
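 *
 * Note: the lists are walked with phba->hbalock held and, for the SCSI
 * buffer list, scsi_buf_list_lock nested inside it.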
2327 * 2328 * Return codes 2329 * 0 - successful (for now, it always returns 0) 2330 **/ 2331static int 2332lpfc_scsi_free(struct lpfc_hba *phba) 2333{ 2334 struct lpfc_scsi_buf *sb, *sb_next; 2335 struct lpfc_iocbq *io, *io_next; 2336 2337 spin_lock_irq(&phba->hbalock); 2338 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2339 spin_lock(&phba->scsi_buf_list_lock); 2340 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2341 list_del(&sb->list); 2342 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2343 sb->dma_handle); 2344 kfree(sb); 2345 phba->total_scsi_bufs--; 2346 } 2347 spin_unlock(&phba->scsi_buf_list_lock); 2348 2349 /* Release all the lpfc_iocbq entries maintained by this host. */ 2350 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2351 list_del(&io->list); 2352 kfree(io); 2353 phba->total_iocbq_bufs--; 2354 } 2355 spin_unlock_irq(&phba->hbalock); 2356 return 0; 2357} 2358 2359/** 2360 * lpfc_create_port - Create an FC port 2361 * @phba: pointer to lpfc hba data structure. 2362 * @instance: a unique integer ID to this FC port. 2363 * @dev: pointer to the device data structure. 2364 * 2365 * This routine creates a FC port for the upper layer protocol. The FC port 2366 * can be created on top of either a physical port or a virtual port provided 2367 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2368 * and associates the FC port created before adding the shost into the SCSI 2369 * layer. 2370 * 2371 * Return codes 2372 * @vport - pointer to the virtual N_Port data structure. 2373 * NULL - port create failed. 2374 **/ 2375struct lpfc_vport * 2376lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2377{ 2378 struct lpfc_vport *vport; 2379 struct Scsi_Host *shost; 2380 int error = 0; 2381 2382 if (dev != &phba->pcidev->dev) 2383 shost = scsi_host_alloc(&lpfc_vport_template, 2384 sizeof(struct lpfc_vport)); 2385 else 2386 shost = scsi_host_alloc(&lpfc_template, 2387 sizeof(struct lpfc_vport)); 2388 if (!shost) 2389 goto out; 2390 2391 vport = (struct lpfc_vport *) shost->hostdata; 2392 vport->phba = phba; 2393 vport->load_flag |= FC_LOADING; 2394 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2395 vport->fc_rscn_flush = 0; 2396 2397 lpfc_get_vport_cfgparam(vport); 2398 shost->unique_id = instance; 2399 shost->max_id = LPFC_MAX_TARGET; 2400 shost->max_lun = vport->cfg_max_luns; 2401 shost->this_id = -1; 2402 shost->max_cmd_len = 16; 2403 if (phba->sli_rev == LPFC_SLI_REV4) { 2404 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; 2405 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2406 } 2407 2408 /* 2409 * Set initial can_queue value since 0 is no longer supported and 2410 * scsi_add_host will fail. This will be adjusted later based on the 2411 * max xri value determined in hba setup. 2412 */ 2413 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2414 if (dev != &phba->pcidev->dev) { 2415 shost->transportt = lpfc_vport_transport_template; 2416 vport->port_type = LPFC_NPIV_PORT; 2417 } else { 2418 shost->transportt = lpfc_transport_template; 2419 vport->port_type = LPFC_PHYSICAL_PORT; 2420 } 2421 2422 /* Initialize all internally managed lists. 
*/ 2423 INIT_LIST_HEAD(&vport->fc_nodes); 2424 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2425 spin_lock_init(&vport->work_port_lock); 2426 2427 init_timer(&vport->fc_disctmo); 2428 vport->fc_disctmo.function = lpfc_disc_timeout; 2429 vport->fc_disctmo.data = (unsigned long)vport; 2430 2431 init_timer(&vport->fc_fdmitmo); 2432 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2433 vport->fc_fdmitmo.data = (unsigned long)vport; 2434 2435 init_timer(&vport->els_tmofunc); 2436 vport->els_tmofunc.function = lpfc_els_timeout; 2437 vport->els_tmofunc.data = (unsigned long)vport; 2438 2439 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2440 if (error) 2441 goto out_put_shost; 2442 2443 spin_lock_irq(&phba->hbalock); 2444 list_add_tail(&vport->listentry, &phba->port_list); 2445 spin_unlock_irq(&phba->hbalock); 2446 return vport; 2447 2448out_put_shost: 2449 scsi_host_put(shost); 2450out: 2451 return NULL; 2452} 2453 2454/** 2455 * destroy_port - destroy an FC port 2456 * @vport: pointer to an lpfc virtual N_Port data structure. 2457 * 2458 * This routine destroys a FC port from the upper layer protocol. All the 2459 * resources associated with the port are released. 2460 **/ 2461void 2462destroy_port(struct lpfc_vport *vport) 2463{ 2464 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2465 struct lpfc_hba *phba = vport->phba; 2466 2467 lpfc_debugfs_terminate(vport); 2468 fc_remove_host(shost); 2469 scsi_remove_host(shost); 2470 2471 spin_lock_irq(&phba->hbalock); 2472 list_del_init(&vport->listentry); 2473 spin_unlock_irq(&phba->hbalock); 2474 2475 lpfc_cleanup(vport); 2476 return; 2477} 2478 2479/** 2480 * lpfc_get_instance - Get a unique integer ID 2481 * 2482 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2483 * uses the kernel idr facility to perform the task. 2484 * 2485 * Return codes: 2486 * instance - a unique integer ID allocated as the new instance. 2487 * -1 - lpfc get instance failed. 2488 **/ 2489int 2490lpfc_get_instance(void) 2491{ 2492 int instance = 0; 2493 2494 /* Assign an unused number */ 2495 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2496 return -1; 2497 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2498 return -1; 2499 return instance; 2500} 2501 2502/** 2503 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2504 * @shost: pointer to SCSI host data structure. 2505 * @time: elapsed time of the scan in jiffies. 2506 * 2507 * This routine is called by the SCSI layer with a SCSI host to determine 2508 * whether the scan host is finished. 2509 * 2510 * Note: there is no scan_start function as adapter initialization will have 2511 * asynchronously kicked off the link initialization. 2512 * 2513 * Return codes 2514 * 0 - SCSI host scan is not over yet. 2515 * 1 - SCSI host scan is over. 2516 **/ 2517int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2518{ 2519 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2520 struct lpfc_hba *phba = vport->phba; 2521 int stat = 0; 2522 2523 spin_lock_irq(shost->host_lock); 2524 2525 if (vport->load_flag & FC_UNLOADING) { 2526 stat = 1; 2527 goto finished; 2528 } 2529 if (time >= 30 * HZ) { 2530 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2531 "0461 Scanning longer than 30 " 2532 "seconds. Continuing initialization\n"); 2533 stat = 1; 2534 goto finished; 2535 } 2536 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2537 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2538 "0465 Link down longer than 15 " 2539 "seconds. 
Continuing initialization\n");
2540		stat = 1;
2541		goto finished;
2542	}
2543
2544	if (vport->port_state != LPFC_VPORT_READY)
2545		goto finished;
2546	if (vport->num_disc_nodes || vport->fc_prli_sent)
2547		goto finished;
2548	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2549		goto finished;
2550	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2551		goto finished;
2552
2553	stat = 1;
2554
2555finished:
2556	spin_unlock_irq(shost->host_lock);
2557	return stat;
2558}
2559
2560/**
2561 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2562 * @shost: pointer to SCSI host data structure.
2563 *
2564 * This routine initializes a given SCSI host's attributes on a FC port. The
2565 * SCSI host can be either on top of a physical port or a virtual port.
2566 **/
2567void lpfc_host_attrib_init(struct Scsi_Host *shost)
2568{
2569	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2570	struct lpfc_hba   *phba = vport->phba;
2571	/*
2572	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2573	 */
2574
2575	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2576	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2577	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2578
2579	memset(fc_host_supported_fc4s(shost), 0,
2580	       sizeof(fc_host_supported_fc4s(shost)));
2581	fc_host_supported_fc4s(shost)[2] = 1;
2582	fc_host_supported_fc4s(shost)[7] = 1;
2583
2584	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2585				 sizeof fc_host_symbolic_name(shost));
2586
2587	fc_host_supported_speeds(shost) = 0;
2588	if (phba->lmt & LMT_10Gb)
2589		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2590	if (phba->lmt & LMT_8Gb)
2591		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2592	if (phba->lmt & LMT_4Gb)
2593		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2594	if (phba->lmt & LMT_2Gb)
2595		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2596	if (phba->lmt & LMT_1Gb)
2597		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2598
2599	fc_host_maxframe_size(shost) =
2600		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2601		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2602
2603	/* This value is also unchanging */
2604	memset(fc_host_active_fc4s(shost), 0,
2605	       sizeof(fc_host_active_fc4s(shost)));
2606	fc_host_active_fc4s(shost)[2] = 1;
2607	fc_host_active_fc4s(shost)[7] = 1;
2608
2609	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2610	spin_lock_irq(shost->host_lock);
2611	vport->load_flag &= ~FC_LOADING;
2612	spin_unlock_irq(shost->host_lock);
2613}
2614
2615/**
2616 * lpfc_stop_port_s3 - Stop SLI3 device port
2617 * @phba: pointer to lpfc hba data structure.
2618 *
2619 * This routine is invoked to stop an SLI3 device port; it stops the device
2620 * from generating interrupts and stops the device driver's timers for the
2621 * device.
2622 **/
2623static void
2624lpfc_stop_port_s3(struct lpfc_hba *phba)
2625{
2626	/* Clear all interrupt enable conditions */
2627	writel(0, phba->HCregaddr);
2628	readl(phba->HCregaddr); /* flush */
2629	/* Clear all pending interrupts */
2630	writel(0xffffffff, phba->HAregaddr);
2631	readl(phba->HAregaddr); /* flush */
2632
2633	/* Reset some HBA SLI setup states */
2634	lpfc_stop_hba_timers(phba);
2635	phba->pport->work_port_events = 0;
2636}
2637
2638/**
2639 * lpfc_stop_port_s4 - Stop SLI4 device port
2640 * @phba: pointer to lpfc hba data structure.
2641 *
2642 * This routine is invoked to stop an SLI4 device port; it stops the device
2643 * from generating interrupts and stops the device driver's timers for the
2644 * device.
2645 **/
2646static void
2647lpfc_stop_port_s4(struct lpfc_hba *phba)
2648{
2649	/* Reset some HBA SLI4 setup states */
2650	lpfc_stop_hba_timers(phba);
2651	phba->pport->work_port_events = 0;
2652	phba->sli4_hba.intr_enable = 0;
2653	/* Hard clear it for now, shall have more graceful way to wait later */
2654	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2655}
2656
2657/**
2658 * lpfc_stop_port - Wrapper function for stopping hba port
2659 * @phba: Pointer to HBA context object.
2660 *
2661 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2662 * the API jump table function pointer from the lpfc_hba struct.
2663 **/
2664void
2665lpfc_stop_port(struct lpfc_hba *phba)
2666{
2667	phba->lpfc_stop_port(phba);
2668}
2669
2670/**
2671 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2672 * @phba: pointer to lpfc hba data structure.
2673 *
2674 * This routine is invoked to remove the driver default fcf record from
2675 * the port. This routine currently acts on FCF Index 0.
2676 *
2677 **/
2678void
2679lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2680{
2681	int rc = 0;
2682	LPFC_MBOXQ_t *mboxq;
2683	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2684	uint32_t mbox_tmo, req_len;
2685	uint32_t shdr_status, shdr_add_status;
2686
2687	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2688	if (!mboxq) {
2689		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2690			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2691		return;
2692	}
2693
2694	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2695		  sizeof(struct lpfc_sli4_cfg_mhdr);
2696	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2697			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2698			      req_len, LPFC_SLI4_MBX_EMBED);
2699	/*
2700	 * In phase 1, there is a single FCF index, 0.  In phase2, the driver
2701	 * supports multiple FCF indices.
2702	 */
2703	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2704	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2705	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2706	       phba->fcf.fcf_indx);
2707
2708	if (!phba->sli4_hba.intr_enable)
2709		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2710	else {
2711		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2712		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2713	}
2714	/* The IOCTL status is embedded in the mailbox subheader. */
2715	shdr_status = bf_get(lpfc_mbox_hdr_status,
2716			     &del_fcf_record->header.cfg_shdr.response);
2717	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2718				 &del_fcf_record->header.cfg_shdr.response);
2719	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2720		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2721				"2516 DEL FCF of default FCF Index failed "
2722				"mbx status x%x, status x%x add_status x%x\n",
2723				rc, shdr_status, shdr_add_status);
2724	}
2725	if (rc != MBX_TIMEOUT)
2726		mempool_free(mboxq, phba->mbox_mem_pool);
2727}
2728
2729/**
2730 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2731 * @phba: pointer to lpfc hba data structure.
2732 *
2733 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2734 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2735 * was successful and the firmware supports FCoE. Any other return indicates
2736 * an error.
It is assumed that this function will be called before interrupts
2737 * are enabled.
2738 **/
2739static int
2740lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2741{
2742	int rc = 0;
2743	LPFC_MBOXQ_t *mboxq;
2744	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2745	uint32_t length;
2746	uint32_t shdr_status, shdr_add_status;
2747
2748	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2749	if (!mboxq) {
2750		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2751			"2621 Failed to allocate mbox for "
2752			"query firmware config cmd\n");
2753		return -ENOMEM;
2754	}
2755	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2756	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2757		  sizeof(struct lpfc_sli4_cfg_mhdr));
2758	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2759			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2760			 length, LPFC_SLI4_MBX_EMBED);
2761	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2762	/* The IOCTL status is embedded in the mailbox subheader. */
2763	shdr_status = bf_get(lpfc_mbox_hdr_status,
2764			     &query_fw_cfg->header.cfg_shdr.response);
2765	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2766				 &query_fw_cfg->header.cfg_shdr.response);
2767	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2768		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2769				"2622 Query Firmware Config failed "
2770				"mbx status x%x, status x%x add_status x%x\n",
2771				rc, shdr_status, shdr_add_status);
		/* On timeout the mailbox is still owned by the firmware,
		 * so it must not be returned to the pool here.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
2772		return -EINVAL;
2773	}
2774	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
2775		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2776			"2623 FCoE Function not supported by firmware. "
2777			"Function mode = %08x\n",
2778			query_fw_cfg->function_mode);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
2779		return -EINVAL;
2780	}
2781	if (rc != MBX_TIMEOUT)
2782		mempool_free(mboxq, phba->mbox_mem_pool);
2783	return 0;
2784}
2785
2786/**
2787 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2788 * @phba: pointer to lpfc hba data structure.
2789 * @acqe_link: pointer to the async link completion queue entry.
2790 *
2791 * This routine is to parse the SLI4 link-attention link fault code and
2792 * translate it into the base driver's read link attention mailbox command
2793 * status.
2794 *
2795 * Return: Link-attention status in terms of base driver's coding.
2796 **/
2797static uint16_t
2798lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2799			   struct lpfc_acqe_link *acqe_link)
2800{
2801	uint16_t latt_fault;
2802
2803	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2804	case LPFC_ASYNC_LINK_FAULT_NONE:
2805	case LPFC_ASYNC_LINK_FAULT_LOCAL:
2806	case LPFC_ASYNC_LINK_FAULT_REMOTE:
2807		latt_fault = 0;
2808		break;
2809	default:
2810		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2811				"0398 Invalid link fault code: x%x\n",
2812				bf_get(lpfc_acqe_link_fault, acqe_link));
2813		latt_fault = MBXERR_ERROR;
2814		break;
2815	}
2816	return latt_fault;
2817}
2818
2819/**
2820 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2821 * @phba: pointer to lpfc hba data structure.
2822 * @acqe_link: pointer to the async link completion queue entry.
2823 *
2824 * This routine is to parse the SLI4 link attention type and translate it
2825 * into the base driver's link attention type coding.
2826 *
2827 * Return: Link attention type in terms of base driver's coding.
2828 **/ 2829static uint8_t 2830lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 2831 struct lpfc_acqe_link *acqe_link) 2832{ 2833 uint8_t att_type; 2834 2835 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 2836 case LPFC_ASYNC_LINK_STATUS_DOWN: 2837 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 2838 att_type = AT_LINK_DOWN; 2839 break; 2840 case LPFC_ASYNC_LINK_STATUS_UP: 2841 /* Ignore physical link up events - wait for logical link up */ 2842 att_type = AT_RESERVED; 2843 break; 2844 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 2845 att_type = AT_LINK_UP; 2846 break; 2847 default: 2848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2849 "0399 Invalid link attention type: x%x\n", 2850 bf_get(lpfc_acqe_link_status, acqe_link)); 2851 att_type = AT_RESERVED; 2852 break; 2853 } 2854 return att_type; 2855} 2856 2857/** 2858 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 2859 * @phba: pointer to lpfc hba data structure. 2860 * @acqe_link: pointer to the async link completion queue entry. 2861 * 2862 * This routine is to parse the SLI4 link-attention link speed and translate 2863 * it into the base driver's link-attention link speed coding. 2864 * 2865 * Return: Link-attention link speed in terms of base driver's coding. 2866 **/ 2867static uint8_t 2868lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 2869 struct lpfc_acqe_link *acqe_link) 2870{ 2871 uint8_t link_speed; 2872 2873 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 2874 case LPFC_ASYNC_LINK_SPEED_ZERO: 2875 link_speed = LA_UNKNW_LINK; 2876 break; 2877 case LPFC_ASYNC_LINK_SPEED_10MBPS: 2878 link_speed = LA_UNKNW_LINK; 2879 break; 2880 case LPFC_ASYNC_LINK_SPEED_100MBPS: 2881 link_speed = LA_UNKNW_LINK; 2882 break; 2883 case LPFC_ASYNC_LINK_SPEED_1GBPS: 2884 link_speed = LA_1GHZ_LINK; 2885 break; 2886 case LPFC_ASYNC_LINK_SPEED_10GBPS: 2887 link_speed = LA_10GHZ_LINK; 2888 break; 2889 default: 2890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2891 "0483 Invalid link-attention link speed: x%x\n", 2892 bf_get(lpfc_acqe_link_speed, acqe_link)); 2893 link_speed = LA_UNKNW_LINK; 2894 break; 2895 } 2896 return link_speed; 2897} 2898 2899/** 2900 * lpfc_sli4_async_link_evt - Process the asynchronous link event 2901 * @phba: pointer to lpfc hba data structure. 2902 * @acqe_link: pointer to the async link completion queue entry. 2903 * 2904 * This routine is to handle the SLI4 asynchronous link event. 
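 *
 * Rather than adding a separate SLI4 link-attention path, the routine
 * below builds a pseudo READ_LA mailbox completion from the ACQE fields
 * and hands it to lpfc_mbx_cmpl_read_la(), so the existing SLI3-style
 * link event handling is reused unchanged.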
2905 **/
2906static void
2907lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2908			 struct lpfc_acqe_link *acqe_link)
2909{
2910	struct lpfc_dmabuf *mp;
2911	LPFC_MBOXQ_t *pmb;
2912	MAILBOX_t *mb;
2913	READ_LA_VAR *la;
2914	uint8_t att_type;
2915
2916	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2917	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2918		return;
2919	phba->fcoe_eventtag = acqe_link->event_tag;
2920	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2921	if (!pmb) {
2922		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2923				"0395 The mboxq allocation failed\n");
2924		return;
2925	}
2926	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2927	if (!mp) {
2928		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2929				"0396 The lpfc_dmabuf allocation failed\n");
2930		goto out_free_pmb;
2931	}
2932	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2933	if (!mp->virt) {
2934		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2935				"0397 The mbuf allocation failed\n");
2936		goto out_free_dmabuf;
2937	}
2938
2939	/* Cleanup any outstanding ELS commands */
2940	lpfc_els_flush_all_cmd(phba);
2941
2942	/* Block ELS IOCBs until we are done processing the link event */
2943	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2944
2945	/* Update link event statistics */
2946	phba->sli.slistat.link_event++;
2947
2948	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2949	lpfc_read_la(phba, pmb, mp);
2950	pmb->vport = phba->pport;
2951
2952	/* Parse and translate status field */
2953	mb = &pmb->u.mb;
2954	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2955
2956	/* Parse and translate link attention fields */
2957	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2958	la->eventTag = acqe_link->event_tag;
2959	la->attType = att_type;
2960	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2961
2962	/* Fake the following irrelevant fields */
2963	la->topology = TOPOLOGY_PT_PT;
2964	la->granted_AL_PA = 0;
2965	la->il = 0;
2966	la->pb = 0;
2967	la->fa = 0;
2968	la->mm = 0;
2969
2970	/* Keep the link status for extra SLI4 state machine reference */
2971	phba->sli4_hba.link_state.speed =
2972				bf_get(lpfc_acqe_link_speed, acqe_link);
2973	phba->sli4_hba.link_state.duplex =
2974				bf_get(lpfc_acqe_link_duplex, acqe_link);
2975	phba->sli4_hba.link_state.status =
2976				bf_get(lpfc_acqe_link_status, acqe_link);
2977	phba->sli4_hba.link_state.physical =
2978				bf_get(lpfc_acqe_link_physical, acqe_link);
2979	phba->sli4_hba.link_state.fault =
2980				bf_get(lpfc_acqe_link_fault, acqe_link);
2981
2982	/* Invoke the lpfc_handle_latt mailbox command callback function */
2983	lpfc_mbx_cmpl_read_la(phba, pmb);
2984
2985	return;
2986
2987out_free_dmabuf:
2988	kfree(mp);
2989out_free_pmb:
2990	mempool_free(pmb, phba->mbox_mem_pool);
2991}
2992
2993/**
2994 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2995 * @phba: pointer to lpfc hba data structure.
2996 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2997 *
2998 * This routine is to handle the SLI4 asynchronous fcoe event.
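 *
 * Four event types are handled here: new FCF found, FCF table full,
 * FCF dead, and clear virtual link (CVL); anything else is logged as
 * an unknown FCoE event.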
2999 **/
3000static void
3001lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3002			 struct lpfc_acqe_fcoe *acqe_fcoe)
3003{
3004	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3005	int rc;
3006	struct lpfc_vport *vport;
3007	struct lpfc_nodelist *ndlp;
3008	struct Scsi_Host  *shost;
3009	uint32_t link_state;
3010
3011	phba->fc_eventTag = acqe_fcoe->event_tag;
3012	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3013	switch (event_type) {
3014	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3015		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3016			"2546 New FCF found index 0x%x tag 0x%x\n",
3017			acqe_fcoe->index,
3018			acqe_fcoe->event_tag);
3019		/*
3020		 * If the current FCF is in discovered state, or
3021		 * FCF discovery is in progress do nothing.
3022		 */
3023		spin_lock_irq(&phba->hbalock);
3024		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
3025		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3026			spin_unlock_irq(&phba->hbalock);
3027			break;
3028		}
3029		spin_unlock_irq(&phba->hbalock);
3030
3031		/* Read the FCF table and re-discover SAN. */
3032		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
3033		if (rc)
3034			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3035					"2547 Read FCF record failed 0x%x\n",
3036					rc);
3037		break;
3038
3039	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3040		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3041			"2548 FCF Table full count 0x%x tag 0x%x\n",
3042			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3043			acqe_fcoe->event_tag);
3044		break;
3045
3046	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3047		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3048			"2549 FCF disconnected from network index 0x%x"
3049			" tag 0x%x\n", acqe_fcoe->index,
3050			acqe_fcoe->event_tag);
3051		/* If the event is not for the currently used FCF, do nothing */
3052		if (phba->fcf.fcf_indx != acqe_fcoe->index)
3053			break;
3054		/*
3055		 * Currently the driver supports only one FCF, so treat this
3056		 * as a link down; save and restore the link state so that it
3057		 * is not forced to Link Down unless it was already down.
3058		 */
3059		link_state = phba->link_state;
3060		lpfc_linkdown(phba);
3061		phba->link_state = link_state;
3062		/* Unregister FCF if no devices connected to it */
3063		lpfc_unregister_unused_fcf(phba);
3064		break;
3065	case LPFC_FCOE_EVENT_TYPE_CVL:
3066		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3067			"2718 Clear Virtual Link Received for VPI 0x%x"
3068			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3069		vport = lpfc_find_vport_by_vpid(phba,
3070				acqe_fcoe->index - phba->vpi_base);
3071		if (!vport)
3072			break;
3073		ndlp = lpfc_findnode_did(vport, Fabric_DID);
3074		if (!ndlp)
3075			break;
3076		shost = lpfc_shost_from_vport(vport);
3077		lpfc_linkdown_port(vport);
3078		if (vport->port_type != LPFC_NPIV_PORT) {
3079			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3080			spin_lock_irq(shost->host_lock);
3081			ndlp->nlp_flag |= NLP_DELAY_TMO;
3082			spin_unlock_irq(shost->host_lock);
3083			ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
3084			vport->port_state = LPFC_FLOGI;
3085		}
3086		break;
3087	default:
3088		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3089			"0288 Unknown FCoE event type 0x%x event tag "
3090			"0x%x\n", event_type, acqe_fcoe->event_tag);
3091		break;
3092	}
3093}
3094
3095/**
3096 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3097 * @phba: pointer to lpfc hba data structure.
3098 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3099 *
3100 * This routine is to handle the SLI4 asynchronous dcbx event.
3101 **/
3102static void
3103lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3104			 struct lpfc_acqe_dcbx *acqe_dcbx)
3105{
3106	phba->fc_eventTag = acqe_dcbx->event_tag;
3107	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3108			"0290 The SLI4 DCBX asynchronous event is not "
3109			"handled yet\n");
3110}
3111
3112/**
3113 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3114 * @phba: pointer to lpfc hba data structure.
3115 *
3116 * This routine is invoked by the worker thread to process all the pending
3117 * SLI4 asynchronous events.
3118 **/
3119void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3120{
3121	struct lpfc_cq_event *cq_event;
3122
3123	/* First, declare the async event has been handled */
3124	spin_lock_irq(&phba->hbalock);
3125	phba->hba_flag &= ~ASYNC_EVENT;
3126	spin_unlock_irq(&phba->hbalock);
3127	/* Now, handle all the async events */
3128	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3129		/* Get the first event from the head of the event queue */
3130		spin_lock_irq(&phba->hbalock);
3131		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3132				 cq_event, struct lpfc_cq_event, list);
3133		spin_unlock_irq(&phba->hbalock);
3134		/* Process the asynchronous event */
3135		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3136		case LPFC_TRAILER_CODE_LINK:
3137			lpfc_sli4_async_link_evt(phba,
3138						 &cq_event->cqe.acqe_link);
3139			break;
3140		case LPFC_TRAILER_CODE_FCOE:
3141			lpfc_sli4_async_fcoe_evt(phba,
3142						 &cq_event->cqe.acqe_fcoe);
3143			break;
3144		case LPFC_TRAILER_CODE_DCBX:
3145			lpfc_sli4_async_dcbx_evt(phba,
3146						 &cq_event->cqe.acqe_dcbx);
3147			break;
3148		default:
3149			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3150					"1804 Invalid asynchronous event code: "
3151					"x%x\n", bf_get(lpfc_trailer_code,
3152					&cq_event->cqe.mcqe_cmpl));
3153			break;
3154		}
3155		/* Free the completion event processed to the free pool */
3156		lpfc_sli4_cq_event_release(phba, cq_event);
3157	}
3158}
3159
3160/**
3161 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3162 * @phba: pointer to lpfc hba data structure.
3163 * @dev_grp: The HBA PCI-Device group number.
3164 *
3165 * This routine is invoked to set up the per HBA PCI-Device group function
3166 * API jump table entries.
3167 *
3168 * Return: 0 if success, otherwise -ENODEV
3169 **/
3170int
3171lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3172{
3173	int rc;
3174
3175	/* Set up lpfc PCI-device group */
3176	phba->pci_dev_grp = dev_grp;
3177
3178	/* The LPFC_PCI_DEV_OC uses SLI4 */
3179	if (dev_grp == LPFC_PCI_DEV_OC)
3180		phba->sli_rev = LPFC_SLI_REV4;
3181
3182	/* Set up device INIT API function jump table */
3183	rc = lpfc_init_api_table_setup(phba, dev_grp);
3184	if (rc)
3185		return -ENODEV;
3186	/* Set up SCSI API function jump table */
3187	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3188	if (rc)
3189		return -ENODEV;
3190	/* Set up SLI API function jump table */
3191	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3192	if (rc)
3193		return -ENODEV;
3194	/* Set up MBOX API function jump table */
3195	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3196	if (rc)
3197		return -ENODEV;
3198
3199	return 0;
3200}
3201
3202/**
3203 * lpfc_log_intr_mode - Log the active interrupt mode
3204 * @phba: pointer to lpfc hba data structure.
3205 * @intr_mode: active interrupt mode adopted.
3206 *
3207 * This routine is invoked to log the currently used active interrupt mode
3208 * to the device.
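 *
 * @intr_mode encodes the mode as 0 for INTx, 1 for MSI and 2 for MSI-X.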
3209 **/
3210static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3211{
3212	switch (intr_mode) {
3213	case 0:
3214		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3215				"0470 Enabled INTx interrupt mode.\n");
3216		break;
3217	case 1:
3218		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3219				"0481 Enabled MSI interrupt mode.\n");
3220		break;
3221	case 2:
3222		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3223				"0480 Enabled MSI-X interrupt mode.\n");
3224		break;
3225	default:
3226		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3227				"0482 Illegal interrupt mode.\n");
3228		break;
3229	}
3230	return;
3231}
3232
3233/**
3234 * lpfc_enable_pci_dev - Enable a generic PCI device.
3235 * @phba: pointer to lpfc hba data structure.
3236 *
3237 * This routine is invoked to enable the PCI device that is common to all
3238 * PCI devices.
3239 *
3240 * Return codes
3241 * 	0 - successful
3242 * 	other values - error
3243 **/
3244static int
3245lpfc_enable_pci_dev(struct lpfc_hba *phba)
3246{
3247	struct pci_dev *pdev;
3248	int bars;
3249
3250	/* Obtain PCI device reference */
3251	if (!phba->pcidev)
3252		goto out_error;
3253	else
3254		pdev = phba->pcidev;
3255	/* Select PCI BARs */
3256	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3257	/* Enable PCI device */
3258	if (pci_enable_device_mem(pdev))
3259		goto out_error;
3260	/* Request PCI resource for the device */
3261	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3262		goto out_disable_device;
3263	/* Set up device as PCI master and save state for EEH */
3264	pci_set_master(pdev);
3265	pci_try_set_mwi(pdev);
3266	pci_save_state(pdev);
3267
3268	return 0;
3269
3270out_disable_device:
3271	pci_disable_device(pdev);
3272out_error:
3273	return -ENODEV;
3274}
3275
3276/**
3277 * lpfc_disable_pci_dev - Disable a generic PCI device.
3278 * @phba: pointer to lpfc hba data structure.
3279 *
3280 * This routine is invoked to disable the PCI device that is common to all
3281 * PCI devices.
3282 **/
3283static void
3284lpfc_disable_pci_dev(struct lpfc_hba *phba)
3285{
3286	struct pci_dev *pdev;
3287	int bars;
3288
3289	/* Obtain PCI device reference */
3290	if (!phba->pcidev)
3291		return;
3292	else
3293		pdev = phba->pcidev;
3294	/* Select PCI BARs */
3295	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3296	/* Release PCI resource and disable PCI device */
3297	pci_release_selected_regions(pdev, bars);
3298	pci_disable_device(pdev);
3299	/* Null out PCI private reference to driver */
3300	pci_set_drvdata(pdev, NULL);
3301
3302	return;
3303}
3304
3305/**
3306 * lpfc_reset_hba - Reset a hba
3307 * @phba: pointer to lpfc hba data structure.
3308 *
3309 * This routine is invoked to reset a hba device. It brings the HBA
3310 * offline, performs a board restart, and then brings the board back
3311 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3312 * any outstanding mailbox commands.
3313 **/
3314void
3315lpfc_reset_hba(struct lpfc_hba *phba)
3316{
3317	/* If resets are disabled then set error state and return. */
3318	if (!phba->cfg_enable_hba_reset) {
3319		phba->link_state = LPFC_HBA_ERROR;
3320		return;
3321	}
3322	lpfc_offline_prep(phba);
3323	lpfc_offline(phba);
3324	lpfc_sli_brdrestart(phba);
3325	lpfc_online(phba);
3326	lpfc_unblock_mgmt_io(phba);
3327}
3328
3329/**
3330 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3331 * @phba: pointer to lpfc hba data structure.
3332 * 3333 * This routine is invoked to set up the driver internal resources specific to 3334 * support the SLI-3 HBA device it attached to. 3335 * 3336 * Return codes 3337 * 0 - successful 3338 * other values - error 3339 **/ 3340static int 3341lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3342{ 3343 struct lpfc_sli *psli; 3344 3345 /* 3346 * Initialize timers used by driver 3347 */ 3348 3349 /* Heartbeat timer */ 3350 init_timer(&phba->hb_tmofunc); 3351 phba->hb_tmofunc.function = lpfc_hb_timeout; 3352 phba->hb_tmofunc.data = (unsigned long)phba; 3353 3354 psli = &phba->sli; 3355 /* MBOX heartbeat timer */ 3356 init_timer(&psli->mbox_tmo); 3357 psli->mbox_tmo.function = lpfc_mbox_timeout; 3358 psli->mbox_tmo.data = (unsigned long) phba; 3359 /* FCP polling mode timer */ 3360 init_timer(&phba->fcp_poll_timer); 3361 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3362 phba->fcp_poll_timer.data = (unsigned long) phba; 3363 /* Fabric block timer */ 3364 init_timer(&phba->fabric_block_timer); 3365 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3366 phba->fabric_block_timer.data = (unsigned long) phba; 3367 /* EA polling mode timer */ 3368 init_timer(&phba->eratt_poll); 3369 phba->eratt_poll.function = lpfc_poll_eratt; 3370 phba->eratt_poll.data = (unsigned long) phba; 3371 3372 /* Host attention work mask setup */ 3373 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3374 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3375 3376 /* Get all the module params for configuring this host */ 3377 lpfc_get_cfgparam(phba); 3378 /* 3379 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3380 * used to create the sg_dma_buf_pool must be dynamically calculated. 3381 * 2 segments are added since the IOCB needs a command and response bde. 3382 */ 3383 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 3384 sizeof(struct fcp_rsp) + 3385 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 3386 3387 if (phba->cfg_enable_bg) { 3388 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 3389 phba->cfg_sg_dma_buf_size += 3390 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 3391 } 3392 3393 /* Also reinitialize the host templates with new values. */ 3394 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3395 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3396 3397 phba->max_vpi = LPFC_MAX_VPI; 3398 /* This will be set to correct value after config_port mbox */ 3399 phba->max_vports = 0; 3400 3401 /* 3402 * Initialize the SLI Layer to run with lpfc HBAs. 3403 */ 3404 lpfc_sli_setup(phba); 3405 lpfc_sli_queue_setup(phba); 3406 3407 /* Allocate device driver memory */ 3408 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 3409 return -ENOMEM; 3410 3411 return 0; 3412} 3413 3414/** 3415 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 3416 * @phba: pointer to lpfc hba data structure. 3417 * 3418 * This routine is invoked to unset the driver internal resources set up 3419 * specific for supporting the SLI-3 HBA device it attached to. 3420 **/ 3421static void 3422lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 3423{ 3424 /* Free device driver memory allocated */ 3425 lpfc_mem_free_all(phba); 3426 3427 return; 3428} 3429 3430/** 3431 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 3432 * @phba: pointer to lpfc hba data structure. 3433 * 3434 * This routine is invoked to set up the driver internal resources specific to 3435 * support the SLI-4 HBA device it attached to. 
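 *
 * Setup proceeds in dependency order: the bootstrap mailbox comes first,
 * then the endian order and firmware config checks, READ_CONFIG, a
 * function reset, queue creation, and finally the sgl, rpi and
 * interrupt-handle bookkeeping; each error path unwinds exactly the
 * steps already completed.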
3436 *
3437 * Return codes
3438 * 	0 - successful
3439 * 	other values - error
3440 **/
3441static int
3442lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3443{
3444	struct lpfc_sli *psli;
3445	int rc;
3446	int i, hbq_count;
3447
3448	/* Before proceeding, wait for POST done and device ready */
3449	rc = lpfc_sli4_post_status_check(phba);
3450	if (rc)
3451		return -ENODEV;
3452
3453	/*
3454	 * Initialize timers used by driver
3455	 */
3456
3457	/* Heartbeat timer */
3458	init_timer(&phba->hb_tmofunc);
3459	phba->hb_tmofunc.function = lpfc_hb_timeout;
3460	phba->hb_tmofunc.data = (unsigned long)phba;
3461
3462	psli = &phba->sli;
3463	/* MBOX heartbeat timer */
3464	init_timer(&psli->mbox_tmo);
3465	psli->mbox_tmo.function = lpfc_mbox_timeout;
3466	psli->mbox_tmo.data = (unsigned long) phba;
3467	/* Fabric block timer */
3468	init_timer(&phba->fabric_block_timer);
3469	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3470	phba->fabric_block_timer.data = (unsigned long) phba;
3471	/* EA polling mode timer */
3472	init_timer(&phba->eratt_poll);
3473	phba->eratt_poll.function = lpfc_poll_eratt;
3474	phba->eratt_poll.data = (unsigned long) phba;
3475	/*
3476	 * We need to do a READ_CONFIG mailbox command here before
3477	 * calling lpfc_get_cfgparam. For VFs this will report the
3478	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3479	 * All of the resources allocated
3480	 * for this Port are tied to these values.
3481	 */
3482	/* Get all the module params for configuring this host */
3483	lpfc_get_cfgparam(phba);
3484	phba->max_vpi = LPFC_MAX_VPI;
3485	/* This will be set to correct value after the read_config mbox */
3486	phba->max_vports = 0;
3487
3488	/* Program the default value of vlan_id and fc_map */
3489	phba->valid_vlan = 0;
3490	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3491	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3492	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3493
3494	/*
3495	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3496	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3497	 * 2 segments are added since the IOCB needs a command and response bde.
3498	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3499	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3500	 * Table of sgl sizes and seg_cnt:
3501	 * sgl size, 	sg_seg_cnt	total seg
3502	 * 1k		50		52
3503	 * 2k		114		116
3504	 * 4k		242		244
3505	 * 8k		498		500
3506	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3507	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3508	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3509	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3510	 */
3511	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3512		phba->cfg_sg_seg_cnt = 50;
3513	else if (phba->cfg_sg_seg_cnt <= 114)
3514		phba->cfg_sg_seg_cnt = 114;
3515	else if (phba->cfg_sg_seg_cnt <= 242)
3516		phba->cfg_sg_seg_cnt = 242;
3517	else
3518		phba->cfg_sg_seg_cnt = 498;
3519
3520	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3521					+ sizeof(struct fcp_rsp);
3522	phba->cfg_sg_dma_buf_size +=
3523		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3524
3525	/* Initialize buffer queue management fields */
3526	hbq_count = lpfc_sli_hbq_count();
3527	for (i = 0; i < hbq_count; ++i)
3528		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3529	INIT_LIST_HEAD(&phba->rb_pend_list);
3530	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3531	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3532
3533	/*
3534	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3535	 */
3536	/* Initialize the Abort scsi buffer list used by driver */
3537	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3538	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3539	/* This abort list used by worker thread */
3540	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3541
3542	/*
3543	 * Initialize driver internal slow-path work queues
3544	 */
3545
3546	/* Driver internal slow-path CQ Event pool */
3547	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3548	/* Response IOCB work queue list */
3549	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3550	/* Asynchronous event CQ Event work queue list */
3551	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3552	/* Fast-path XRI aborted CQ Event work queue list */
3553	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3554	/* Slow-path XRI aborted CQ Event work queue list */
3555	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3556	/* Receive queue CQ Event work queue list */
3557	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3558
3559	/* Initialize the driver internal SLI layer lists. */
3560	lpfc_sli_setup(phba);
3561	lpfc_sli_queue_setup(phba);
3562
3563	/* Allocate device driver memory */
3564	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3565	if (rc)
3566		return -ENOMEM;
3567
3568	/* Create the bootstrap mailbox command */
3569	rc = lpfc_create_bootstrap_mbox(phba);
3570	if (unlikely(rc))
3571		goto out_free_mem;
3572
3573	/* Set up the host's endian order with the device. */
3574	rc = lpfc_setup_endian_order(phba);
3575	if (unlikely(rc))
3576		goto out_free_bsmbx;
3577
3578	rc = lpfc_sli4_fw_cfg_check(phba);
3579	if (unlikely(rc))
3580		goto out_free_bsmbx;
3581
3582	/* Set up the hba's configuration parameters. */
3583	rc = lpfc_sli4_read_config(phba);
3584	if (unlikely(rc))
3585		goto out_free_bsmbx;
3586
3587	/* Perform a function reset */
3588	rc = lpfc_pci_function_reset(phba);
3589	if (unlikely(rc))
3590		goto out_free_bsmbx;
3591
3592	/* Create all the SLI4 queues */
3593	rc = lpfc_sli4_queue_create(phba);
3594	if (rc)
3595		goto out_free_bsmbx;
3596
3597	/* Create driver internal CQE event pool */
3598	rc = lpfc_sli4_cq_event_pool_create(phba);
3599	if (rc)
3600		goto out_destroy_queue;
3601
3602	/* Initialize and populate the iocb list per host */
3603	rc = lpfc_init_sgl_list(phba);
3604	if (rc) {
3605		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3606				"1400 Failed to initialize sgl list.\n");
3607		goto out_destroy_cq_event_pool;
3608	}
3609	rc = lpfc_init_active_sgl_array(phba);
3610	if (rc) {
3611		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3612				"1430 Failed to initialize sgl list.\n");
3613		goto out_free_sgl_list;
3614	}
3615
3616	rc = lpfc_sli4_init_rpi_hdrs(phba);
3617	if (rc) {
3618		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3619				"1432 Failed to initialize rpi headers.\n");
3620		goto out_free_active_sgl;
3621	}
3622
3623	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3624				    phba->cfg_fcp_eq_count), GFP_KERNEL);
3625	if (!phba->sli4_hba.fcp_eq_hdl) {
3626		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3627				"2572 Failed to allocate memory for fast-path "
3628				"per-EQ handle array\n");
		/* rc still holds 0 here; report the allocation failure */
		rc = -ENOMEM;
3629		goto out_remove_rpi_hdrs;
3630	}
3631
3632	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3633				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3634	if (!phba->sli4_hba.msix_entries) {
3635		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3636				"2573 Failed to allocate memory for msi-x "
3637				"interrupt vector entries\n");
		rc = -ENOMEM;
3638		goto out_free_fcp_eq_hdl;
3639	}
3640
3641	return rc;
3642
3643out_free_fcp_eq_hdl:
3644	kfree(phba->sli4_hba.fcp_eq_hdl);
3645out_remove_rpi_hdrs:
3646	lpfc_sli4_remove_rpi_hdrs(phba);
3647out_free_active_sgl:
3648	lpfc_free_active_sgl(phba);
3649out_free_sgl_list:
3650	lpfc_free_sgl_list(phba);
3651out_destroy_cq_event_pool:
3652	lpfc_sli4_cq_event_pool_destroy(phba);
3653out_destroy_queue:
3654	lpfc_sli4_queue_destroy(phba);
3655out_free_bsmbx:
3656	lpfc_destroy_bootstrap_mbox(phba);
3657out_free_mem:
3658	lpfc_mem_free(phba);
3659	return rc;
3660}
3661
3662/**
3663 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3664 * @phba: pointer to lpfc hba data structure.
3665 *
3666 * This routine is invoked to unset the driver internal resources set up
3667 * specific for supporting the SLI-4 HBA device it attached to.
3668 **/
3669static void
3670lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3671{
3672	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3673
3674	/* unregister default FCFI from the HBA */
3675	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3676
3677	/* Free the default FCR table */
3678	lpfc_sli_remove_dflt_fcf(phba);
3679
3680	/* Free memory allocated for msi-x interrupt vector entries */
3681	kfree(phba->sli4_hba.msix_entries);
3682
3683	/* Free memory allocated for fast-path work queue handles */
3684	kfree(phba->sli4_hba.fcp_eq_hdl);
3685
3686	/* Free the allocated rpi headers. */
*/
3687 lpfc_sli4_remove_rpi_hdrs(phba);
3688 lpfc_sli4_remove_rpis(phba);
3689
3690 /* Free the ELS sgl list */
3691 lpfc_free_active_sgl(phba);
3692 lpfc_free_sgl_list(phba);
3693
3694 /* Free the SCSI sgl management array */
3695 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3696
3697 /* Free the SLI4 queues */
3698 lpfc_sli4_queue_destroy(phba);
3699
3700 /* Free the completion queue EQ event pool */
3701 lpfc_sli4_cq_event_release_all(phba);
3702 lpfc_sli4_cq_event_pool_destroy(phba);
3703
3704 /* Reset SLI4 HBA FCoE function */
3705 lpfc_pci_function_reset(phba);
3706
3707 /* Free the bsmbx region. */
3708 lpfc_destroy_bootstrap_mbox(phba);
3709
3710 /* Free the SLI Layer memory with SLI4 HBAs */
3711 lpfc_mem_free_all(phba);
3712
3713 /* Free the current connect table */
3714 list_for_each_entry_safe(conn_entry, next_conn_entry,
3715 &phba->fcf_conn_rec_list, list) {
3716 list_del_init(&conn_entry->list);
3717 kfree(conn_entry);
3718 }
3719
3720 return;
3721}
3722
3723/**
3724 * lpfc_init_api_table_setup - Set up init api function jump table
3725 * @phba: The hba struct for which this call is being executed.
3726 * @dev_grp: The HBA PCI-Device group number.
3727 *
3728 * This routine sets up the device INIT interface API function jump table
3729 * in @phba struct.
3730 *
3731 * Returns: 0 - success, -ENODEV - failure.
3732 **/
3733int
3734lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3735{
3736 switch (dev_grp) {
3737 case LPFC_PCI_DEV_LP:
3738 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3739 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3740 phba->lpfc_stop_port = lpfc_stop_port_s3;
3741 break;
3742 case LPFC_PCI_DEV_OC:
3743 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3744 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3745 phba->lpfc_stop_port = lpfc_stop_port_s4;
3746 break;
3747 default:
3748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3749 "1431 Invalid HBA PCI-device group: 0x%x\n",
3750 dev_grp);
3751 return -ENODEV;
3752 break;
3753 }
3754 return 0;
3755}
3756
3757/**
3758 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3759 * @phba: pointer to lpfc hba data structure.
3760 *
3761 * This routine is invoked to set up the driver internal resources before the
3762 * device specific resource setup to support the HBA device it is attached to.
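 *
 * For illustration only (editor's sketch, not driver code): the hbalock
 * and list heads initialized here back the locked-list pattern used
 * throughout this file, e.g. for any queued item:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_add_tail(&item->list, &phba->work_list);
 *	spin_unlock_irq(&phba->hbalock);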
3763 *
3764 * Return codes
3765 * 0 - successful
3766 * other values - error
3767 **/
3768static int
3769lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3770{
3771 /*
3772 * Driver resources common to all SLI revisions
3773 */
3774 atomic_set(&phba->fast_event_count, 0);
3775 spin_lock_init(&phba->hbalock);
3776
3777 /* Initialize ndlp management spinlock */
3778 spin_lock_init(&phba->ndlp_lock);
3779
3780 INIT_LIST_HEAD(&phba->port_list);
3781 INIT_LIST_HEAD(&phba->work_list);
3782 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3783
3784 /* Initialize the wait queue head for the kernel thread */
3785 init_waitqueue_head(&phba->work_waitq);
3786
3787 /* Initialize the scsi buffer list used by driver for scsi IO */
3788 spin_lock_init(&phba->scsi_buf_list_lock);
3789 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3790
3791 /* Initialize the fabric iocb list */
3792 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3793
3794 /* Initialize list to save ELS buffers */
3795 INIT_LIST_HEAD(&phba->elsbuf);
3796
3797 /* Initialize FCF connection rec list */
3798 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3799
3800 return 0;
3801}
3802
3803/**
3804 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3805 * @phba: pointer to lpfc hba data structure.
3806 *
3807 * This routine is invoked to set up the driver internal resources after the
3808 * device specific resource setup to support the HBA device it is attached to.
3809 *
3810 * Return codes
3811 * 0 - successful
3812 * other values - error
3813 **/
3814static int
3815lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3816{
3817 int error;
3818
3819 /* Startup the kernel thread for this host adapter. */
3820 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3821 "lpfc_worker_%d", phba->brd_no);
3822 if (IS_ERR(phba->worker_thread)) {
3823 error = PTR_ERR(phba->worker_thread);
3824 return error;
3825 }
3826
3827 return 0;
3828}
3829
3830/**
3831 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3832 * @phba: pointer to lpfc hba data structure.
3833 *
3834 * This routine is invoked to unset the driver internal resources set up after
3835 * the device specific resource setup for supporting the HBA device it is
3836 * attached to.
3837 **/
3838static void
3839lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3840{
3841 /* Stop kernel worker thread */
3842 kthread_stop(phba->worker_thread);
3843}
3844
3845/**
3846 * lpfc_free_iocb_list - Free iocb list.
3847 * @phba: pointer to lpfc hba data structure.
3848 *
3849 * This routine is invoked to free the driver's IOCB list and memory.
3850 **/
3851static void
3852lpfc_free_iocb_list(struct lpfc_hba *phba)
3853{
3854 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3855
3856 spin_lock_irq(&phba->hbalock);
3857 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3858 &phba->lpfc_iocb_list, list) {
3859 list_del(&iocbq_entry->list);
3860 kfree(iocbq_entry);
3861 phba->total_iocbq_bufs--;
3862 }
3863 spin_unlock_irq(&phba->hbalock);
3864
3865 return;
3866}
3867
3868/**
3869 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3870 * @phba: pointer to lpfc hba data structure.
3871 *
3872 * This routine is invoked to allocate and initialize the driver's IOCB
3873 * list and set up the IOCB tag array accordingly.
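 *
 * A minimal usage sketch (editor's illustration, not a quote of the
 * caller), relying on the helper freeing any partially built list
 * itself when it fails:
 *
 *	rc = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
 *	if (rc)
 *		return rc;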
3874 * 3875 * Return codes 3876 * 0 - successful 3877 * other values - error 3878 **/ 3879static int 3880lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 3881{ 3882 struct lpfc_iocbq *iocbq_entry = NULL; 3883 uint16_t iotag; 3884 int i; 3885 3886 /* Initialize and populate the iocb list per host. */ 3887 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 3888 for (i = 0; i < iocb_count; i++) { 3889 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 3890 if (iocbq_entry == NULL) { 3891 printk(KERN_ERR "%s: only allocated %d iocbs of " 3892 "expected %d count. Unloading driver.\n", 3893 __func__, i, LPFC_IOCB_LIST_CNT); 3894 goto out_free_iocbq; 3895 } 3896 3897 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 3898 if (iotag == 0) { 3899 kfree(iocbq_entry); 3900 printk(KERN_ERR "%s: failed to allocate IOTAG. " 3901 "Unloading driver.\n", __func__); 3902 goto out_free_iocbq; 3903 } 3904 iocbq_entry->sli4_xritag = NO_XRI; 3905 3906 spin_lock_irq(&phba->hbalock); 3907 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 3908 phba->total_iocbq_bufs++; 3909 spin_unlock_irq(&phba->hbalock); 3910 } 3911 3912 return 0; 3913 3914out_free_iocbq: 3915 lpfc_free_iocb_list(phba); 3916 3917 return -ENOMEM; 3918} 3919 3920/** 3921 * lpfc_free_sgl_list - Free sgl list. 3922 * @phba: pointer to lpfc hba data structure. 3923 * 3924 * This routine is invoked to free the driver's sgl list and memory. 3925 **/ 3926static void 3927lpfc_free_sgl_list(struct lpfc_hba *phba) 3928{ 3929 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 3930 LIST_HEAD(sglq_list); 3931 int rc = 0; 3932 3933 spin_lock_irq(&phba->hbalock); 3934 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 3935 spin_unlock_irq(&phba->hbalock); 3936 3937 list_for_each_entry_safe(sglq_entry, sglq_next, 3938 &sglq_list, list) { 3939 list_del(&sglq_entry->list); 3940 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 3941 kfree(sglq_entry); 3942 phba->sli4_hba.total_sglq_bufs--; 3943 } 3944 rc = lpfc_sli4_remove_all_sgl_pages(phba); 3945 if (rc) { 3946 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3947 "2005 Unable to deregister pages from HBA: %x\n", rc); 3948 } 3949 kfree(phba->sli4_hba.lpfc_els_sgl_array); 3950} 3951 3952/** 3953 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 3954 * @phba: pointer to lpfc hba data structure. 3955 * 3956 * This routine is invoked to allocate the driver's active sgl memory. 3957 * This array will hold the sglq_entry's for active IOs. 3958 **/ 3959static int 3960lpfc_init_active_sgl_array(struct lpfc_hba *phba) 3961{ 3962 int size; 3963 size = sizeof(struct lpfc_sglq *); 3964 size *= phba->sli4_hba.max_cfg_param.max_xri; 3965 3966 phba->sli4_hba.lpfc_sglq_active_list = 3967 kzalloc(size, GFP_KERNEL); 3968 if (!phba->sli4_hba.lpfc_sglq_active_list) 3969 return -ENOMEM; 3970 return 0; 3971} 3972 3973/** 3974 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 3975 * @phba: pointer to lpfc hba data structure. 3976 * 3977 * This routine is invoked to walk through the array of active sglq entries 3978 * and free all of the resources. 3979 * This is just a place holder for now. 3980 **/ 3981static void 3982lpfc_free_active_sgl(struct lpfc_hba *phba) 3983{ 3984 kfree(phba->sli4_hba.lpfc_sglq_active_list); 3985} 3986 3987/** 3988 * lpfc_init_sgl_list - Allocate and initialize sgl list. 3989 * @phba: pointer to lpfc hba data structure. 
3990 *
3991 * This routine is invoked to allocate and initialize the driver's sgl
3992 * list and set up the sgl xritag tag array accordingly.
3993 *
3994 * Return codes
3995 * 0 - successful
3996 * other values - error
3997 **/
3998static int
3999lpfc_init_sgl_list(struct lpfc_hba *phba)
4000{
4001 struct lpfc_sglq *sglq_entry = NULL;
4002 int i;
4003 int els_xri_cnt;
4004
4005 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4006 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4007 "2400 lpfc_init_sgl_list els %d.\n",
4008 els_xri_cnt);
4009 /* Initialize and populate the sglq list per host/VF. */
4010 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4011 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4012
4013 /* Sanity check on XRI management */
4014 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4015 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4016 "2562 No room left for SCSI XRI allocation: "
4017 "max_xri=%d, els_xri=%d\n",
4018 phba->sli4_hba.max_cfg_param.max_xri,
4019 els_xri_cnt);
4020 return -ENOMEM;
4021 }
4022
4023 /* Allocate memory for the ELS XRI management array */
4024 phba->sli4_hba.lpfc_els_sgl_array =
4025 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4026 GFP_KERNEL);
4027
4028 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4030 "2401 Failed to allocate memory for ELS "
4031 "XRI management array of size %d.\n",
4032 els_xri_cnt);
4033 return -ENOMEM;
4034 }
4035
4036 /* Keep the SCSI XRI into the XRI management array */
4037 phba->sli4_hba.scsi_xri_max =
4038 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4039 phba->sli4_hba.scsi_xri_cnt = 0;
4040
4041 phba->sli4_hba.lpfc_scsi_psb_array =
4042 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4043 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4044
4045 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4047 "2563 Failed to allocate memory for SCSI "
4048 "XRI management array of size %d.\n",
4049 phba->sli4_hba.scsi_xri_max);
4050 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4051 return -ENOMEM;
4052 }
4053
4054 for (i = 0; i < els_xri_cnt; i++) {
4055 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4056 if (sglq_entry == NULL) {
4057 printk(KERN_ERR "%s: only allocated %d sgls of "
4058 "expected %d count. Unloading driver.\n",
4059 __func__, i, els_xri_cnt);
4060 goto out_free_mem;
4061 }
4062
4063 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4064 if (sglq_entry->sli4_xritag == NO_XRI) {
4065 kfree(sglq_entry);
4066 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4067 "Unloading driver.\n", __func__);
4068 goto out_free_mem;
4069 }
4070 sglq_entry->buff_type = GEN_BUFF_TYPE;
4071 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4072 if (sglq_entry->virt == NULL) {
4073 kfree(sglq_entry);
4074 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4075 "Unloading driver.\n", __func__);
4076 goto out_free_mem;
4077 }
4078 sglq_entry->sgl = sglq_entry->virt;
4079 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4080
4081 /* The list order is used by later block SGL registration */
4082 spin_lock_irq(&phba->hbalock);
4083 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4084 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4085 phba->sli4_hba.total_sglq_bufs++;
4086 spin_unlock_irq(&phba->hbalock);
4087 }
4088 return 0;
4089
4090out_free_mem:
4091 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4092 lpfc_free_sgl_list(phba);
4093 return -ENOMEM;
4094}
4095
4096/**
4097 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4098 * @phba: pointer to lpfc hba data structure.
4099 *
4100 * This routine is invoked to post rpi header templates to the
4101 * HBA consistent with the SLI-4 interface spec. This routine
4102 * posts a PAGE_SIZE memory region to the port to hold up to
4103 * PAGE_SIZE modulo 64 rpi context headers.
4104 * No locks are held here because this is an initialization routine
4105 * called only from probe or lpfc_online when interrupts are not
4106 * enabled and the driver is reinitializing the device.
4107 *
4108 * Return codes
4109 * 0 - successful
4110 * ENOMEM - No available memory
4111 * EIO - The mailbox failed to complete successfully.
4112 **/
4113int
4114lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4115{
4116 int rc = 0;
4117 int longs;
4118 uint16_t rpi_count;
4119 struct lpfc_rpi_hdr *rpi_hdr;
4120
4121 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4122
4123 /*
4124 * Provision an rpi bitmask range for discovery. The total count
4125 * is the difference between max and base + 1.
4126 */
4127 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4128 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4129
4130 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4131 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4132 GFP_KERNEL);
4133 if (!phba->sli4_hba.rpi_bmask)
4134 return -ENOMEM;
4135
4136 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4137 if (!rpi_hdr) {
4138 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4139 "0391 Error during rpi post operation\n");
4140 lpfc_sli4_remove_rpis(phba);
4141 rc = -ENODEV;
4142 }
4143
4144 return rc;
4145}
4146
4147/**
4148 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4149 * @phba: pointer to lpfc hba data structure.
4150 *
4151 * This routine is invoked to allocate a single 4KB memory region to
4152 * support rpis and stores them in the phba. This single region
4153 * provides support for up to 64 rpis. The region is used globally
4154 * by the device.
4155 *
4156 * Returns:
4157 * A valid rpi hdr on success.
4158 * A NULL pointer on any failure.
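 *
 * For illustration only (editor's worked example of the range check
 * below): with 64 rpis covered per header region (LPFC_RPI_HDR_COUNT),
 * a port at next_rpi = 960 with rpi_limit = 1000 would need the window
 * 960..1023; since 960 + 63 > 1000, no further region is created and
 * NULL is returned.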
4159 **/ 4160struct lpfc_rpi_hdr * 4161lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 4162{ 4163 uint16_t rpi_limit, curr_rpi_range; 4164 struct lpfc_dmabuf *dmabuf; 4165 struct lpfc_rpi_hdr *rpi_hdr; 4166 4167 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4168 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4169 4170 spin_lock_irq(&phba->hbalock); 4171 curr_rpi_range = phba->sli4_hba.next_rpi; 4172 spin_unlock_irq(&phba->hbalock); 4173 4174 /* 4175 * The port has a limited number of rpis. The increment here 4176 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 4177 * and to allow the full max_rpi range per port. 4178 */ 4179 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4180 return NULL; 4181 4182 /* 4183 * First allocate the protocol header region for the port. The 4184 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 4185 */ 4186 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4187 if (!dmabuf) 4188 return NULL; 4189 4190 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4191 LPFC_HDR_TEMPLATE_SIZE, 4192 &dmabuf->phys, 4193 GFP_KERNEL); 4194 if (!dmabuf->virt) { 4195 rpi_hdr = NULL; 4196 goto err_free_dmabuf; 4197 } 4198 4199 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 4200 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 4201 rpi_hdr = NULL; 4202 goto err_free_coherent; 4203 } 4204 4205 /* Save the rpi header data for cleanup later. */ 4206 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4207 if (!rpi_hdr) 4208 goto err_free_coherent; 4209 4210 rpi_hdr->dmabuf = dmabuf; 4211 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4212 rpi_hdr->page_count = 1; 4213 spin_lock_irq(&phba->hbalock); 4214 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4215 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4216 4217 /* 4218 * The next_rpi stores the next module-64 rpi value to post 4219 * in any subsequent rpi memory region postings. 4220 */ 4221 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4222 spin_unlock_irq(&phba->hbalock); 4223 return rpi_hdr; 4224 4225 err_free_coherent: 4226 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4227 dmabuf->virt, dmabuf->phys); 4228 err_free_dmabuf: 4229 kfree(dmabuf); 4230 return NULL; 4231} 4232 4233/** 4234 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4235 * @phba: pointer to lpfc hba data structure. 4236 * 4237 * This routine is invoked to remove all memory resources allocated 4238 * to support rpis. This routine presumes the caller has released all 4239 * rpis consumed by fabric or port logins and is prepared to have 4240 * the header pages removed. 4241 **/ 4242void 4243lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4244{ 4245 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4246 4247 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4248 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4249 list_del(&rpi_hdr->list); 4250 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4251 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4252 kfree(rpi_hdr->dmabuf); 4253 kfree(rpi_hdr); 4254 } 4255 4256 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4257 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4258} 4259 4260/** 4261 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4262 * @pdev: pointer to pci device data structure. 4263 * 4264 * This routine is invoked to allocate the driver hba data structure for an 4265 * HBA device. 
If the allocation is successful, the phba reference to the 4266 * PCI device data structure is set. 4267 * 4268 * Return codes 4269 * pointer to @phba - successful 4270 * NULL - error 4271 **/ 4272static struct lpfc_hba * 4273lpfc_hba_alloc(struct pci_dev *pdev) 4274{ 4275 struct lpfc_hba *phba; 4276 4277 /* Allocate memory for HBA structure */ 4278 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4279 if (!phba) { 4280 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 4281 return NULL; 4282 } 4283 4284 /* Set reference to PCI device in HBA structure */ 4285 phba->pcidev = pdev; 4286 4287 /* Assign an unused board number */ 4288 phba->brd_no = lpfc_get_instance(); 4289 if (phba->brd_no < 0) { 4290 kfree(phba); 4291 return NULL; 4292 } 4293 4294 mutex_init(&phba->ct_event_mutex); 4295 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4296 4297 return phba; 4298} 4299 4300/** 4301 * lpfc_hba_free - Free driver hba data structure with a device. 4302 * @phba: pointer to lpfc hba data structure. 4303 * 4304 * This routine is invoked to free the driver hba data structure with an 4305 * HBA device. 4306 **/ 4307static void 4308lpfc_hba_free(struct lpfc_hba *phba) 4309{ 4310 /* Release the driver assigned board number */ 4311 idr_remove(&lpfc_hba_index, phba->brd_no); 4312 4313 kfree(phba); 4314 return; 4315} 4316 4317/** 4318 * lpfc_create_shost - Create hba physical port with associated scsi host. 4319 * @phba: pointer to lpfc hba data structure. 4320 * 4321 * This routine is invoked to create HBA physical port and associate a SCSI 4322 * host with it. 4323 * 4324 * Return codes 4325 * 0 - successful 4326 * other values - error 4327 **/ 4328static int 4329lpfc_create_shost(struct lpfc_hba *phba) 4330{ 4331 struct lpfc_vport *vport; 4332 struct Scsi_Host *shost; 4333 4334 /* Initialize HBA FC structure */ 4335 phba->fc_edtov = FF_DEF_EDTOV; 4336 phba->fc_ratov = FF_DEF_RATOV; 4337 phba->fc_altov = FF_DEF_ALTOV; 4338 phba->fc_arbtov = FF_DEF_ARBTOV; 4339 4340 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4341 if (!vport) 4342 return -ENODEV; 4343 4344 shost = lpfc_shost_from_vport(vport); 4345 phba->pport = vport; 4346 lpfc_debugfs_initialize(vport); 4347 /* Put reference to SCSI host to driver's device private data */ 4348 pci_set_drvdata(phba->pcidev, shost); 4349 4350 return 0; 4351} 4352 4353/** 4354 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 4355 * @phba: pointer to lpfc hba data structure. 4356 * 4357 * This routine is invoked to destroy HBA physical port and the associated 4358 * SCSI host. 4359 **/ 4360static void 4361lpfc_destroy_shost(struct lpfc_hba *phba) 4362{ 4363 struct lpfc_vport *vport = phba->pport; 4364 4365 /* Destroy physical port that associated with the SCSI host */ 4366 destroy_port(vport); 4367 4368 return; 4369} 4370 4371/** 4372 * lpfc_setup_bg - Setup Block guard structures and debug areas. 4373 * @phba: pointer to lpfc hba data structure. 4374 * @shost: the shost to be used to detect Block guard settings. 4375 * 4376 * This routine sets up the local Block guard protocol settings for @shost. 4377 * This routine also allocates memory for debugging bg buffers. 
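 *
 * For illustration only (editor's note): __get_free_pages() takes an
 * allocation order, so the loop below first requests order 10, i.e.
 * (1 << 10) = 1024 contiguous pages (4 MB with 4 KB pages), and halves
 * the request by decrementing the order until an allocation succeeds.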
4378 **/ 4379static void 4380lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 4381{ 4382 int pagecnt = 10; 4383 if (lpfc_prot_mask && lpfc_prot_guard) { 4384 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4385 "1478 Registering BlockGuard with the " 4386 "SCSI layer\n"); 4387 scsi_host_set_prot(shost, lpfc_prot_mask); 4388 scsi_host_set_guard(shost, lpfc_prot_guard); 4389 } 4390 if (!_dump_buf_data) { 4391 while (pagecnt) { 4392 spin_lock_init(&_dump_buf_lock); 4393 _dump_buf_data = 4394 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4395 if (_dump_buf_data) { 4396 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4397 "9043 BLKGRD: allocated %d pages for " 4398 "_dump_buf_data at 0x%p\n", 4399 (1 << pagecnt), _dump_buf_data); 4400 _dump_buf_data_order = pagecnt; 4401 memset(_dump_buf_data, 0, 4402 ((1 << PAGE_SHIFT) << pagecnt)); 4403 break; 4404 } else 4405 --pagecnt; 4406 } 4407 if (!_dump_buf_data_order) 4408 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4409 "9044 BLKGRD: ERROR unable to allocate " 4410 "memory for hexdump\n"); 4411 } else 4412 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4413 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 4414 "\n", _dump_buf_data); 4415 if (!_dump_buf_dif) { 4416 while (pagecnt) { 4417 _dump_buf_dif = 4418 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4419 if (_dump_buf_dif) { 4420 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4421 "9046 BLKGRD: allocated %d pages for " 4422 "_dump_buf_dif at 0x%p\n", 4423 (1 << pagecnt), _dump_buf_dif); 4424 _dump_buf_dif_order = pagecnt; 4425 memset(_dump_buf_dif, 0, 4426 ((1 << PAGE_SHIFT) << pagecnt)); 4427 break; 4428 } else 4429 --pagecnt; 4430 } 4431 if (!_dump_buf_dif_order) 4432 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4433 "9047 BLKGRD: ERROR unable to allocate " 4434 "memory for hexdump\n"); 4435 } else 4436 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4437 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 4438 _dump_buf_dif); 4439} 4440 4441/** 4442 * lpfc_post_init_setup - Perform necessary device post initialization setup. 4443 * @phba: pointer to lpfc hba data structure. 4444 * 4445 * This routine is invoked to perform all the necessary post initialization 4446 * setup for the device. 4447 **/ 4448static void 4449lpfc_post_init_setup(struct lpfc_hba *phba) 4450{ 4451 struct Scsi_Host *shost; 4452 struct lpfc_adapter_event_header adapter_event; 4453 4454 /* Get the default values for Model Name and Description */ 4455 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 4456 4457 /* 4458 * hba setup may have changed the hba_queue_depth so we need to 4459 * adjust the value of can_queue. 4460 */ 4461 shost = pci_get_drvdata(phba->pcidev); 4462 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4463 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4464 lpfc_setup_bg(phba, shost); 4465 4466 lpfc_host_attrib_init(shost); 4467 4468 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 4469 spin_lock_irq(shost->host_lock); 4470 lpfc_poll_start_timer(phba); 4471 spin_unlock_irq(shost->host_lock); 4472 } 4473 4474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4475 "0428 Perform SCSI scan\n"); 4476 /* Send board arrival event to upper layer */ 4477 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 4478 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 4479 fc_host_post_vendor_event(shost, fc_get_event_number(), 4480 sizeof(adapter_event), 4481 (char *) &adapter_event, 4482 LPFC_NL_VENDOR_ID); 4483 return; 4484} 4485 4486/** 4487 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 
4488 * @phba: pointer to lpfc hba data structure. 4489 * 4490 * This routine is invoked to set up the PCI device memory space for device 4491 * with SLI-3 interface spec. 4492 * 4493 * Return codes 4494 * 0 - successful 4495 * other values - error 4496 **/ 4497static int 4498lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 4499{ 4500 struct pci_dev *pdev; 4501 unsigned long bar0map_len, bar2map_len; 4502 int i, hbq_count; 4503 void *ptr; 4504 int error = -ENODEV; 4505 4506 /* Obtain PCI device reference */ 4507 if (!phba->pcidev) 4508 return error; 4509 else 4510 pdev = phba->pcidev; 4511 4512 /* Set the device DMA mask size */ 4513 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 4514 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 4515 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 4516 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 4517 return error; 4518 } 4519 } 4520 4521 /* Get the bus address of Bar0 and Bar2 and the number of bytes 4522 * required by each mapping. 4523 */ 4524 phba->pci_bar0_map = pci_resource_start(pdev, 0); 4525 bar0map_len = pci_resource_len(pdev, 0); 4526 4527 phba->pci_bar2_map = pci_resource_start(pdev, 2); 4528 bar2map_len = pci_resource_len(pdev, 2); 4529 4530 /* Map HBA SLIM to a kernel virtual address. */ 4531 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 4532 if (!phba->slim_memmap_p) { 4533 dev_printk(KERN_ERR, &pdev->dev, 4534 "ioremap failed for SLIM memory.\n"); 4535 goto out; 4536 } 4537 4538 /* Map HBA Control Registers to a kernel virtual address. */ 4539 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 4540 if (!phba->ctrl_regs_memmap_p) { 4541 dev_printk(KERN_ERR, &pdev->dev, 4542 "ioremap failed for HBA control registers.\n"); 4543 goto out_iounmap_slim; 4544 } 4545 4546 /* Allocate memory for SLI-2 structures */ 4547 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 4548 SLI2_SLIM_SIZE, 4549 &phba->slim2p.phys, 4550 GFP_KERNEL); 4551 if (!phba->slim2p.virt) 4552 goto out_iounmap; 4553 4554 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 4555 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 4556 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 4557 phba->IOCBs = (phba->slim2p.virt + 4558 offsetof(struct lpfc_sli2_slim, IOCBs)); 4559 4560 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 4561 lpfc_sli_hbq_size(), 4562 &phba->hbqslimp.phys, 4563 GFP_KERNEL); 4564 if (!phba->hbqslimp.virt) 4565 goto out_free_slim; 4566 4567 hbq_count = lpfc_sli_hbq_count(); 4568 ptr = phba->hbqslimp.virt; 4569 for (i = 0; i < hbq_count; ++i) { 4570 phba->hbqs[i].hbq_virt = ptr; 4571 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4572 ptr += (lpfc_hbq_defs[i]->entry_count * 4573 sizeof(struct lpfc_hbq_entry)); 4574 } 4575 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 4576 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 4577 4578 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 4579 4580 INIT_LIST_HEAD(&phba->rb_pend_list); 4581 4582 phba->MBslimaddr = phba->slim_memmap_p; 4583 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 4584 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 4585 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 4586 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 4587 4588 return 0; 4589 4590out_free_slim: 4591 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 4592 phba->slim2p.virt, phba->slim2p.phys); 4593out_iounmap: 4594 
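	/*
	 * Error unwind: each label below releases the resource acquired
	 * just before the failing step, then falls through to the next
	 * label, so teardown runs in reverse order of setup.
	 */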
iounmap(phba->ctrl_regs_memmap_p);
4595out_iounmap_slim:
4596 iounmap(phba->slim_memmap_p);
4597out:
4598 return error;
4599}
4600
4601/**
4602 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4603 * @phba: pointer to lpfc hba data structure.
4604 *
4605 * This routine is invoked to unset the PCI device memory space for device
4606 * with SLI-3 interface spec.
4607 **/
4608static void
4609lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4610{
4611 struct pci_dev *pdev;
4612
4613 /* Obtain PCI device reference */
4614 if (!phba->pcidev)
4615 return;
4616 else
4617 pdev = phba->pcidev;
4618
4619 /* Free coherent DMA memory allocated */
4620 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4621 phba->hbqslimp.virt, phba->hbqslimp.phys);
4622 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4623 phba->slim2p.virt, phba->slim2p.phys);
4624
4625 /* I/O memory unmap */
4626 iounmap(phba->ctrl_regs_memmap_p);
4627 iounmap(phba->slim_memmap_p);
4628
4629 return;
4630}
4631
4632/**
4633 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4634 * @phba: pointer to lpfc hba data structure.
4635 *
4636 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4637 * done and check status.
4638 *
4639 * Return 0 if successful, otherwise -ENODEV.
4640 **/
4641int
4642lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4643{
4644 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4645 int i, port_error = -ENODEV;
4646
4647 if (!phba->sli4_hba.STAregaddr)
4648 return -ENODEV;
4649
4650 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4651 for (i = 0; i < 3000; i++) {
4652 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4653 /* Encounter fatal POST error, break out */
4654 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4655 port_error = -ENODEV;
4656 break;
4657 }
4658 if (LPFC_POST_STAGE_ARMFW_READY ==
4659 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4660 port_error = 0;
4661 break;
4662 }
4663 msleep(10);
4664 }
4665
4666 if (port_error)
4667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4668 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4669 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4670 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4671 bf_get(lpfc_hst_state_perr, &sta_reg),
4672 bf_get(lpfc_hst_state_sfi, &sta_reg),
4673 bf_get(lpfc_hst_state_nip, &sta_reg),
4674 bf_get(lpfc_hst_state_ipc, &sta_reg),
4675 bf_get(lpfc_hst_state_xrom, &sta_reg),
4676 bf_get(lpfc_hst_state_dl, &sta_reg),
4677 bf_get(lpfc_hst_state_port_status, &sta_reg));
4678
4679 /* Log device information */
4680 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4681 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4682 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4683 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4684 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4685 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4686 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4687 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4688 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
4689 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
4690 /* With unrecoverable error, log the error message and return error */
4691 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4692 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4693 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
4694 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
4695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4696 "1422 HBA Unrecoverable error: "
4697 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4698 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
4699 uerrlo_reg.word0, uerrhi_reg.word0,
4700 phba->sli4_hba.ue_mask_lo,
4701 phba->sli4_hba.ue_mask_hi);
4702 return -ENODEV;
4703 }
4704
4705 return port_error;
4706}
4707
4708/**
4709 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4710 * @phba: pointer to lpfc hba data structure.
4711 *
4712 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4713 * memory map.
4714 **/
4715static void
4716lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4717{
4718 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4719 LPFC_UERR_STATUS_LO;
4720 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4721 LPFC_UERR_STATUS_HI;
4722 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4723 LPFC_UE_MASK_LO;
4724 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4725 LPFC_UE_MASK_HI;
4726 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4727 LPFC_SCRATCHPAD;
4728}
4729
4730/**
4731 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4732 * @phba: pointer to lpfc hba data structure.
4733 *
4734 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4735 * memory map.
4736 **/
4737static void
4738lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4739{
4740
4741 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4742 LPFC_HST_STATE;
4743 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4744 LPFC_HST_ISR0;
4745 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4746 LPFC_HST_IMR0;
4747 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4748 LPFC_HST_ISCR0;
4749 return;
4750}
4751
4752/**
4753 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4754 * @phba: pointer to lpfc hba data structure.
4755 * @vf: virtual function number
4756 *
4757 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4758 * based on the given virtual function number, @vf.
4759 *
4760 * Return 0 if successful, otherwise -ENODEV.
4761 **/
4762static int
4763lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4764{
4765 if (vf > LPFC_VIR_FUNC_MAX)
4766 return -ENODEV;
4767
4768 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4769 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4770 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4771 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4772 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4773 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4774 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4775 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4776 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4777 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4778 return 0;
4779}
4780
4781/**
4782 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4783 * @phba: pointer to lpfc hba data structure.
4784 *
4785 * This routine is invoked to create the bootstrap mailbox
4786 * region consistent with the SLI-4 interface spec. This
4787 * routine allocates all memory necessary to communicate
4788 * mailbox commands to the port and sets up all alignment
4789 * needs. No locks are expected to be held when calling
4790 * this routine.
4791 *
4792 * Return codes
4793 * 0 - successful
4794 * ENOMEM - could not allocate memory.
4795 **/
4796static int
4797lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4798{
4799 uint32_t bmbx_size;
4800 struct lpfc_dmabuf *dmabuf;
4801 struct dma_address *dma_address;
4802 uint32_t pa_addr;
4803 uint64_t phys_addr;
4804
4805 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4806 if (!dmabuf)
4807 return -ENOMEM;
4808
4809 /*
4810 * The bootstrap mailbox region is comprised of 2 parts
4811 * plus an alignment restriction of 16 bytes.
4812 */
4813 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4814 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4815 bmbx_size,
4816 &dmabuf->phys,
4817 GFP_KERNEL);
4818 if (!dmabuf->virt) {
4819 kfree(dmabuf);
4820 return -ENOMEM;
4821 }
4822 memset(dmabuf->virt, 0, bmbx_size);
4823
4824 /*
4825 * Initialize the bootstrap mailbox pointers now so that the register
4826 * operations are simple later. The mailbox dma address is required
4827 * to be 16-byte aligned. Also align the virtual memory as each
4828 * mailbox is copied into the bmbx mailbox region before issuing the
4829 * command to the port.
4830 */
4831 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4832 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4833
4834 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4835 LPFC_ALIGN_16_BYTE);
4836 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4837 LPFC_ALIGN_16_BYTE);
4838
4839 /*
4840 * Set the high and low physical addresses now. The SLI4 alignment
4841 * requirement is 16 bytes and the mailbox is posted to the port
4842 * as two 30-bit addresses. The other data is a bit marking whether
4843 * the 30-bit address is the high or low address.
4844 * Upcast bmbx aphys to 64bits so shift instruction compiles
4845 * clean on 32 bit machines.
4846 */
4847 dma_address = &phba->sli4_hba.bmbx.dma_address;
4848 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4849 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4850 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4851 LPFC_BMBX_BIT1_ADDR_HI);
4852
4853 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4854 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4855 LPFC_BMBX_BIT1_ADDR_LO);
4856 return 0;
4857}
4858
4859/**
4860 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4861 * @phba: pointer to lpfc hba data structure.
4862 *
4863 * This routine is invoked to teardown the bootstrap mailbox
4864 * region and release all host resources. This routine requires
4865 * the caller to ensure all mailbox commands recovered, no
4866 * additional mailbox commands are sent, and interrupts are disabled
4867 * before calling this routine.
4868 *
4869 **/
4870static void
4871lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4872{
4873 dma_free_coherent(&phba->pcidev->dev,
4874 phba->sli4_hba.bmbx.bmbx_size,
4875 phba->sli4_hba.bmbx.dmabuf->virt,
4876 phba->sli4_hba.bmbx.dmabuf->phys);
4877
4878 kfree(phba->sli4_hba.bmbx.dmabuf);
4879 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4880}
4881
4882/**
4883 * lpfc_sli4_read_config - Get the config parameters.
4884 * @phba: pointer to lpfc hba data structure.
4885 *
4886 * This routine is invoked to read the configuration parameters from the HBA.
4887 * The configuration parameters are used to set the base and maximum values
4888 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4889 * allocation for the port.
4890 *
4891 * Return codes
4892 * 0 - successful
4893 * ENOMEM - No available memory
4894 * EIO - The mailbox failed to complete successfully.
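 *
 * For illustration only (editor's note): the counts read here feed the
 * derived limits set below, e.g. a reported vpi_count of 64 yields
 * phba->max_vpi = 63 and phba->max_vports = 63, and cfg_hba_queue_depth
 * is capped at the reported max_xri.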
4895 **/
4896static int
4897lpfc_sli4_read_config(struct lpfc_hba *phba)
4898{
4899 LPFC_MBOXQ_t *pmb;
4900 struct lpfc_mbx_read_config *rd_config;
4901 uint32_t rc = 0;
4902
4903 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4904 if (!pmb) {
4905 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4906 "2011 Unable to allocate memory for issuing "
4907 "SLI_CONFIG_SPECIAL mailbox command\n");
4908 return -ENOMEM;
4909 }
4910
4911 lpfc_read_config(phba, pmb);
4912
4913 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4914 if (rc != MBX_SUCCESS) {
4915 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4916 "2012 Mailbox failed, mbxCmd x%x "
4917 "READ_CONFIG, mbxStatus x%x\n",
4918 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4919 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4920 rc = -EIO;
4921 } else {
4922 rd_config = &pmb->u.mqe.un.rd_config;
4923 phba->sli4_hba.max_cfg_param.max_xri =
4924 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4925 phba->sli4_hba.max_cfg_param.xri_base =
4926 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4927 phba->sli4_hba.max_cfg_param.max_vpi =
4928 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4929 phba->sli4_hba.max_cfg_param.vpi_base =
4930 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4931 phba->sli4_hba.max_cfg_param.max_rpi =
4932 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4933 phba->sli4_hba.max_cfg_param.rpi_base =
4934 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4935 phba->sli4_hba.max_cfg_param.max_vfi =
4936 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4937 phba->sli4_hba.max_cfg_param.vfi_base =
4938 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4939 phba->sli4_hba.max_cfg_param.max_fcfi =
4940 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4941 phba->sli4_hba.max_cfg_param.fcfi_base =
4942 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4943 phba->sli4_hba.max_cfg_param.max_eq =
4944 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4945 phba->sli4_hba.max_cfg_param.max_rq =
4946 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4947 phba->sli4_hba.max_cfg_param.max_wq =
4948 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4949 phba->sli4_hba.max_cfg_param.max_cq =
4950 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4951 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4952 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4953 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4954 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4955 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4956 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
4957 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
4958 phba->max_vports = phba->max_vpi;
4959 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4960 "2003 cfg params XRI(B:%d M:%d), "
4961 "VPI(B:%d M:%d) "
4962 "VFI(B:%d M:%d) "
4963 "RPI(B:%d M:%d) "
4964 "FCFI(B:%d M:%d)\n",
4965 phba->sli4_hba.max_cfg_param.xri_base,
4966 phba->sli4_hba.max_cfg_param.max_xri,
4967 phba->sli4_hba.max_cfg_param.vpi_base,
4968 phba->sli4_hba.max_cfg_param.max_vpi,
4969 phba->sli4_hba.max_cfg_param.vfi_base,
4970 phba->sli4_hba.max_cfg_param.max_vfi,
4971 phba->sli4_hba.max_cfg_param.rpi_base,
4972 phba->sli4_hba.max_cfg_param.max_rpi,
4973 phba->sli4_hba.max_cfg_param.fcfi_base,
4974 phba->sli4_hba.max_cfg_param.max_fcfi);
4975 }
4976 mempool_free(pmb, phba->mbox_mem_pool);
4977
4978 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4979 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4980 phba->cfg_hba_queue_depth =
4981 phba->sli4_hba.max_cfg_param.max_xri;
4982 return rc;
4983}
4984
4985/**
4986 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4987 * @phba: pointer to lpfc hba data structure.
4988 *
4989 * This routine is invoked to setup the host-side endian order to the
4990 * HBA consistent with the SLI-4 interface spec.
4991 *
4992 * Return codes
4993 * 0 - successful
4994 * ENOMEM - No available memory
4995 * EIO - The mailbox failed to complete successfully.
4996 **/
4997static int
4998lpfc_setup_endian_order(struct lpfc_hba *phba)
4999{
5000 LPFC_MBOXQ_t *mboxq;
5001 uint32_t rc = 0;
5002 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5003 HOST_ENDIAN_HIGH_WORD1};
5004
5005 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5006 if (!mboxq) {
5007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5008 "0492 Unable to allocate memory for issuing "
5009 "SLI_CONFIG_SPECIAL mailbox command\n");
5010 return -ENOMEM;
5011 }
5012
5013 /*
5014 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5015 * words to contain special data values and no other data.
5016 */
5017 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5018 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5019 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5020 if (rc != MBX_SUCCESS) {
5021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5022 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
5023 "status x%x\n",
5024 rc);
5025 rc = -EIO;
5026 }
5027
5028 mempool_free(mboxq, phba->mbox_mem_pool);
5029 return rc;
5030}
5031
5032/**
5033 * lpfc_sli4_queue_create - Create all the SLI4 queues
5034 * @phba: pointer to lpfc hba data structure.
5035 *
5036 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5037 * operation. For each SLI4 queue type, the parameters such as queue entry
5038 * count (queue depth) shall be taken from the module parameter. For now,
5039 * we just use some constant number as placeholder.
5040 *
5041 * Return codes
5042 * 0 - successful
5043 * ENOMEM - No available memory
5044 * EIO - The mailbox failed to complete successfully.
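 *
 * For illustration only (editor's note): the sanity checks below clamp
 * the requested counts to what READ_CONFIG reported, e.g. if the port
 * reports max_wq = 36 and LPFC_SP_WQN_DEF slow-path WQs are reserved,
 * at most (36 - LPFC_SP_WQN_DEF) fast-path FCP WQs are kept and
 * cfg_fcp_wq_count is lowered to match; EQs are further capped so they
 * never outnumber WQs.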
5045 **/
5046static int
5047lpfc_sli4_queue_create(struct lpfc_hba *phba)
5048{
5049 struct lpfc_queue *qdesc;
5050 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5051 int cfg_fcp_wq_count;
5052 int cfg_fcp_eq_count;
5053
5054 /*
5055 * Sanity check for configured queue parameters against the run-time
5056 * device parameters
5057 */
5058
5059 /* Sanity check on FCP fast-path WQ parameters */
5060 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5061 if (cfg_fcp_wq_count >
5062 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5063 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5064 LPFC_SP_WQN_DEF;
5065 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5066 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5067 "2581 Not enough WQs (%d) from "
5068 "the pci function for supporting "
5069 "FCP WQs (%d)\n",
5070 phba->sli4_hba.max_cfg_param.max_wq,
5071 phba->cfg_fcp_wq_count);
5072 goto out_error;
5073 }
5074 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5075 "2582 Not enough WQs (%d) from the pci "
5076 "function for supporting the requested "
5077 "FCP WQs (%d), the actual FCP WQs can "
5078 "be supported: %d\n",
5079 phba->sli4_hba.max_cfg_param.max_wq,
5080 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5081 }
5082 /* The actual number of FCP work queues adopted */
5083 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5084
5085 /* Sanity check on FCP fast-path EQ parameters */
5086 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5087 if (cfg_fcp_eq_count >
5088 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5089 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5090 LPFC_SP_EQN_DEF;
5091 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5093 "2574 Not enough EQs (%d) from the "
5094 "pci function for supporting FCP "
5095 "EQs (%d)\n",
5096 phba->sli4_hba.max_cfg_param.max_eq,
5097 phba->cfg_fcp_eq_count);
5098 goto out_error;
5099 }
5100 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5101 "2575 Not enough EQs (%d) from the pci "
5102 "function for supporting the requested "
5103 "FCP EQs (%d), the actual FCP EQs can "
5104 "be supported: %d\n",
5105 phba->sli4_hba.max_cfg_param.max_eq,
5106 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5107 }
5108 /* It does not make sense to have more EQs than WQs */
5109 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5110 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5111 "2593 The FCP EQ count(%d) cannot be greater "
5112 "than the FCP WQ count(%d), limiting the "
5113 "FCP EQ count to %d\n", cfg_fcp_eq_count,
5114 phba->cfg_fcp_wq_count,
5115 phba->cfg_fcp_wq_count);
5116 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5117 }
5118 /* The actual number of FCP event queues adopted */
5119 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5120 /* The overall number of event queues used */
5121 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5122
5123 /*
5124 * Create Event Queues (EQs)
5125 */
5126
5127 /* Get EQ depth from module parameter, fake the default for now */
5128 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5129 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5130
5131 /* Create slow path event queue */
5132 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5133 phba->sli4_hba.eq_ecount);
5134 if (!qdesc) {
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5136 "0496 Failed allocate slow-path EQ\n");
5137 goto out_error;
5138 }
5139 phba->sli4_hba.sp_eq = qdesc;
5140
5141 /* Create fast-path FCP Event Queue(s) */
5142 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5143
phba->cfg_fcp_eq_count), GFP_KERNEL); 5144 if (!phba->sli4_hba.fp_eq) { 5145 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5146 "2576 Failed allocate memory for fast-path " 5147 "EQ record array\n"); 5148 goto out_free_sp_eq; 5149 } 5150 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5151 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5152 phba->sli4_hba.eq_ecount); 5153 if (!qdesc) { 5154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5155 "0497 Failed allocate fast-path EQ\n"); 5156 goto out_free_fp_eq; 5157 } 5158 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5159 } 5160 5161 /* 5162 * Create Complete Queues (CQs) 5163 */ 5164 5165 /* Get CQ depth from module parameter, fake the default for now */ 5166 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 5167 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 5168 5169 /* Create slow-path Mailbox Command Complete Queue */ 5170 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5171 phba->sli4_hba.cq_ecount); 5172 if (!qdesc) { 5173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5174 "0500 Failed allocate slow-path mailbox CQ\n"); 5175 goto out_free_fp_eq; 5176 } 5177 phba->sli4_hba.mbx_cq = qdesc; 5178 5179 /* Create slow-path ELS Complete Queue */ 5180 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5181 phba->sli4_hba.cq_ecount); 5182 if (!qdesc) { 5183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5184 "0501 Failed allocate slow-path ELS CQ\n"); 5185 goto out_free_mbx_cq; 5186 } 5187 phba->sli4_hba.els_cq = qdesc; 5188 5189 5190 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5191 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5192 phba->cfg_fcp_eq_count), GFP_KERNEL); 5193 if (!phba->sli4_hba.fcp_cq) { 5194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5195 "2577 Failed allocate memory for fast-path " 5196 "CQ record array\n"); 5197 goto out_free_els_cq; 5198 } 5199 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5200 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5201 phba->sli4_hba.cq_ecount); 5202 if (!qdesc) { 5203 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5204 "0499 Failed allocate fast-path FCP " 5205 "CQ (%d)\n", fcp_cqidx); 5206 goto out_free_fcp_cq; 5207 } 5208 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 5209 } 5210 5211 /* Create Mailbox Command Queue */ 5212 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 5213 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 5214 5215 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 5216 phba->sli4_hba.mq_ecount); 5217 if (!qdesc) { 5218 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5219 "0505 Failed allocate slow-path MQ\n"); 5220 goto out_free_fcp_cq; 5221 } 5222 phba->sli4_hba.mbx_wq = qdesc; 5223 5224 /* 5225 * Create all the Work Queues (WQs) 5226 */ 5227 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 5228 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 5229 5230 /* Create slow-path ELS Work Queue */ 5231 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5232 phba->sli4_hba.wq_ecount); 5233 if (!qdesc) { 5234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5235 "0504 Failed allocate slow-path ELS WQ\n"); 5236 goto out_free_mbx_wq; 5237 } 5238 phba->sli4_hba.els_wq = qdesc; 5239 5240 /* Create fast-path FCP Work Queue(s) */ 5241 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 5242 phba->cfg_fcp_wq_count), GFP_KERNEL); 5243 if (!phba->sli4_hba.fcp_wq) { 5244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5245 "2578 Failed allocate memory for fast-path " 5246 "WQ record array\n"); 5247 goto 
out_free_els_wq;
5248 }
5249 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5250 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5251 phba->sli4_hba.wq_ecount);
5252 if (!qdesc) {
5253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5254 "0503 Failed allocate fast-path FCP "
5255 "WQ (%d)\n", fcp_wqidx);
5256 goto out_free_fcp_wq;
5257 }
5258 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5259 }
5260
5261 /*
5262 * Create Receive Queue (RQ)
5263 */
5264 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5265 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5266
5267 /* Create Receive Queue for header */
5268 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5269 phba->sli4_hba.rq_ecount);
5270 if (!qdesc) {
5271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5272 "0506 Failed allocate receive HRQ\n");
5273 goto out_free_fcp_wq;
5274 }
5275 phba->sli4_hba.hdr_rq = qdesc;
5276
5277 /* Create Receive Queue for data */
5278 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5279 phba->sli4_hba.rq_ecount);
5280 if (!qdesc) {
5281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5282 "0507 Failed allocate receive DRQ\n");
5283 goto out_free_hdr_rq;
5284 }
5285 phba->sli4_hba.dat_rq = qdesc;
5286
5287 return 0;
5288
5289out_free_hdr_rq:
5290 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5291 phba->sli4_hba.hdr_rq = NULL;
5292out_free_fcp_wq:
5293 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5294 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5295 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5296 }
5297 kfree(phba->sli4_hba.fcp_wq);
5298out_free_els_wq:
5299 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5300 phba->sli4_hba.els_wq = NULL;
5301out_free_mbx_wq:
5302 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5303 phba->sli4_hba.mbx_wq = NULL;
5304out_free_fcp_cq:
5305 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5306 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5307 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5308 }
5309 kfree(phba->sli4_hba.fcp_cq);
5310out_free_els_cq:
5311 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5312 phba->sli4_hba.els_cq = NULL;
5313out_free_mbx_cq:
5314 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5315 phba->sli4_hba.mbx_cq = NULL;
5316out_free_fp_eq:
5317 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5318 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5319 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5320 }
5321 kfree(phba->sli4_hba.fp_eq);
5322out_free_sp_eq:
5323 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5324 phba->sli4_hba.sp_eq = NULL;
5325out_error:
5326 return -ENOMEM;
5327}
5328
5329/**
5330 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5331 * @phba: pointer to lpfc hba data structure.
5332 *
5333 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5334 * operation.
5335 *
5336 * Return codes
5337 * 0 - successful
5338 * ENOMEM - No available memory
5339 * EIO - The mailbox failed to complete successfully.
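 *
 * For illustration only (editor's note): this is the inverse of
 * lpfc_sli4_queue_create() above; callers such as
 * lpfc_sli4_driver_resource_unset() invoke it as:
 *
 *	lpfc_sli4_queue_destroy(phba);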
5340 **/
5341static void
5342lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5343{
5344 int fcp_qidx;
5345
5346 /* Release mailbox command work queue */
5347 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5348 phba->sli4_hba.mbx_wq = NULL;
5349
5350 /* Release ELS work queue */
5351 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5352 phba->sli4_hba.els_wq = NULL;
5353
5354 /* Release FCP work queue */
5355 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5356 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5357 kfree(phba->sli4_hba.fcp_wq);
5358 phba->sli4_hba.fcp_wq = NULL;
5359
5360 /* Release unsolicited receive queue */
5361 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5362 phba->sli4_hba.hdr_rq = NULL;
5363 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5364 phba->sli4_hba.dat_rq = NULL;
5365
5366 /* Release ELS complete queue */
5367 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5368 phba->sli4_hba.els_cq = NULL;
5369
5370 /* Release mailbox command complete queue */
5371 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5372 phba->sli4_hba.mbx_cq = NULL;
5373
5374 /* Release FCP response complete queue */
5375 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5376 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5377 kfree(phba->sli4_hba.fcp_cq);
5378 phba->sli4_hba.fcp_cq = NULL;
5379
5380 /* Release fast-path event queue */
5381 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5382 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5383 kfree(phba->sli4_hba.fp_eq);
5384 phba->sli4_hba.fp_eq = NULL;
5385
5386 /* Release slow-path event queue */
5387 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5388 phba->sli4_hba.sp_eq = NULL;
5389
5390 return;
5391}
5392
5393/**
5394 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5395 * @phba: pointer to lpfc hba data structure.
5396 *
5397 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5398 * operation.
5399 *
5400 * Return codes
5401 * 0 - successful
5402 * ENOMEM - No available memory
5403 * EIO - The mailbox failed to complete successfully.
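 *
 * For illustration only (editor's worked example of the round-robin WQ
 * to CQ binding done below): with cfg_fcp_eq_count = 2 and
 * cfg_fcp_wq_count = 4, fcp_cq_index cycles 0, 1, 0, 1, so WQ0 and WQ2
 * complete on CQ0 while WQ1 and WQ3 complete on CQ1.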
5404 **/ 5405int 5406lpfc_sli4_queue_setup(struct lpfc_hba *phba) 5407{ 5408 int rc = -ENOMEM; 5409 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5410 int fcp_cq_index = 0; 5411 5412 /* 5413 * Set up Event Queues (EQs) 5414 */ 5415 5416 /* Set up slow-path event queue */ 5417 if (!phba->sli4_hba.sp_eq) { 5418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5419 "0520 Slow-path EQ not allocated\n"); 5420 goto out_error; 5421 } 5422 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 5423 LPFC_SP_DEF_IMAX); 5424 if (rc) { 5425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5426 "0521 Failed setup of slow-path EQ: " 5427 "rc = 0x%x\n", rc); 5428 goto out_error; 5429 } 5430 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5431 "2583 Slow-path EQ setup: queue-id=%d\n", 5432 phba->sli4_hba.sp_eq->queue_id); 5433 5434 /* Set up fast-path event queue */ 5435 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5436 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 5437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5438 "0522 Fast-path EQ (%d) not " 5439 "allocated\n", fcp_eqidx); 5440 goto out_destroy_fp_eq; 5441 } 5442 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 5443 phba->cfg_fcp_imax); 5444 if (rc) { 5445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5446 "0523 Failed setup of fast-path EQ " 5447 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 5448 goto out_destroy_fp_eq; 5449 } 5450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5451 "2584 Fast-path EQ setup: " 5452 "queue[%d]-id=%d\n", fcp_eqidx, 5453 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 5454 } 5455 5456 /* 5457 * Set up Complete Queues (CQs) 5458 */ 5459 5460 /* Set up slow-path MBOX Complete Queue as the first CQ */ 5461 if (!phba->sli4_hba.mbx_cq) { 5462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5463 "0528 Mailbox CQ not allocated\n"); 5464 goto out_destroy_fp_eq; 5465 } 5466 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 5467 LPFC_MCQ, LPFC_MBOX); 5468 if (rc) { 5469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5470 "0529 Failed setup of slow-path mailbox CQ: " 5471 "rc = 0x%x\n", rc); 5472 goto out_destroy_fp_eq; 5473 } 5474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5475 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 5476 phba->sli4_hba.mbx_cq->queue_id, 5477 phba->sli4_hba.sp_eq->queue_id); 5478 5479 /* Set up slow-path ELS Complete Queue */ 5480 if (!phba->sli4_hba.els_cq) { 5481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5482 "0530 ELS CQ not allocated\n"); 5483 goto out_destroy_mbx_cq; 5484 } 5485 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 5486 LPFC_WCQ, LPFC_ELS); 5487 if (rc) { 5488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5489 "0531 Failed setup of slow-path ELS CQ: " 5490 "rc = 0x%x\n", rc); 5491 goto out_destroy_mbx_cq; 5492 } 5493 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5494 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 5495 phba->sli4_hba.els_cq->queue_id, 5496 phba->sli4_hba.sp_eq->queue_id); 5497 5498 /* Set up fast-path FCP Response Complete Queue */ 5499 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5500 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 5501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5502 "0526 Fast-path FCP CQ (%d) not " 5503 "allocated\n", fcp_cqidx); 5504 goto out_destroy_fcp_cq; 5505 } 5506 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 5507 phba->sli4_hba.fp_eq[fcp_cqidx], 5508 LPFC_WCQ, LPFC_FCP); 5509 if (rc) { 5510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5511 "0527 Failed setup of fast-path FCP " 5512 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 5513 goto out_destroy_fcp_cq; 5514 } 5515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5516 "2588 FCP CQ setup: cq[%d]-id=%d, " 5517 "parent eq[%d]-id=%d\n", 5518 fcp_cqidx, 5519 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 5520 fcp_cqidx, 5521 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 5522 } 5523 5524 /* 5525 * Set up all the Work Queues (WQs) 5526 */ 5527 5528 /* Set up Mailbox Command Queue */ 5529 if (!phba->sli4_hba.mbx_wq) { 5530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5531 "0538 Slow-path MQ not allocated\n"); 5532 goto out_destroy_fcp_cq; 5533 } 5534 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 5535 phba->sli4_hba.mbx_cq, LPFC_MBOX); 5536 if (rc) { 5537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5538 "0539 Failed setup of slow-path MQ: " 5539 "rc = 0x%x\n", rc); 5540 goto out_destroy_fcp_cq; 5541 } 5542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5543 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 5544 phba->sli4_hba.mbx_wq->queue_id, 5545 phba->sli4_hba.mbx_cq->queue_id); 5546 5547 /* Set up slow-path ELS Work Queue */ 5548 if (!phba->sli4_hba.els_wq) { 5549 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5550 "0536 Slow-path ELS WQ not allocated\n"); 5551 goto out_destroy_mbx_wq; 5552 } 5553 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 5554 phba->sli4_hba.els_cq, LPFC_ELS); 5555 if (rc) { 5556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5557 "0537 Failed setup of slow-path ELS WQ: " 5558 "rc = 0x%x\n", rc); 5559 goto out_destroy_mbx_wq; 5560 } 5561 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5562 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 5563 phba->sli4_hba.els_wq->queue_id, 5564 phba->sli4_hba.els_cq->queue_id); 5565 5566 /* Set up fast-path FCP Work Queue */ 5567 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5568 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 5569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5570 "0534 Fast-path FCP WQ (%d) not " 5571 "allocated\n", fcp_wqidx); 5572 goto out_destroy_fcp_wq; 5573 } 5574 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 5575 phba->sli4_hba.fcp_cq[fcp_cq_index], 5576 LPFC_FCP); 5577 if (rc) { 5578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5579 "0535 Failed setup of fast-path FCP " 5580 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 5581 goto out_destroy_fcp_wq; 5582 } 5583 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5584 "2591 FCP WQ setup: wq[%d]-id=%d, " 5585 "parent cq[%d]-id=%d\n", 5586 fcp_wqidx, 5587 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 5588 fcp_cq_index, 5589 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 5590 /* Round robin FCP Work Queue's Completion Queue assignment */ 5591 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 5592 } 5593 5594 /* 5595 * Create Receive Queue (RQ) 5596 */ 5597 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 5598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5599 "0540 Receive Queue not allocated\n"); 5600 goto out_destroy_fcp_wq; 5601 } 5602 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 5603 phba->sli4_hba.els_cq, LPFC_USOL); 5604 if (rc) { 5605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5606 "0541 Failed setup of Receive Queue: " 5607 "rc = 0x%x\n", rc); 5608 goto out_destroy_fcp_wq; 5609 } 5610 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5611 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 5612 "parent cq-id=%d\n", 5613 phba->sli4_hba.hdr_rq->queue_id, 5614 phba->sli4_hba.dat_rq->queue_id, 5615 phba->sli4_hba.els_cq->queue_id); 5616 return 0; 5617 5618out_destroy_fcp_wq: 5619 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--) 5620 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 5621 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5622out_destroy_mbx_wq: 5623 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5624out_destroy_fcp_cq: 5625 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 5626 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 5627 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5628out_destroy_mbx_cq: 5629 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5630out_destroy_fp_eq: 5631 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 5632 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 5633 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 5634out_error: 5635 return rc; 5636} 5637 5638/** 5639 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 5640 * @phba: pointer to lpfc hba data structure. 5641 * 5642 * This routine is invoked to unset (destroy via mailbox commands) all the 5643 * SLI4 queues previously set up for the FCoE HBA operation. It is the 5644 * inverse of lpfc_sli4_queue_setup() and, being a void routine, has no 5645 * return codes. 5649 **/ 5650void 5651lpfc_sli4_queue_unset(struct lpfc_hba *phba) 5652{ 5653 int fcp_qidx; 5654 5655 /* Unset mailbox command work queue */ 5656 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5657 /* Unset ELS work queue */ 5658 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5659 /* Unset unsolicited receive queue */ 5660 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 5661 /* Unset FCP work queue */ 5662 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5663 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 5664 /* Unset mailbox command complete queue */ 5665 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5666 /* Unset ELS complete queue */ 5667 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5668 /* Unset FCP response complete queue */ 5669 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5670 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 5671 /* Unset fast-path event queue */ 5672 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5673 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 5674 /* Unset slow-path event queue */ 5675 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 5676} 5677 5678/** 5679 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 5680 * @phba: pointer to lpfc hba data structure. 5681 * 5682 * This routine is invoked to allocate and set up a pool of completion queue 5683 * events. The body of the completion queue event is a completion queue entry 5684 * (CQE). For now, this pool is used for the interrupt service routine to queue 5685 * the following HBA completion queue events for the worker thread to process: 5686 * - Mailbox asynchronous events 5687 * - Receive queue completion unsolicited events 5688 * Later, this can be used for all the slow-path events.
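 *
 * Producer/consumer pairing sketch (the alloc/release APIs appear further
 * below in this file; locking elided):
 *
 *   cq_event = lpfc_sli4_cq_event_alloc(phba);   - ISR takes from the pool
 *   ... hand cq_event to the worker thread ...
 *   lpfc_sli4_cq_event_release(phba, cq_event);  - worker returns it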
5689 * 5690 * Return codes 5691 * 0 - successful 5692 * -ENOMEM - No available memory 5693 **/ 5694static int 5695lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 5696{ 5697 struct lpfc_cq_event *cq_event; 5698 int i; 5699 5700 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 5701 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 5702 if (!cq_event) 5703 goto out_pool_create_fail; 5704 list_add_tail(&cq_event->list, 5705 &phba->sli4_hba.sp_cqe_event_pool); 5706 } 5707 return 0; 5708 5709out_pool_create_fail: 5710 lpfc_sli4_cq_event_pool_destroy(phba); 5711 return -ENOMEM; 5712} 5713 5714/** 5715 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 5716 * @phba: pointer to lpfc hba data structure. 5717 * 5718 * This routine is invoked to free the pool of completion queue events at 5719 * driver unload time. Note that it is the responsibility of the driver 5720 * cleanup routine to return all the outstanding completion-queue events 5721 * allocated from this pool back into the pool before invoking this routine 5722 * to destroy the pool. 5723 **/ 5724static void 5725lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 5726{ 5727 struct lpfc_cq_event *cq_event, *next_cq_event; 5728 5729 list_for_each_entry_safe(cq_event, next_cq_event, 5730 &phba->sli4_hba.sp_cqe_event_pool, list) { 5731 list_del(&cq_event->list); 5732 kfree(cq_event); 5733 } 5734} 5735 5736/** 5737 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 5738 * @phba: pointer to lpfc hba data structure. 5739 * 5740 * This routine is the lock-free version of the API invoked to allocate a 5741 * completion-queue event from the free pool. 5742 * 5743 * Return: Pointer to the newly allocated completion-queue event if successful 5744 * NULL otherwise. 5745 **/ 5746struct lpfc_cq_event * 5747__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 5748{ 5749 struct lpfc_cq_event *cq_event = NULL; 5750 5751 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 5752 struct lpfc_cq_event, list); 5753 return cq_event; 5754} 5755 5756/** 5757 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 5758 * @phba: pointer to lpfc hba data structure. 5759 * 5760 * This routine is the locked version of the API invoked to allocate a 5761 * completion-queue event from the free pool. 5762 * 5763 * Return: Pointer to the newly allocated completion-queue event if successful 5764 * NULL otherwise. 5765 **/ 5766struct lpfc_cq_event * 5767lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 5768{ 5769 struct lpfc_cq_event *cq_event; 5770 unsigned long iflags; 5771 5772 spin_lock_irqsave(&phba->hbalock, iflags); 5773 cq_event = __lpfc_sli4_cq_event_alloc(phba); 5774 spin_unlock_irqrestore(&phba->hbalock, iflags); 5775 return cq_event; 5776} 5777 5778/** 5779 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 5780 * @phba: pointer to lpfc hba data structure. 5781 * @cq_event: pointer to the completion queue event to be freed. 5782 * 5783 * This routine is the lock-free version of the API invoked to release a 5784 * completion-queue event back into the free pool. 5785 **/ 5786void 5787__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 5788 struct lpfc_cq_event *cq_event) 5789{ 5790 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 5791} 5792 5793/** 5794 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 5795 * @phba: pointer to lpfc hba data structure.
5796 * @cq_event: pointer to the completion queue event to be freed. 5797 * 5798 * This routine is the locked version of the API invoked to release a 5799 * completion-queue event back into the free pool. 5800 **/ 5801void 5802lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 5803 struct lpfc_cq_event *cq_event) 5804{ 5805 unsigned long iflags; 5806 spin_lock_irqsave(&phba->hbalock, iflags); 5807 __lpfc_sli4_cq_event_release(phba, cq_event); 5808 spin_unlock_irqrestore(&phba->hbalock, iflags); 5809} 5810 5811/** 5812 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 5813 * @phba: pointer to lpfc hba data structure. 5814 * 5815 * This routine frees all the pending completion-queue events back into the 5816 * free pool for device reset. 5817 **/ 5818static void 5819lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 5820{ 5821 LIST_HEAD(cqelist); 5822 struct lpfc_cq_event *cqe; 5823 unsigned long iflags; 5824 5825 /* Retrieve all the pending WCQEs from pending WCQE lists */ 5826 spin_lock_irqsave(&phba->hbalock, iflags); 5827 /* Pending FCP XRI abort events */ 5828 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 5829 &cqelist); 5830 /* Pending ELS XRI abort events */ 5831 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 5832 &cqelist); 5833 /* Pending async events */ 5834 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 5835 &cqelist); 5836 spin_unlock_irqrestore(&phba->hbalock, iflags); 5837 5838 while (!list_empty(&cqelist)) { 5839 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 5840 lpfc_sli4_cq_event_release(phba, cqe); 5841 } 5842} 5843 5844/** 5845 * lpfc_pci_function_reset - Reset pci function. 5846 * @phba: pointer to lpfc hba data structure. 5847 * 5848 * This routine is invoked to request a PCI function reset. It destroys 5849 * all resources assigned to the PCI function that originates this request. 5850 * 5851 * Return codes 5852 * 0 - successful 5853 * ENOMEM - No available memory 5854 * EIO - The mailbox failed to complete successfully. 5855 **/ 5856int 5857lpfc_pci_function_reset(struct lpfc_hba *phba) 5858{ 5859 LPFC_MBOXQ_t *mboxq; 5860 uint32_t rc = 0; 5861 uint32_t shdr_status, shdr_add_status; 5862 union lpfc_sli4_cfg_shdr *shdr; 5863 5864 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5865 if (!mboxq) { 5866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5867 "0494 Unable to allocate memory for issuing " 5868 "SLI_FUNCTION_RESET mailbox command\n"); 5869 return -ENOMEM; 5870 } 5871 5872 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 5873 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5874 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 5875 LPFC_SLI4_MBX_EMBED); 5876 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5877 shdr = (union lpfc_sli4_cfg_shdr *) 5878 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 5879 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5880 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5881 if (rc != MBX_TIMEOUT) 5882 mempool_free(mboxq, phba->mbox_mem_pool); 5883 if (shdr_status || shdr_add_status || rc) { 5884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5885 "0495 SLI_FUNCTION_RESET mailbox failed with " 5886 "status x%x add_status x%x, mbx status x%x\n", 5887 shdr_status, shdr_add_status, rc); 5888 rc = -ENXIO; 5889 } 5890 return rc; 5891} 5892 5893/** 5894 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 5895 * @phba: pointer to lpfc hba data structure.
5896 * @cnt: number of nop mailbox commands to send. 5897 * 5898 * This routine is invoked to send @cnt NOP mailbox commands and wait for 5899 * each command to complete. 5900 * 5901 * Return: the number of NOP mailbox commands completed. 5902 **/ 5903static int 5904lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 5905{ 5906 LPFC_MBOXQ_t *mboxq; 5907 int length, cmdsent; 5908 uint32_t mbox_tmo; 5909 uint32_t rc = 0; 5910 uint32_t shdr_status, shdr_add_status; 5911 union lpfc_sli4_cfg_shdr *shdr; 5912 5913 if (cnt == 0) { 5914 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5915 "2518 Requested to send 0 NOP mailbox cmd\n"); 5916 return cnt; 5917 } 5918 5919 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5920 if (!mboxq) { 5921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5922 "2519 Unable to allocate memory for issuing " 5923 "NOP mailbox command\n"); 5924 return 0; 5925 } 5926 5927 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 5928 length = (sizeof(struct lpfc_mbx_nop) - 5929 sizeof(struct lpfc_sli4_cfg_mhdr)); 5930 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5931 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 5932 5933 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5934 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 5935 if (!phba->sli4_hba.intr_enable) 5936 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5937 else 5938 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 5939 if (rc == MBX_TIMEOUT) 5940 break; 5941 /* Check return status */ 5942 shdr = (union lpfc_sli4_cfg_shdr *) 5943 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 5944 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5945 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 5946 &shdr->response); 5947 if (shdr_status || shdr_add_status || rc) { 5948 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5949 "2520 NOP mailbox command failed " 5950 "status x%x add_status x%x mbx " 5951 "status x%x\n", shdr_status, 5952 shdr_add_status, rc); 5953 break; 5954 } 5955 } 5956 5957 if (rc != MBX_TIMEOUT) 5958 mempool_free(mboxq, phba->mbox_mem_pool); 5959 5960 return cmdsent; 5961} 5962 5963/** 5964 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device 5965 * @phba: pointer to lpfc hba data structure. 5966 * @fcfi: fcf index. 5967 * 5968 * This routine is invoked to unregister a FCFI from device. 5969 **/ 5970void 5971lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) 5972{ 5973 LPFC_MBOXQ_t *mbox; 5974 uint32_t mbox_tmo; 5975 int rc; 5976 unsigned long flags; 5977 5978 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5979 5980 if (!mbox) 5981 return; 5982 5983 lpfc_unreg_fcfi(mbox, fcfi); 5984 5985 if (!phba->sli4_hba.intr_enable) 5986 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5987 else { 5988 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5989 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5990 } 5991 if (rc != MBX_TIMEOUT) 5992 mempool_free(mbox, phba->mbox_mem_pool); 5993 if (rc != MBX_SUCCESS) 5994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5995 "2517 Unregister FCFI command failed " 5996 "status %d, mbxStatus x%x\n", rc, 5997 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 5998 else { 5999 spin_lock_irqsave(&phba->hbalock, flags); 6000 /* Mark the FCFI as no longer registered */ 6001 phba->fcf.fcf_flag &= 6002 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); 6003 spin_unlock_irqrestore(&phba->hbalock, flags); 6004 } 6005} 6006 6007/** 6008 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6009 * @phba: pointer to lpfc hba data structure. 6010 * 6011 * This routine is invoked to set up the PCI device memory space for device 6012 * with SLI-4 interface spec. 6013 * 6014 * Return codes 6015 * 0 - successful 6016 * other values - error 6017 **/ 6018static int 6019lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6020{ 6021 struct pci_dev *pdev; 6022 unsigned long bar0map_len, bar1map_len, bar2map_len; 6023 int error = -ENODEV; 6024 6025 /* Obtain PCI device reference */ 6026 if (!phba->pcidev) 6027 return error; 6028 else 6029 pdev = phba->pcidev; 6030 6031 /* Set the device DMA mask size */ 6032 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6033 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6034 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6035 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6036 return error; 6037 } 6038 } 6039 6040 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6041 * number of bytes required by each mapping. They are actually 6042 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. 6043 */ 6044 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); 6045 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); 6046 6047 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); 6048 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); 6049 6050 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); 6051 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); 6052 6053 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6054 phba->sli4_hba.conf_regs_memmap_p = 6055 ioremap(phba->pci_bar0_map, bar0map_len); 6056 if (!phba->sli4_hba.conf_regs_memmap_p) { 6057 dev_printk(KERN_ERR, &pdev->dev, 6058 "ioremap failed for SLI4 PCI config registers.\n"); 6059 goto out; 6060 } 6061 6062 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 6063 phba->sli4_hba.ctrl_regs_memmap_p = 6064 ioremap(phba->pci_bar1_map, bar1map_len); 6065 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6066 dev_printk(KERN_ERR, &pdev->dev, 6067 "ioremap failed for SLI4 HBA control registers.\n"); 6068 goto out_iounmap_conf; 6069 } 6070 6071 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 6072 phba->sli4_hba.drbl_regs_memmap_p = 6073 ioremap(phba->pci_bar2_map, bar2map_len); 6074 if (!phba->sli4_hba.drbl_regs_memmap_p) { 6075 dev_printk(KERN_ERR, &pdev->dev, 6076 "ioremap failed for SLI4 HBA doorbell registers.\n"); 6077 goto out_iounmap_ctrl; 6078 } 6079 6080 /* Set up BAR0 PCI config space register memory map */ 6081 lpfc_sli4_bar0_register_memmap(phba); 6082 6083 /* Set up BAR1 register memory map */ 6084 lpfc_sli4_bar1_register_memmap(phba); 6085 6086 /* Set up BAR2 register memory map */ 6087 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 6088 if (error) 6089 goto out_iounmap_all; 6090 6091 return 0; 6092 6093out_iounmap_all: 6094 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6095out_iounmap_ctrl: 6096 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6097out_iounmap_conf: 6098 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6099out: 6100 return error; 6101} 6102 6103/** 6104 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 6105 * @phba: pointer to lpfc hba data structure. 6106 * 6107 * This routine is invoked to unset the PCI device memory space for device 6108 * with SLI-4 interface spec. 
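 *
 * The iounmap() calls below release, in reverse order of their setup, the
 * doorbell, control and config register mappings created by
 * lpfc_sli4_pci_mem_setup().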
6109 **/ 6110static void 6111lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 6112{ 6113 struct pci_dev *pdev; 6114 6115 /* Obtain PCI device reference */ 6116 if (!phba->pcidev) 6117 return; 6118 else 6119 pdev = phba->pcidev; 6120 6121 /* Free coherent DMA memory allocated */ 6122 6123 /* Unmap I/O memory space */ 6124 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6125 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6126 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6127 6128 return; 6129} 6130 6131/** 6132 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6133 * @phba: pointer to lpfc hba data structure. 6134 * 6135 * This routine is invoked to enable the MSI-X interrupt vectors to device 6136 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6137 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6138 * invoked, enables either all or nothing, depending on the current 6139 * availability of PCI vector resources. The device driver is responsible 6140 * for calling the individual request_irq() to register each MSI-X vector 6141 * with an interrupt handler, which is done in this function. Note that 6142 * later when the device is unloading, the driver should always call free_irq() 6143 * on all MSI-X vectors it has done request_irq() on before calling 6144 * pci_disable_msix(). Failure to do so results in a BUG_ON() and leaves 6145 * the device with MSI-X enabled, leaking its vectors. 6146 * 6147 * Return codes 6148 * 0 - successful 6149 * other values - error 6150 **/ 6151static int 6152lpfc_sli_enable_msix(struct lpfc_hba *phba) 6153{ 6154 int rc, i; 6155 LPFC_MBOXQ_t *pmb; 6156 6157 /* Set up MSI-X multi-message vectors */ 6158 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6159 phba->msix_entries[i].entry = i; 6160 6161 /* Configure MSI-X capability structure */ 6162 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6163 ARRAY_SIZE(phba->msix_entries)); 6164 if (rc) { 6165 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6166 "0420 PCI enable MSI-X failed (%d)\n", rc); 6167 goto msi_fail_out; 6168 } 6169 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6171 "0477 MSI-X entry[%d]: vector=x%x " 6172 "message=%d\n", i, 6173 phba->msix_entries[i].vector, 6174 phba->msix_entries[i].entry); 6175 /* 6176 * Assign MSI-X vectors to interrupt handlers 6177 */ 6178 6179 /* vector-0 is associated to slow-path handler */ 6180 rc = request_irq(phba->msix_entries[0].vector, 6181 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6182 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6183 if (rc) { 6184 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6185 "0421 MSI-X slow-path request_irq failed " 6186 "(%d)\n", rc); 6187 goto msi_fail_out; 6188 } 6189 6190 /* vector-1 is associated to fast-path handler */ 6191 rc = request_irq(phba->msix_entries[1].vector, 6192 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6193 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6194 6195 if (rc) { 6196 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6197 "0429 MSI-X fast-path request_irq failed " 6198 "(%d)\n", rc); 6199 goto irq_fail_out; 6200 } 6201 6202 /* 6203 * Configure HBA MSI-X attention conditions to messages 6204 */ 6205 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6206 6207 if (!pmb) { 6208 rc = -ENOMEM; 6209 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6210 "0474 Unable to allocate memory for issuing " 6211 "MBOX_CONFIG_MSI command\n"); 6212 goto mem_fail_out; 6213 } 6214 rc = lpfc_config_msi(phba, pmb); 6215 if (rc) 6216 goto mbx_fail_out; 6217 rc =
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6218 if (rc != MBX_SUCCESS) { 6219 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6220 "0351 Config MSI mailbox command failed, " 6221 "mbxCmd x%x, mbxStatus x%x\n", 6222 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6223 goto mbx_fail_out; 6224 } 6225 6226 /* Free memory allocated for mailbox command */ 6227 mempool_free(pmb, phba->mbox_mem_pool); 6228 return rc; 6229 6230mbx_fail_out: 6231 /* Free memory allocated for mailbox command */ 6232 mempool_free(pmb, phba->mbox_mem_pool); 6233 6234mem_fail_out: 6235 /* free the irq already requested */ 6236 free_irq(phba->msix_entries[1].vector, phba); 6237 6238irq_fail_out: 6239 /* free the irq already requested */ 6240 free_irq(phba->msix_entries[0].vector, phba); 6241 6242msi_fail_out: 6243 /* Unconfigure MSI-X capability structure */ 6244 pci_disable_msix(phba->pcidev); 6245 return rc; 6246} 6247 6248/** 6249 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6250 * @phba: pointer to lpfc hba data structure. 6251 * 6252 * This routine is invoked to release the MSI-X vectors and then disable the 6253 * MSI-X interrupt mode to device with SLI-3 interface spec. 6254 **/ 6255static void 6256lpfc_sli_disable_msix(struct lpfc_hba *phba) 6257{ 6258 int i; 6259 6260 /* Free up MSI-X multi-message vectors */ 6261 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6262 free_irq(phba->msix_entries[i].vector, phba); 6263 /* Disable MSI-X */ 6264 pci_disable_msix(phba->pcidev); 6265 6266 return; 6267} 6268 6269/** 6270 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 6271 * @phba: pointer to lpfc hba data structure. 6272 * 6273 * This routine is invoked to enable the MSI interrupt mode to device with 6274 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 6275 * enable the MSI vector. The device driver is responsible for calling the 6276 * request_irq() to register the MSI vector with an interrupt handler, which 6277 * is done in this function. 6278 * 6279 * Return codes 6280 * 0 - successful 6281 * other values - error 6282 */ 6283static int 6284lpfc_sli_enable_msi(struct lpfc_hba *phba) 6285{ 6286 int rc; 6287 6288 rc = pci_enable_msi(phba->pcidev); 6289 if (!rc) 6290 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6291 "0462 PCI enable MSI mode success.\n"); 6292 else { 6293 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6294 "0471 PCI enable MSI mode failed (%d)\n", rc); 6295 return rc; 6296 } 6297 6298 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6299 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6300 if (rc) { 6301 pci_disable_msi(phba->pcidev); 6302 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6303 "0478 MSI request_irq failed (%d)\n", rc); 6304 } 6305 return rc; 6306} 6307 6308/** 6309 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 6310 * @phba: pointer to lpfc hba data structure. 6311 * 6312 * This routine is invoked to disable the MSI interrupt mode to device with 6313 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has 6314 * done request_irq() on before calling pci_disable_msi(). Failure to do so 6315 * results in a BUG_ON() and leaves the device with MSI enabled, leaking 6316 * its vector. 6317 */ 6318static void 6319lpfc_sli_disable_msi(struct lpfc_hba *phba) 6320{ 6321 free_irq(phba->pcidev->irq, phba); 6322 pci_disable_msi(phba->pcidev); 6323 return; 6324} 6325 6326/** 6327 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 6328 * @phba: pointer to lpfc hba data structure.
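 * @cfg_mode: configured interrupt mode to start from: 2 (MSI-X), 1 (MSI)
 *            or 0 (INTx), matching the intr_mode values returned on success.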
6329 * 6330 * This routine is invoked to enable device interrupt and associate driver's 6331 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 6332 * spec. Depending on the interrupt mode configured for the driver, the driver 6333 * will try to fall back from the configured interrupt mode to an interrupt 6334 * mode which is supported by the platform, kernel, and device in the order 6335 * of: 6336 * MSI-X -> MSI -> IRQ. 6337 * 6338 * Return codes 6339 * 0 - successful 6340 * other values - error 6341 **/ 6342static uint32_t 6343lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6344{ 6345 uint32_t intr_mode = LPFC_INTR_ERROR; 6346 int retval; 6347 6348 if (cfg_mode == 2) { 6349 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6350 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6351 if (!retval) { 6352 /* Now, try to enable MSI-X interrupt mode */ 6353 retval = lpfc_sli_enable_msix(phba); 6354 if (!retval) { 6355 /* Indicate initialization to MSI-X mode */ 6356 phba->intr_type = MSIX; 6357 intr_mode = 2; 6358 } 6359 } 6360 } 6361 6362 /* Fallback to MSI if MSI-X initialization failed */ 6363 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6364 retval = lpfc_sli_enable_msi(phba); 6365 if (!retval) { 6366 /* Indicate initialization to MSI mode */ 6367 phba->intr_type = MSI; 6368 intr_mode = 1; 6369 } 6370 } 6371 6372 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6373 if (phba->intr_type == NONE) { 6374 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6375 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6376 if (!retval) { 6377 /* Indicate initialization to INTx mode */ 6378 phba->intr_type = INTx; 6379 intr_mode = 0; 6380 } 6381 } 6382 return intr_mode; 6383} 6384 6385/** 6386 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 6387 * @phba: pointer to lpfc hba data structure. 6388 * 6389 * This routine is invoked to disable device interrupt and disassociate the 6390 * driver's interrupt handler(s) from interrupt vector(s) to device with 6391 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 6392 * release the interrupt vector(s) for the message signaled interrupt. 6393 **/ 6394static void 6395lpfc_sli_disable_intr(struct lpfc_hba *phba) 6396{ 6397 /* Disable the currently initialized interrupt mode */ 6398 if (phba->intr_type == MSIX) 6399 lpfc_sli_disable_msix(phba); 6400 else if (phba->intr_type == MSI) 6401 lpfc_sli_disable_msi(phba); 6402 else if (phba->intr_type == INTx) 6403 free_irq(phba->pcidev->irq, phba); 6404 6405 /* Reset interrupt management states */ 6406 phba->intr_type = NONE; 6407 phba->sli.slistat.sli_intr = 0; 6408 6409 return; 6410} 6411 6412/** 6413 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 6414 * @phba: pointer to lpfc hba data structure. 6415 * 6416 * This routine is invoked to enable the MSI-X interrupt vectors to device 6417 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 6418 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 6419 * enables either all or nothing, depending on the current availability of 6420 * PCI vector resources. The device driver is responsible for calling the 6421 * individual request_irq() to register each MSI-X vector with an interrupt 6422 * handler, which is done in this function.
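 *
 * Request pairing sketch (vector 0 carries the slow-path handler, the
 * remaining vectors carry fast-path handlers, as in the body below):
 *
 *   request_irq(msix_entries[0].vector, lpfc_sli4_sp_intr_handler,
 *               IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
 *   request_irq(msix_entries[index].vector, lpfc_sli4_fp_intr_handler,
 *               IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME,
 *               &phba->sli4_hba.fcp_eq_hdl[index - 1]);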
Note that later, when the device is 6423 * unloading, the driver should always call free_irq() on all MSI-X vectors 6424 * it has done request_irq() on before calling pci_disable_msix(). Failure 6425 * to do so results in a BUG_ON() and leaves the device with MSI-X 6426 * enabled, leaking its vectors. 6427 * 6428 * Return codes 6429 * 0 - successful 6430 * other values - error 6431 **/ 6432static int 6433lpfc_sli4_enable_msix(struct lpfc_hba *phba) 6434{ 6435 int rc, index; 6436 6437 /* Set up MSI-X multi-message vectors */ 6438 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6439 phba->sli4_hba.msix_entries[index].entry = index; 6440 6441 /* Configure MSI-X capability structure */ 6442 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 6443 phba->sli4_hba.cfg_eqn); 6444 if (rc) { 6445 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6446 "0484 PCI enable MSI-X failed (%d)\n", rc); 6447 goto msi_fail_out; 6448 } 6449 /* Log MSI-X vector assignment */ 6450 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6451 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6452 "0489 MSI-X entry[%d]: vector=x%x " 6453 "message=%d\n", index, 6454 phba->sli4_hba.msix_entries[index].vector, 6455 phba->sli4_hba.msix_entries[index].entry); 6456 /* 6457 * Assign MSI-X vectors to interrupt handlers 6458 */ 6459 6460 /* The first vector must be associated with the slow-path handler for MQ */ 6461 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 6462 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 6463 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6464 if (rc) { 6465 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6466 "0485 MSI-X slow-path request_irq failed " 6467 "(%d)\n", rc); 6468 goto msi_fail_out; 6469 } 6470 6471 /* The rest of the vector(s) are associated to fast-path handler(s) */ 6472 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { 6473 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 6474 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 6475 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 6476 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 6477 LPFC_FP_DRIVER_HANDLER_NAME, 6478 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6479 if (rc) { 6480 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6481 "0486 MSI-X fast-path (%d) " 6482 "request_irq failed (%d)\n", index, rc); 6483 goto cfg_fail_out; 6484 } 6485 } 6486 6487 return rc; 6488 6489cfg_fail_out: 6490 /* free the irqs already requested; vector index pairs with eq handle index - 1 */ 6491 for (--index; index >= 1; index--) 6492 free_irq(phba->sli4_hba.msix_entries[index].vector, 6493 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6494 6495 /* free the irq already requested */ 6496 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6497 6498msi_fail_out: 6499 /* Unconfigure MSI-X capability structure */ 6500 pci_disable_msix(phba->pcidev); 6501 return rc; 6502} 6503 6504/** 6505 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 6506 * @phba: pointer to lpfc hba data structure. 6507 * 6508 * This routine is invoked to release the MSI-X vectors and then disable the 6509 * MSI-X interrupt mode to device with SLI-4 interface spec.
6510 **/ 6511static void 6512lpfc_sli4_disable_msix(struct lpfc_hba *phba) 6513{ 6514 int index; 6515 6516 /* Free up MSI-X multi-message vectors */ 6517 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6518 6519 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) 6520 free_irq(phba->sli4_hba.msix_entries[index].vector, 6521 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6522 /* Disable MSI-X */ 6523 pci_disable_msix(phba->pcidev); 6524 6525 return; 6526} 6527 6528/** 6529 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 6530 * @phba: pointer to lpfc hba data structure. 6531 * 6532 * This routine is invoked to enable the MSI interrupt mode to device with 6533 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 6534 * to enable the MSI vector. The device driver is responsible for calling 6535 * request_irq() to register the MSI vector with an interrupt handler, 6536 * which is done in this function. 6537 * 6538 * Return codes 6539 * 0 - successful 6540 * other values - error 6541 **/ 6542static int 6543lpfc_sli4_enable_msi(struct lpfc_hba *phba) 6544{ 6545 int rc, index; 6546 6547 rc = pci_enable_msi(phba->pcidev); 6548 if (!rc) 6549 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6550 "0487 PCI enable MSI mode success.\n"); 6551 else { 6552 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6553 "0488 PCI enable MSI mode failed (%d)\n", rc); 6554 return rc; 6555 } 6556 6557 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 6558 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6559 if (rc) { 6560 pci_disable_msi(phba->pcidev); 6561 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6562 "0490 MSI request_irq failed (%d)\n", rc); return rc; 6563 } 6564 6565 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 6566 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 6567 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 6568 } 6569 6570 return rc; 6571} 6572 6573/** 6574 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 6575 * @phba: pointer to lpfc hba data structure. 6576 * 6577 * This routine is invoked to disable the MSI interrupt mode to device with 6578 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has 6579 * done request_irq() on before calling pci_disable_msi(). Failure to do so 6580 * results in a BUG_ON() and leaves the device with MSI enabled, leaking 6581 * its vector. 6582 **/ 6583static void 6584lpfc_sli4_disable_msi(struct lpfc_hba *phba) 6585{ 6586 free_irq(phba->pcidev->irq, phba); 6587 pci_disable_msi(phba->pcidev); 6588 return; 6589} 6590 6591/** 6592 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 6593 * @phba: pointer to lpfc hba data structure. 6594 * 6595 * This routine is invoked to enable device interrupt and associate driver's 6596 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 6597 * interface spec. Depending on the interrupt mode configured for the driver, 6598 * the driver will try to fall back from the configured interrupt mode to an 6599 * interrupt mode which is supported by the platform, kernel, and device in 6600 * the order of: 6601 * MSI-X -> MSI -> IRQ.
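 *
 * Caller's view of the cascade (sketch; the SLI-3 probe path shown later
 * in this file uses the same pattern with lpfc_sli_enable_intr()):
 *
 *   intr_mode = lpfc_sli4_enable_intr(phba, phba->cfg_use_msi);
 *   if (intr_mode == LPFC_INTR_ERROR)
 *           return -ENODEV;
 *   phba->intr_mode = intr_mode;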
6602 * 6603 * Return codes 6604 * 0 - successful 6605 * other values - error 6606 **/ 6607static uint32_t 6608lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6609{ 6610 uint32_t intr_mode = LPFC_INTR_ERROR; 6611 int retval, index; 6612 6613 if (cfg_mode == 2) { 6614 /* Preparation before conf_msi mbox cmd */ 6615 retval = 0; 6616 if (!retval) { 6617 /* Now, try to enable MSI-X interrupt mode */ 6618 retval = lpfc_sli4_enable_msix(phba); 6619 if (!retval) { 6620 /* Indicate initialization to MSI-X mode */ 6621 phba->intr_type = MSIX; 6622 intr_mode = 2; 6623 } 6624 } 6625 } 6626 6627 /* Fallback to MSI if MSI-X initialization failed */ 6628 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6629 retval = lpfc_sli4_enable_msi(phba); 6630 if (!retval) { 6631 /* Indicate initialization to MSI mode */ 6632 phba->intr_type = MSI; 6633 intr_mode = 1; 6634 } 6635 } 6636 6637 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6638 if (phba->intr_type == NONE) { 6639 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 6640 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6641 if (!retval) { 6642 /* Indicate initialization to INTx mode */ 6643 phba->intr_type = INTx; 6644 intr_mode = 0; 6645 for (index = 0; index < phba->cfg_fcp_eq_count; 6646 index++) { 6647 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 6648 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 6649 } 6650 } 6651 } 6652 return intr_mode; 6653} 6654 6655/** 6656 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 6657 * @phba: pointer to lpfc hba data structure. 6658 * 6659 * This routine is invoked to disable device interrupt and disassociate 6660 * the driver's interrupt handler(s) from interrupt vector(s) to device 6661 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 6662 * will release the interrupt vector(s) for the message signaled interrupt. 6663 **/ 6664static void 6665lpfc_sli4_disable_intr(struct lpfc_hba *phba) 6666{ 6667 /* Disable the currently initialized interrupt mode */ 6668 if (phba->intr_type == MSIX) 6669 lpfc_sli4_disable_msix(phba); 6670 else if (phba->intr_type == MSI) 6671 lpfc_sli4_disable_msi(phba); 6672 else if (phba->intr_type == INTx) 6673 free_irq(phba->pcidev->irq, phba); 6674 6675 /* Reset interrupt management states */ 6676 phba->intr_type = NONE; 6677 phba->sli.slistat.sli_intr = 0; 6678 6679 return; 6680} 6681 6682/** 6683 * lpfc_unset_hba - Unset SLI3 hba device initialization 6684 * @phba: pointer to lpfc hba data structure. 6685 * 6686 * This routine is invoked to unset the HBA device initialization steps to 6687 * a device with SLI-3 interface spec. 6688 **/ 6689static void 6690lpfc_unset_hba(struct lpfc_hba *phba) 6691{ 6692 struct lpfc_vport *vport = phba->pport; 6693 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6694 6695 spin_lock_irq(shost->host_lock); 6696 vport->load_flag |= FC_UNLOADING; 6697 spin_unlock_irq(shost->host_lock); 6698 6699 lpfc_stop_hba_timers(phba); 6700 6701 phba->pport->work_port_events = 0; 6702 6703 lpfc_sli_hba_down(phba); 6704 6705 lpfc_sli_brdrestart(phba); 6706 6707 lpfc_sli_disable_intr(phba); 6708 6709 return; 6710} 6711 6712/** 6713 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 6714 * @phba: pointer to lpfc hba data structure. 6715 * 6716 * This routine is invoked to unset the HBA device initialization steps to 6717 * a device with SLI-4 interface spec.
6718 **/ 6719static void 6720lpfc_sli4_unset_hba(struct lpfc_hba *phba) 6721{ 6722 struct lpfc_vport *vport = phba->pport; 6723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6724 6725 spin_lock_irq(shost->host_lock); 6726 vport->load_flag |= FC_UNLOADING; 6727 spin_unlock_irq(shost->host_lock); 6728 6729 phba->pport->work_port_events = 0; 6730 6731 lpfc_sli4_hba_down(phba); 6732 6733 lpfc_sli4_disable_intr(phba); 6734 6735 return; 6736} 6737 6738/** 6739 * lpfc_sli4_hba_unset - Unset the fcoe hba 6740 * @phba: Pointer to HBA context object. 6741 * 6742 * This function is called in the SLI4 code path to reset the HBA's FCoE 6743 * function. The caller is not required to hold any lock. This routine 6744 * issues PCI function reset mailbox command to reset the FCoE function. 6745 * At the end of the function, it calls lpfc_hba_down_post function to 6746 * free any pending commands. 6747 **/ 6748static void 6749lpfc_sli4_hba_unset(struct lpfc_hba *phba) 6750{ 6751 int wait_cnt = 0; 6752 LPFC_MBOXQ_t *mboxq; 6753 6754 lpfc_stop_hba_timers(phba); 6755 phba->sli4_hba.intr_enable = 0; 6756 6757 /* 6758 * Gracefully wait out the potential current outstanding asynchronous 6759 * mailbox command. 6760 */ 6761 6762 /* First, block any pending async mailbox command from being posted */ 6763 spin_lock_irq(&phba->hbalock); 6764 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 6765 spin_unlock_irq(&phba->hbalock); 6766 /* Now, try to wait it out if we can */ 6767 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6768 msleep(10); 6769 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 6770 break; 6771 } 6772 /* Forcefully release the outstanding mailbox command if timed out */ 6773 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6774 spin_lock_irq(&phba->hbalock); 6775 mboxq = phba->sli.mbox_active; 6776 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 6777 __lpfc_mbox_cmpl_put(phba, mboxq); 6778 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6779 phba->sli.mbox_active = NULL; 6780 spin_unlock_irq(&phba->hbalock); 6781 } 6782 6783 /* Tear down the queues in the HBA */ 6784 lpfc_sli4_queue_unset(phba); 6785 6786 /* Disable PCI subsystem interrupt */ 6787 lpfc_sli4_disable_intr(phba); 6788 6789 /* Stopping the kthread signals it to run work_done one more time */ 6790 kthread_stop(phba->worker_thread); 6791 6792 /* Stop the SLI4 device port */ 6793 phba->pport->work_port_events = 0; 6794} 6795 6796/** 6797 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 6798 * @pdev: pointer to PCI device 6799 * @pid: pointer to PCI device identifier 6800 * 6801 * This routine is to be called to attach a device with SLI-3 interface spec 6802 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 6803 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 6804 * information of the device and driver to see if the driver states that it 6805 * can support this kind of device. If the match is successful, the driver core 6806 * invokes this routine. If this routine determines it can claim the HBA, it 6807 * does all the initialization that it needs to do to handle the HBA properly.
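 *
 * Conceptually, the PCI core reaches this routine through the driver's
 * pci_driver registration (sketch only; the actual id table and probe
 * dispatcher are defined elsewhere in this file):
 *
 *   static struct pci_driver lpfc_driver = {
 *           .name     = LPFC_DRIVER_NAME,
 *           .id_table = lpfc_id_table,
 *           .probe    = lpfc_pci_probe_one,
 *   };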
6808 * 6809 * Return code 6810 * 0 - driver can claim the device 6811 * negative value - driver can not claim the device 6812 **/ 6813static int __devinit 6814lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 6815{ 6816 struct lpfc_hba *phba; 6817 struct lpfc_vport *vport = NULL; 6818 struct Scsi_Host *shost = NULL; 6819 int error; 6820 uint32_t cfg_mode, intr_mode; 6821 6822 /* Allocate memory for HBA structure */ 6823 phba = lpfc_hba_alloc(pdev); 6824 if (!phba) 6825 return -ENOMEM; 6826 6827 /* Perform generic PCI device enabling operation */ 6828 error = lpfc_enable_pci_dev(phba); 6829 if (error) { 6830 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6831 "1401 Failed to enable pci device.\n"); 6832 goto out_free_phba; 6833 } 6834 6835 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 6836 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 6837 if (error) 6838 goto out_disable_pci_dev; 6839 6840 /* Set up SLI-3 specific device PCI memory space */ 6841 error = lpfc_sli_pci_mem_setup(phba); 6842 if (error) { 6843 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6844 "1402 Failed to set up pci memory space.\n"); 6845 goto out_disable_pci_dev; 6846 } 6847 6848 /* Set up phase-1 common device driver resources */ 6849 error = lpfc_setup_driver_resource_phase1(phba); 6850 if (error) { 6851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6852 "1403 Failed to set up driver resource.\n"); 6853 goto out_unset_pci_mem_s3; 6854 } 6855 6856 /* Set up SLI-3 specific device driver resources */ 6857 error = lpfc_sli_driver_resource_setup(phba); 6858 if (error) { 6859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6860 "1404 Failed to set up driver resource.\n"); 6861 goto out_unset_pci_mem_s3; 6862 } 6863 6864 /* Initialize and populate the iocb list per host */ 6865 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 6866 if (error) { 6867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6868 "1405 Failed to initialize iocb list.\n"); 6869 goto out_unset_driver_resource_s3; 6870 } 6871 6872 /* Set up common device driver resources */ 6873 error = lpfc_setup_driver_resource_phase2(phba); 6874 if (error) { 6875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6876 "1406 Failed to set up driver resource.\n"); 6877 goto out_free_iocb_list; 6878 } 6879 6880 /* Create SCSI host to the physical port */ 6881 error = lpfc_create_shost(phba); 6882 if (error) { 6883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6884 "1407 Failed to create scsi host.\n"); 6885 goto out_unset_driver_resource; 6886 } 6887 6888 /* Configure sysfs attributes */ 6889 vport = phba->pport; 6890 error = lpfc_alloc_sysfs_attr(vport); 6891 if (error) { 6892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6893 "1476 Failed to allocate sysfs attr\n"); 6894 goto out_destroy_shost; 6895 } 6896 6897 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 6898 /* Now, trying to enable interrupt and bring up the device */ 6899 cfg_mode = phba->cfg_use_msi; 6900 while (true) { 6901 /* Put device to a known state before enabling interrupt */ 6902 lpfc_stop_port(phba); 6903 /* Configure and enable interrupt */ 6904 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 6905 if (intr_mode == LPFC_INTR_ERROR) { 6906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6907 "0431 Failed to enable interrupt.\n"); 6908 error = -ENODEV; 6909 goto out_free_sysfs_attr; 6910 } 6911 /* SLI-3 HBA setup */ 6912 if (lpfc_sli_hba_setup(phba)) { 6913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6914 "1477 Failed to set up hba\n"); 6915 error = -ENODEV; 6916 goto 
out_remove_device; 6917 } 6918 6919 /* Wait 50ms for the interrupts of previous mailbox commands */ 6920 msleep(50); 6921 /* Check active interrupts on message signaled interrupts */ 6922 if (intr_mode == 0 || 6923 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6924 /* Log the current active interrupt mode */ 6925 phba->intr_mode = intr_mode; 6926 lpfc_log_intr_mode(phba, intr_mode); 6927 break; 6928 } else { 6929 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6930 "0447 Configure interrupt mode (%d) " 6931 "failed active interrupt test.\n", 6932 intr_mode); 6933 /* Disable the current interrupt mode */ 6934 lpfc_sli_disable_intr(phba); 6935 /* Try next level of interrupt mode */ 6936 cfg_mode = --intr_mode; 6937 } 6938 } 6939 6940 /* Perform post initialization setup */ 6941 lpfc_post_init_setup(phba); 6942 6943 /* Check if there are static vports to be created. */ 6944 lpfc_create_static_vport(phba); 6945 6946 return 0; 6947 6948out_remove_device: 6949 lpfc_unset_hba(phba); 6950out_free_sysfs_attr: 6951 lpfc_free_sysfs_attr(vport); 6952out_destroy_shost: 6953 lpfc_destroy_shost(phba); 6954out_unset_driver_resource: 6955 lpfc_unset_driver_resource_phase2(phba); 6956out_free_iocb_list: 6957 lpfc_free_iocb_list(phba); 6958out_unset_driver_resource_s3: 6959 lpfc_sli_driver_resource_unset(phba); 6960out_unset_pci_mem_s3: 6961 lpfc_sli_pci_mem_unset(phba); 6962out_disable_pci_dev: 6963 lpfc_disable_pci_dev(phba); 6964 if (shost) 6965 scsi_host_put(shost); 6966out_free_phba: 6967 lpfc_hba_free(phba); 6968 return error; 6969} 6970 6971/** 6972 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 6973 * @pdev: pointer to PCI device 6974 * 6975 * This routine is to be called to detach a device with SLI-3 interface 6976 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 6977 * removed from PCI bus, it performs all the necessary cleanup for the HBA 6978 * device to be removed from the PCI subsystem properly. 6979 **/ 6980static void __devexit 6981lpfc_pci_remove_one_s3(struct pci_dev *pdev) 6982{ 6983 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6984 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6985 struct lpfc_vport **vports; 6986 struct lpfc_hba *phba = vport->phba; 6987 int i; 6988 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 6989 6990 spin_lock_irq(&phba->hbalock); 6991 vport->load_flag |= FC_UNLOADING; 6992 spin_unlock_irq(&phba->hbalock); 6993 6994 lpfc_free_sysfs_attr(vport); 6995 6996 /* Release all the vports against this physical port */ 6997 vports = lpfc_create_vport_work_array(phba); 6998 if (vports != NULL) 6999 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 7000 fc_vport_terminate(vports[i]->fc_vport); 7001 lpfc_destroy_vport_work_array(phba, vports); 7002 7003 /* Remove FC host and then SCSI host with the physical port */ 7004 fc_remove_host(shost); 7005 scsi_remove_host(shost); 7006 lpfc_cleanup(vport); 7007 7008 /* 7009 * Bring down the SLI Layer. This step disables all interrupts, 7010 * clears the rings, discards all mailbox commands, and resets 7011 * the HBA.
7012 */ 7013 7014 /* HBA interrupt will be disabled after this call */ 7015 lpfc_sli_hba_down(phba); 7016 /* Stopping the kthread signals it to run work_done one more time */ 7017 kthread_stop(phba->worker_thread); 7018 /* Final cleanup of txcmplq and reset the HBA */ 7019 lpfc_sli_brdrestart(phba); 7020 7021 lpfc_stop_hba_timers(phba); 7022 spin_lock_irq(&phba->hbalock); 7023 list_del_init(&vport->listentry); 7024 spin_unlock_irq(&phba->hbalock); 7025 7026 lpfc_debugfs_terminate(vport); 7027 7028 /* Disable interrupt */ 7029 lpfc_sli_disable_intr(phba); 7030 7031 pci_set_drvdata(pdev, NULL); 7032 scsi_host_put(shost); 7033 7034 /* 7035 * Call scsi_free before mem_free since scsi bufs are released to their 7036 * corresponding pools here. 7037 */ 7038 lpfc_scsi_free(phba); 7039 lpfc_mem_free_all(phba); 7040 7041 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7042 phba->hbqslimp.virt, phba->hbqslimp.phys); 7043 7044 /* Free resources associated with SLI2 interface */ 7045 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7046 phba->slim2p.virt, phba->slim2p.phys); 7047 7048 /* unmap adapter SLIM and Control Registers */ 7049 iounmap(phba->ctrl_regs_memmap_p); 7050 iounmap(phba->slim_memmap_p); 7051 7052 lpfc_hba_free(phba); 7053 7054 pci_release_selected_regions(pdev, bars); 7055 pci_disable_device(pdev); 7056} 7057 7058/** 7059 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 7060 * @pdev: pointer to PCI device 7061 * @msg: power management message 7062 * 7063 * This routine is to be called from the kernel's PCI subsystem to support 7064 * system Power Management (PM) to device with SLI-3 interface spec. When 7065 * PM invokes this method, it quiesces the device by stopping the driver's 7066 * worker thread for the device, turning off the device's interrupt and DMA, 7067 * and bringing the device offline. Note that because the driver implements 7068 * only the minimum PM requirements for a power-aware driver's PM support 7069 * for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 7070 * FREEZE) to the suspend() method call are treated as SUSPEND and the 7071 * driver fully reinitializes its device during the resume() method call -- 7072 * the driver sets the device to PCI_D3hot state in PCI config space instead 7073 * of setting it according to the @msg provided by the PM. 7074 * 7075 * Return code 7076 * 0 - driver suspended the device 7077 * Error otherwise 7078 **/ 7079static int 7080lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 7081{ 7082 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7083 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7084 7085 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7086 "0473 PCI device Power Management suspend.\n"); 7087 7088 /* Bring down the device */ 7089 lpfc_offline_prep(phba); 7090 lpfc_offline(phba); 7091 kthread_stop(phba->worker_thread); 7092 7093 /* Disable interrupt from device */ 7094 lpfc_sli_disable_intr(phba); 7095 7096 /* Save device state to PCI config space */ 7097 pci_save_state(pdev); 7098 pci_set_power_state(pdev, PCI_D3hot); 7099 7100 return 0; 7101} 7102 7103/** 7104 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 7105 * @pdev: pointer to PCI device 7106 * 7107 * This routine is to be called from the kernel's PCI subsystem to support 7108 * system Power Management (PM) to device with SLI-3 interface spec.
When PM 7109 * invokes this method, it restores the device's PCI config space state and 7110 * fully reinitializes the device and brings it online. Note that because the 7111 * driver implements only the minimum PM requirements of a power-aware 7112 * driver -- all possible PM messages (SUSPEND, HIBERNATE, 7113 * FREEZE) passed to the suspend() method call are treated as SUSPEND and the 7114 * driver fully reinitializes its device during the resume() method call -- 7115 * the device is set to PCI_D0 directly in PCI config space before 7116 * restoring the state. 7117 * 7118 * Return code 7119 * 0 - driver resumed the device 7120 * Error otherwise 7121 **/ 7122static int 7123lpfc_pci_resume_one_s3(struct pci_dev *pdev) 7124{ 7125 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7126 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7127 uint32_t intr_mode; 7128 int error; 7129 7130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7131 "0452 PCI device Power Management resume.\n"); 7132 7133 /* Restore device state from PCI config space */ 7134 pci_set_power_state(pdev, PCI_D0); 7135 pci_restore_state(pdev); 7136 7137 if (pdev->is_busmaster) 7138 pci_set_master(pdev); 7139 7140 /* Startup the kernel thread for this host adapter. */ 7141 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7142 "lpfc_worker_%d", phba->brd_no); 7143 if (IS_ERR(phba->worker_thread)) { 7144 error = PTR_ERR(phba->worker_thread); 7145 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7146 "0434 PM resume failed to start worker " 7147 "thread: error=x%x.\n", error); 7148 return error; 7149 } 7150 7151 /* Configure and enable interrupt */ 7152 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7153 if (intr_mode == LPFC_INTR_ERROR) { 7154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7155 "0430 PM resume Failed to enable interrupt\n"); 7156 return -EIO; 7157 } else 7158 phba->intr_mode = intr_mode; 7159 7160 /* Restart HBA and bring it online */ 7161 lpfc_sli_brdrestart(phba); 7162 lpfc_online(phba); 7163 7164 /* Log the current active interrupt mode */ 7165 lpfc_log_intr_mode(phba, phba->intr_mode); 7166 7167 return 0; 7168} 7169 7170/** 7171 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 7172 * @phba: pointer to lpfc hba data structure. 7173 * 7174 * This routine is called to prepare the SLI3 device for PCI slot recover. It 7175 * aborts and stops all the on-going I/Os on the pci device. 7176 **/ 7177static void 7178lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 7179{ 7180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7181 "2723 PCI channel I/O abort preparing for recovery\n"); 7182 /* Prepare for bringing HBA offline */ 7183 lpfc_offline_prep(phba); 7184 /* Clear sli active flag to prevent sysfs access to HBA */ 7185 spin_lock_irq(&phba->hbalock); 7186 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 7187 spin_unlock_irq(&phba->hbalock); 7188 /* Stop and flush all I/Os and bring HBA offline */ 7189 lpfc_offline(phba); 7190} 7191 7192/** 7193 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 7194 * @phba: pointer to lpfc hba data structure. 7195 * 7196 * This routine is called to prepare the SLI3 device for PCI slot reset. It 7197 * disables the device interrupt and pci device, and aborts the internal FCP 7198 * pending I/Os.
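* The PCI device is left disabled here; it is re-enabled by the slot reset * handler (lpfc_io_slot_reset_s3) after the PCI subsystem has reset the bus.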
7199 **/ 7200static void 7201lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 7202{ 7203 struct lpfc_sli *psli = &phba->sli; 7204 struct lpfc_sli_ring *pring; 7205 7206 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7207 "2710 PCI channel disable preparing for reset\n"); 7208 /* Disable interrupt and pci device */ 7209 lpfc_sli_disable_intr(phba); 7210 pci_disable_device(phba->pcidev); 7211 /* 7212 * There may be I/Os dropped by the firmware. 7213 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer 7214 * retry them after re-establishing the link. 7215 */ 7216 pring = &psli->ring[psli->fcp_ring]; 7217 lpfc_sli_abort_iocb_ring(phba, pring); 7218} 7219 7220/** 7221 * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 7222 * @phba: pointer to lpfc hba data structure. 7223 * 7224 * This routine is called to prepare the SLI3 device for the PCI slot being 7225 * permanently disabled. It blocks the SCSI transport layer traffic and 7226 * flushes the FCP pending I/Os. 7227 **/ 7228static void 7229lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) 7230{ 7231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7232 "2711 PCI channel permanent disable for failure\n"); 7233 /* Clean up all driver's outstanding SCSI I/Os */ 7234 lpfc_sli_flush_fcp_rings(phba); 7235} 7236 7237/** 7238 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7239 * @pdev: pointer to PCI device. 7240 * @state: the current PCI connection state. 7241 * 7242 * This routine is called from the PCI subsystem for I/O error handling to 7243 * a device with SLI-3 interface spec. This function is called by the PCI 7244 * subsystem after a PCI bus error affecting this device has been detected. 7245 * When this function is invoked, it will need to stop all the I/Os and 7246 * interrupt(s) to the device. Once that is done, it will return 7247 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 7248 * as desired. 7249 * 7250 * Return codes 7251 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 7252 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7253 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7254 **/ 7255static pci_ers_result_t 7256lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 7257{ 7258 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7259 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7260 7261 /* Block all SCSI devices' I/Os on the host */ 7262 lpfc_scsi_dev_block(phba); 7263 7264 switch (state) { 7265 case pci_channel_io_normal: 7266 /* Non-fatal error, prepare for recovery */ 7267 lpfc_sli_prep_dev_for_recover(phba); 7268 return PCI_ERS_RESULT_CAN_RECOVER; 7269 case pci_channel_io_frozen: 7270 /* Fatal error, prepare for slot reset */ 7271 lpfc_sli_prep_dev_for_reset(phba); 7272 return PCI_ERS_RESULT_NEED_RESET; 7273 case pci_channel_io_perm_failure: 7274 /* Permanent failure, prepare for device down */ 7275 lpfc_prep_dev_for_perm_failure(phba); 7276 return PCI_ERS_RESULT_DISCONNECT; 7277 default: 7278 /* Unknown state, prepare and request slot reset */ 7279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7280 "0472 Unknown PCI error state: x%x\n", state); 7281 lpfc_sli_prep_dev_for_reset(phba); 7282 return PCI_ERS_RESULT_NEED_RESET; 7283 } 7284} 7285 7286/** 7287 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 7288 * @pdev: pointer to PCI device. 7289 * 7290 * This routine is called from the PCI subsystem for error handling to 7291 * a device with SLI-3 interface spec.
This is called after the PCI bus has been 7292 * reset to restart the PCI card from scratch, as if from a cold-boot. 7293 * During the PCI subsystem error recovery, after the driver returns 7294 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 7295 * recovery and then call this routine before calling the .resume method 7296 * to recover the device. This function will initialize the HBA device and 7297 * enable the interrupt, but it will just put the HBA into an offline state 7298 * without passing any I/O traffic. 7299 * 7300 * Return codes 7301 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7302 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7303 */ 7304static pci_ers_result_t 7305lpfc_io_slot_reset_s3(struct pci_dev *pdev) 7306{ 7307 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7308 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7309 struct lpfc_sli *psli = &phba->sli; 7310 uint32_t intr_mode; 7311 7312 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 7313 if (pci_enable_device_mem(pdev)) { 7314 printk(KERN_ERR "lpfc: Cannot re-enable " 7315 "PCI device after reset.\n"); 7316 return PCI_ERS_RESULT_DISCONNECT; 7317 } 7318 7319 pci_restore_state(pdev); 7320 if (pdev->is_busmaster) 7321 pci_set_master(pdev); 7322 7323 spin_lock_irq(&phba->hbalock); 7324 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7325 spin_unlock_irq(&phba->hbalock); 7326 7327 /* Configure and enable interrupt */ 7328 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7329 if (intr_mode == LPFC_INTR_ERROR) { 7330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7331 "0427 Cannot re-enable interrupt after " 7332 "slot reset.\n"); 7333 return PCI_ERS_RESULT_DISCONNECT; 7334 } else 7335 phba->intr_mode = intr_mode; 7336 7337 /* Take device offline; this will perform cleanup */ 7338 lpfc_offline(phba); 7339 lpfc_sli_brdrestart(phba); 7340 7341 /* Log the current active interrupt mode */ 7342 lpfc_log_intr_mode(phba, phba->intr_mode); 7343 7344 return PCI_ERS_RESULT_RECOVERED; 7345} 7346 7347/** 7348 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 7349 * @pdev: pointer to PCI device 7350 * 7351 * This routine is called from the PCI subsystem for error handling to a device 7352 * with SLI-3 interface spec. It is called when kernel error recovery tells 7353 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 7354 * error recovery. After this call, traffic can start to flow from this device 7355 * again. 7356 */ 7357static void 7358lpfc_io_resume_s3(struct pci_dev *pdev) 7359{ 7360 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7361 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7362 7363 /* Bring the device online */ 7364 lpfc_online(phba); 7365 7366 /* Clean up Advanced Error Reporting (AER) if needed */ 7367 if (phba->hba_flag & HBA_AER_ENABLED) 7368 pci_cleanup_aer_uncorrect_error_status(pdev); 7369} 7370 7371/** 7372 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 7373 * @phba: pointer to lpfc hba data structure.
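* * For SLI-4, the reserve count scales with the configured XRI limit: * max_xri <= 100 -> 10, <= 256 -> 25, <= 512 -> 50, <= 1024 -> 100, * otherwise 150. SLI-3 ports reserve none through this routine.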
7374 * 7375 * returns the number of ELS/CT IOCBs to reserve 7376 **/ 7377int 7378lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 7379{ 7380 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 7381 7382 if (phba->sli_rev == LPFC_SLI_REV4) { 7383 if (max_xri <= 100) 7384 return 10; 7385 else if (max_xri <= 256) 7386 return 25; 7387 else if (max_xri <= 512) 7388 return 50; 7389 else if (max_xri <= 1024) 7390 return 100; 7391 else 7392 return 150; 7393 } else 7394 return 0; 7395} 7396 7397/** 7398 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 7399 * @pdev: pointer to PCI device 7400 * @pid: pointer to PCI device identifier 7401 * 7402 * This routine is called from the kernel's PCI subsystem to a device with 7403 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 7404 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific 7405 * information of the device and driver to see whether the driver can 7406 * support this kind of device. If the match is successful, the driver 7407 * core invokes this routine. If this routine determines it can claim the HBA, 7408 * it does all the initialization that it needs to do to handle the HBA 7409 * properly. 7410 * 7411 * Return code 7412 * 0 - driver can claim the device 7413 * negative value - driver can not claim the device 7414 **/ 7415static int __devinit 7416lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 7417{ 7418 struct lpfc_hba *phba; 7419 struct lpfc_vport *vport = NULL; 7420 struct Scsi_Host *shost = NULL; 7421 int error; 7422 uint32_t cfg_mode, intr_mode; 7423 int mcnt; 7424 7425 /* Allocate memory for HBA structure */ 7426 phba = lpfc_hba_alloc(pdev); 7427 if (!phba) 7428 return -ENOMEM; 7429 7430 /* Perform generic PCI device enabling operation */ 7431 error = lpfc_enable_pci_dev(phba); 7432 if (error) { 7433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7434 "1409 Failed to enable pci device.\n"); 7435 goto out_free_phba; 7436 } 7437 7438 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 7439 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 7440 if (error) 7441 goto out_disable_pci_dev; 7442 7443 /* Set up SLI-4 specific device PCI memory space */ 7444 error = lpfc_sli4_pci_mem_setup(phba); 7445 if (error) { 7446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7447 "1410 Failed to set up pci memory space.\n"); 7448 goto out_disable_pci_dev; 7449 } 7450 7451 /* Set up phase-1 common device driver resources */ 7452 error = lpfc_setup_driver_resource_phase1(phba); 7453 if (error) { 7454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7455 "1411 Failed to set up driver resource.\n"); 7456 goto out_unset_pci_mem_s4; 7457 } 7458 7459 /* Set up SLI-4 Specific device driver resources */ 7460 error = lpfc_sli4_driver_resource_setup(phba); 7461 if (error) { 7462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7463 "1412 Failed to set up driver resource.\n"); 7464 goto out_unset_pci_mem_s4; 7465 } 7466 7467 /* Initialize and populate the iocb list per host */ 7468 error = lpfc_init_iocb_list(phba, 7469 phba->sli4_hba.max_cfg_param.max_xri); 7470 if (error) { 7471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7472 "1413 Failed to initialize iocb list.\n"); 7473 goto out_unset_driver_resource_s4; 7474 } 7475 7476 /* Set up common device driver resources */ 7477 error = lpfc_setup_driver_resource_phase2(phba); 7478 if (error) { 7479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7480 "1414 Failed to set up driver resource.\n"); 7481 goto out_free_iocb_list;
7482 } 7483 7484 /* Create SCSI host to the physical port */ 7485 error = lpfc_create_shost(phba); 7486 if (error) { 7487 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7488 "1415 Failed to create scsi host.\n"); 7489 goto out_unset_driver_resource; 7490 } 7491 7492 /* Configure sysfs attributes */ 7493 vport = phba->pport; 7494 error = lpfc_alloc_sysfs_attr(vport); 7495 if (error) { 7496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7497 "1416 Failed to allocate sysfs attr\n"); 7498 goto out_destroy_shost; 7499 } 7500 7501 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 7502 /* Now, trying to enable interrupt and bring up the device */ 7503 cfg_mode = phba->cfg_use_msi; 7504 while (true) { 7505 /* Put device to a known state before enabling interrupt */ 7506 lpfc_stop_port(phba); 7507 /* Configure and enable interrupt */ 7508 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 7509 if (intr_mode == LPFC_INTR_ERROR) { 7510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7511 "0426 Failed to enable interrupt.\n"); 7512 error = -ENODEV; 7513 goto out_free_sysfs_attr; 7514 } 7515 /* Default to single FCP EQ for non-MSI-X */ 7516 if (phba->intr_type != MSIX) 7517 phba->cfg_fcp_eq_count = 1; 7518 /* Set up SLI-4 HBA */ 7519 if (lpfc_sli4_hba_setup(phba)) { 7520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7521 "1421 Failed to set up hba\n"); 7522 error = -ENODEV; 7523 goto out_disable_intr; 7524 } 7525 7526 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 7527 if (intr_mode != 0) 7528 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 7529 LPFC_ACT_INTR_CNT); 7530 7531 /* Check active interrupts received only for MSI/MSI-X */ 7532 if (intr_mode == 0 || 7533 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 7534 /* Log the current active interrupt mode */ 7535 phba->intr_mode = intr_mode; 7536 lpfc_log_intr_mode(phba, intr_mode); 7537 break; 7538 } 7539 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7540 "0451 Configure interrupt mode (%d) " 7541 "failed active interrupt test.\n", 7542 intr_mode); 7543 /* Unset the previous SLI-4 HBA setup */ 7544 lpfc_sli4_unset_hba(phba); 7545 /* Try next level of interrupt mode */ 7546 cfg_mode = --intr_mode; 7547 } 7548 7549 /* Perform post initialization setup */ 7550 lpfc_post_init_setup(phba); 7551 7552 /* Check if there are static vports to be created. */ 7553 lpfc_create_static_vport(phba); 7554 7555 return 0; 7556 7557out_disable_intr: 7558 lpfc_sli4_disable_intr(phba); 7559out_free_sysfs_attr: 7560 lpfc_free_sysfs_attr(vport); 7561out_destroy_shost: 7562 lpfc_destroy_shost(phba); 7563out_unset_driver_resource: 7564 lpfc_unset_driver_resource_phase2(phba); 7565out_free_iocb_list: 7566 lpfc_free_iocb_list(phba); 7567out_unset_driver_resource_s4: 7568 lpfc_sli4_driver_resource_unset(phba); 7569out_unset_pci_mem_s4: 7570 lpfc_sli4_pci_mem_unset(phba); 7571out_disable_pci_dev: 7572 lpfc_disable_pci_dev(phba); 7573 if (shost) 7574 scsi_host_put(shost); 7575out_free_phba: 7576 lpfc_hba_free(phba); 7577 return error; 7578} 7579 7580/** 7581 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 7582 * @pdev: pointer to PCI device 7583 * 7584 * This routine is called from the kernel's PCI subsystem to a device with 7585 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 7586 * removed from the PCI bus, it performs all the necessary cleanup for the HBA 7587 * device to be removed from the PCI subsystem properly.
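* Unlike the SLI-3 remove path, no explicit board restart is issued here; * lpfc_sli4_hba_unset() tears down the SLI-4 queues and resets the FCoE * function instead.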
7588 **/ 7589static void __devexit 7590lpfc_pci_remove_one_s4(struct pci_dev *pdev) 7591{ 7592 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7593 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 7594 struct lpfc_vport **vports; 7595 struct lpfc_hba *phba = vport->phba; 7596 int i; 7597 7598 /* Mark the device unloading flag */ 7599 spin_lock_irq(&phba->hbalock); 7600 vport->load_flag |= FC_UNLOADING; 7601 spin_unlock_irq(&phba->hbalock); 7602 7603 /* Free the HBA sysfs attributes */ 7604 lpfc_free_sysfs_attr(vport); 7605 7606 /* Release all the vports against this physical port */ 7607 vports = lpfc_create_vport_work_array(phba); 7608 if (vports != NULL) 7609 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 7610 fc_vport_terminate(vports[i]->fc_vport); 7611 lpfc_destroy_vport_work_array(phba, vports); 7612 7613 /* Remove FC host and then SCSI host with the physical port */ 7614 fc_remove_host(shost); 7615 scsi_remove_host(shost); 7616 7617 /* Perform cleanup on the physical port */ 7618 lpfc_cleanup(vport); 7619 7620 /* 7621 * Bring down the SLI Layer. This step disables all interrupts, 7622 * clears the rings, discards all mailbox commands, and resets 7623 * the HBA FCoE function. 7624 */ 7625 lpfc_debugfs_terminate(vport); 7626 lpfc_sli4_hba_unset(phba); 7627 7628 spin_lock_irq(&phba->hbalock); 7629 list_del_init(&vport->listentry); 7630 spin_unlock_irq(&phba->hbalock); 7631 7632 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi 7633 * buffers are released to their corresponding pools here. 7634 */ 7635 lpfc_scsi_free(phba); 7636 lpfc_sli4_driver_resource_unset(phba); 7637 7638 /* Unmap adapter Control and Doorbell registers */ 7639 lpfc_sli4_pci_mem_unset(phba); 7640 7641 /* Release PCI resources and disable device's PCI function */ 7642 scsi_host_put(shost); 7643 lpfc_disable_pci_dev(phba); 7644 7645 /* Finally, free the driver's device data structure */ 7646 lpfc_hba_free(phba); 7647 7648 return; 7649} 7650 7651/** 7652 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 7653 * @pdev: pointer to PCI device 7654 * @msg: power management message 7655 * 7656 * This routine is called from the kernel's PCI subsystem to support system 7657 * Power Management (PM) to a device with SLI-4 interface spec. When PM invokes 7658 * this method, it quiesces the device by stopping the driver's worker 7659 * thread for the device, turning off the device's interrupt and DMA, and 7660 * bringing the device offline. Note that because the driver implements only 7661 * the minimum PM requirements of a power-aware driver -- all 7662 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() 7663 * method call are treated as SUSPEND and the driver fully 7664 * reinitializes its device during the resume() method call -- the driver sets 7665 * the device to the PCI_D3hot state in PCI config space instead of setting it 7666 * according to the @msg provided by the PM.
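* The PCI_D3hot transition matches the SLI-3 suspend path above; only the * interrupt teardown differs (lpfc_sli4_disable_intr vs lpfc_sli_disable_intr).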
7667 * 7668 * Return code 7669 * 0 - driver suspended the device 7670 * Error otherwise 7671 **/ 7672static int 7673lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 7674{ 7675 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7676 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7677 7678 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7679 "0298 PCI device Power Management suspend.\n"); 7680 7681 /* Bring down the device */ 7682 lpfc_offline_prep(phba); 7683 lpfc_offline(phba); 7684 kthread_stop(phba->worker_thread); 7685 7686 /* Disable interrupt from device */ 7687 lpfc_sli4_disable_intr(phba); 7688 7689 /* Save device state to PCI config space */ 7690 pci_save_state(pdev); 7691 pci_set_power_state(pdev, PCI_D3hot); 7692 7693 return 0; 7694} 7695 7696/** 7697 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 7698 * @pdev: pointer to PCI device 7699 * 7700 * This routine is called from the kernel's PCI subsystem to support system 7701 * Power Management (PM) to a device with SLI-4 interface spec. When PM invokes 7702 * this method, it restores the device's PCI config space state and fully 7703 * reinitializes the device and brings it online. Note that because the driver 7704 * implements only the minimum PM requirements of a power-aware driver -- 7705 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) 7706 * passed to the suspend() method call are treated as SUSPEND and the driver 7707 * fully reinitializes its device during the resume() method call -- the device 7708 * is set to PCI_D0 directly in PCI config space before restoring the 7709 * state. 7710 * 7711 * Return code 7712 * 0 - driver resumed the device 7713 * Error otherwise 7714 **/ 7715static int 7716lpfc_pci_resume_one_s4(struct pci_dev *pdev) 7717{ 7718 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7719 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7720 uint32_t intr_mode; 7721 int error; 7722 7723 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7724 "0292 PCI device Power Management resume.\n"); 7725 7726 /* Restore device state from PCI config space */ 7727 pci_set_power_state(pdev, PCI_D0); 7728 pci_restore_state(pdev); 7729 if (pdev->is_busmaster) 7730 pci_set_master(pdev); 7731 7732 /* Startup the kernel thread for this host adapter. */ 7733 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7734 "lpfc_worker_%d", phba->brd_no); 7735 if (IS_ERR(phba->worker_thread)) { 7736 error = PTR_ERR(phba->worker_thread); 7737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7738 "0293 PM resume failed to start worker " 7739 "thread: error=x%x.\n", error); 7740 return error; 7741 } 7742 7743 /* Configure and enable interrupt */ 7744 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 7745 if (intr_mode == LPFC_INTR_ERROR) { 7746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7747 "0294 PM resume Failed to enable interrupt\n"); 7748 return -EIO; 7749 } else 7750 phba->intr_mode = intr_mode; 7751 7752 /* Restart HBA and bring it online */ 7753 lpfc_sli_brdrestart(phba); 7754 lpfc_online(phba); 7755 7756 /* Log the current active interrupt mode */ 7757 lpfc_log_intr_mode(phba, phba->intr_mode); 7758 7759 return 0; 7760} 7761 7762/** 7763 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 7764 * @pdev: pointer to PCI device. 7765 * @state: the current PCI connection state. 7766 * 7767 * This routine is called from the PCI subsystem for error handling to a device 7768 * with SLI-4 interface spec.
This function is called by the PCI subsystem 7769 * after a PCI bus error affecting this device has been detected. When this 7770 * function is invoked, it will need to stop all the I/Os and interrupt(s) 7771 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 7772 * for the PCI subsystem to perform proper recovery as desired. 7773 * 7774 * Return codes 7775 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7776 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7777 **/ 7778static pci_ers_result_t 7779lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 7780{ 7781 return PCI_ERS_RESULT_NEED_RESET; 7782} 7783 7784/** 7785 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 7786 * @pdev: pointer to PCI device. 7787 * 7788 * This routine is called from the PCI subsystem for error handling to a device 7789 * with SLI-4 interface spec. It is called after the PCI bus has been reset to 7790 * restart the PCI card from scratch, as if from a cold-boot. During the 7791 * PCI subsystem error recovery, after the driver returns 7792 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 7793 * recovery and then call this routine before calling the .resume method to 7794 * recover the device. This function will initialize the HBA device and enable 7795 * the interrupt, but it will just put the HBA into an offline state without 7796 * passing any I/O traffic. 7797 * 7798 * Return codes 7799 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7800 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7801 */ 7802static pci_ers_result_t 7803lpfc_io_slot_reset_s4(struct pci_dev *pdev) 7804{ 7805 return PCI_ERS_RESULT_RECOVERED; 7806} 7807 7808/** 7809 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 7810 * @pdev: pointer to PCI device 7811 * 7812 * This routine is called from the PCI subsystem for error handling to a device 7813 * with SLI-4 interface spec. It is called when kernel error recovery tells 7814 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 7815 * error recovery. After this call, traffic can start to flow from this device 7816 * again. 7817 **/ 7818static void 7819lpfc_io_resume_s4(struct pci_dev *pdev) 7820{ 7821 return; 7822} 7823 7824/** 7825 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 7826 * @pdev: pointer to PCI device 7827 * @pid: pointer to PCI device identifier 7828 * 7829 * This routine is to be registered to the kernel's PCI subsystem. When an 7830 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem looks 7831 * at PCI device-specific information of the device and driver to see whether 7832 * the driver can support this kind of device. If the match is 7833 * successful, the driver core invokes this routine. This routine dispatches 7834 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 7835 * do all the initialization that it needs to do to handle the HBA device 7836 * properly.
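* The SLI revision is read from the LPFC_SLIREV_CONF_WORD PCI config word; * a valid SLI-4 interface revision routes the probe to lpfc_pci_probe_one_s4(), * anything else to lpfc_pci_probe_one_s3().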
7837 * 7838 * Return code 7839 * 0 - driver can claim the device 7840 * negative value - driver can not claim the device 7841 **/ 7842static int __devinit 7843lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 7844{ 7845 int rc; 7846 struct lpfc_sli_intf intf; 7847 7848 if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0)) 7849 return -ENODEV; 7850 7851 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 7852 (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4)) 7853 rc = lpfc_pci_probe_one_s4(pdev, pid); 7854 else 7855 rc = lpfc_pci_probe_one_s3(pdev, pid); 7856 7857 return rc; 7858} 7859 7860/** 7861 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 7862 * @pdev: pointer to PCI device 7863 * 7864 * This routine is to be registered to the kernel's PCI subsystem. When an 7865 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 7866 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 7867 * remove routine, which will perform all the necessary cleanup for the 7868 * device to be removed from the PCI subsystem properly. 7869 **/ 7870static void __devexit 7871lpfc_pci_remove_one(struct pci_dev *pdev) 7872{ 7873 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7874 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7875 7876 switch (phba->pci_dev_grp) { 7877 case LPFC_PCI_DEV_LP: 7878 lpfc_pci_remove_one_s3(pdev); 7879 break; 7880 case LPFC_PCI_DEV_OC: 7881 lpfc_pci_remove_one_s4(pdev); 7882 break; 7883 default: 7884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7885 "1424 Invalid PCI device group: 0x%x\n", 7886 phba->pci_dev_grp); 7887 break; 7888 } 7889 return; 7890} 7891 7892/** 7893 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 7894 * @pdev: pointer to PCI device 7895 * @msg: power management message 7896 * 7897 * This routine is to be registered to the kernel's PCI subsystem to support 7898 * system Power Management (PM). When PM invokes this method, it dispatches 7899 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 7900 * suspend the device. 7901 * 7902 * Return code 7903 * 0 - driver suspended the device 7904 * Error otherwise 7905 **/ 7906static int 7907lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 7908{ 7909 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7910 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7911 int rc = -ENODEV; 7912 7913 switch (phba->pci_dev_grp) { 7914 case LPFC_PCI_DEV_LP: 7915 rc = lpfc_pci_suspend_one_s3(pdev, msg); 7916 break; 7917 case LPFC_PCI_DEV_OC: 7918 rc = lpfc_pci_suspend_one_s4(pdev, msg); 7919 break; 7920 default: 7921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7922 "1425 Invalid PCI device group: 0x%x\n", 7923 phba->pci_dev_grp); 7924 break; 7925 } 7926 return rc; 7927} 7928 7929/** 7930 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 7931 * @pdev: pointer to PCI device 7932 * 7933 * This routine is to be registered to the kernel's PCI subsystem to support 7934 * system Power Management (PM). When PM invokes this method, it dispatches 7935 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 7936 * resume the device. 
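* The dispatch is keyed off phba->pci_dev_grp, which was recorded when the * SLI API jump table was set up at probe time.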
7937 * 7938 * Return code 7939 * 0 - driver resumed the device 7940 * Error otherwise 7941 **/ 7942static int 7943lpfc_pci_resume_one(struct pci_dev *pdev) 7944{ 7945 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7946 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7947 int rc = -ENODEV; 7948 7949 switch (phba->pci_dev_grp) { 7950 case LPFC_PCI_DEV_LP: 7951 rc = lpfc_pci_resume_one_s3(pdev); 7952 break; 7953 case LPFC_PCI_DEV_OC: 7954 rc = lpfc_pci_resume_one_s4(pdev); 7955 break; 7956 default: 7957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7958 "1426 Invalid PCI device group: 0x%x\n", 7959 phba->pci_dev_grp); 7960 break; 7961 } 7962 return rc; 7963} 7964 7965/** 7966 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 7967 * @pdev: pointer to PCI device. 7968 * @state: the current PCI connection state. 7969 * 7970 * This routine is registered to the PCI subsystem for error handling. This 7971 * function is called by the PCI subsystem after a PCI bus error affecting 7972 * this device has been detected. When this routine is invoked, it dispatches 7973 * the action to the proper SLI-3 or SLI-4 device error detected handling 7974 * routine, which will perform the proper error detected operation. 7975 * 7976 * Return codes 7977 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7978 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7979 **/ 7980static pci_ers_result_t 7981lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 7982{ 7983 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7984 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7985 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 7986 7987 switch (phba->pci_dev_grp) { 7988 case LPFC_PCI_DEV_LP: 7989 rc = lpfc_io_error_detected_s3(pdev, state); 7990 break; 7991 case LPFC_PCI_DEV_OC: 7992 rc = lpfc_io_error_detected_s4(pdev, state); 7993 break; 7994 default: 7995 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7996 "1427 Invalid PCI device group: 0x%x\n", 7997 phba->pci_dev_grp); 7998 break; 7999 } 8000 return rc; 8001} 8002 8003/** 8004 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 8005 * @pdev: pointer to PCI device. 8006 * 8007 * This routine is registered to the PCI subsystem for error handling. This 8008 * function is called after the PCI bus has been reset to restart the PCI card 8009 * from scratch, as if from a cold-boot. When this routine is invoked, it 8010 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 8011 * routine, which will perform the proper device reset.
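* Note that the SLI-4 handler (lpfc_io_slot_reset_s4) is currently a stub * that reports PCI_ERS_RESULT_RECOVERED without touching the hardware; only * the SLI-3 path re-enables and restarts the device.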
8012 * 8013 * Return codes 8014 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8015 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8016 **/ 8017static pci_ers_result_t 8018lpfc_io_slot_reset(struct pci_dev *pdev) 8019{ 8020 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8021 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8022 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8023 8024 switch (phba->pci_dev_grp) { 8025 case LPFC_PCI_DEV_LP: 8026 rc = lpfc_io_slot_reset_s3(pdev); 8027 break; 8028 case LPFC_PCI_DEV_OC: 8029 rc = lpfc_io_slot_reset_s4(pdev); 8030 break; 8031 default: 8032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8033 "1428 Invalid PCI device group: 0x%x\n", 8034 phba->pci_dev_grp); 8035 break; 8036 } 8037 return rc; 8038} 8039 8040/** 8041 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 8042 * @pdev: pointer to PCI device 8043 * 8044 * This routine is registered to the PCI subsystem for error handling. It 8045 * is called when kernel error recovery tells the lpfc driver that it is 8046 * OK to resume normal PCI operation after PCI bus error recovery. When 8047 * this routine is invoked, it dispatches the action to the proper SLI-3 8048 * or SLI-4 device io_resume routine, which will resume the device operation. 8049 **/ 8050static void 8051lpfc_io_resume(struct pci_dev *pdev) 8052{ 8053 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8054 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8055 8056 switch (phba->pci_dev_grp) { 8057 case LPFC_PCI_DEV_LP: 8058 lpfc_io_resume_s3(pdev); 8059 break; 8060 case LPFC_PCI_DEV_OC: 8061 lpfc_io_resume_s4(pdev); 8062 break; 8063 default: 8064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8065 "1429 Invalid PCI device group: 0x%x\n", 8066 phba->pci_dev_grp); 8067 break; 8068 } 8069 return; 8070} 8071 8072static struct pci_device_id lpfc_id_table[] = { 8073 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 8074 PCI_ANY_ID, PCI_ANY_ID, }, 8075 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 8076 PCI_ANY_ID, PCI_ANY_ID, }, 8077 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 8078 PCI_ANY_ID, PCI_ANY_ID, }, 8079 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 8080 PCI_ANY_ID, PCI_ANY_ID, }, 8081 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 8082 PCI_ANY_ID, PCI_ANY_ID, }, 8083 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 8084 PCI_ANY_ID, PCI_ANY_ID, }, 8085 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 8086 PCI_ANY_ID, PCI_ANY_ID, }, 8087 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 8088 PCI_ANY_ID, PCI_ANY_ID, }, 8089 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 8090 PCI_ANY_ID, PCI_ANY_ID, }, 8091 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 8092 PCI_ANY_ID, PCI_ANY_ID, }, 8093 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 8094 PCI_ANY_ID, PCI_ANY_ID, }, 8095 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 8096 PCI_ANY_ID, PCI_ANY_ID, }, 8097 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 8098 PCI_ANY_ID, PCI_ANY_ID, }, 8099 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 8100 PCI_ANY_ID, PCI_ANY_ID, }, 8101 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 8102 PCI_ANY_ID, PCI_ANY_ID, }, 8103 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 8104 PCI_ANY_ID, PCI_ANY_ID, }, 8105 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 8106 PCI_ANY_ID, PCI_ANY_ID, }, 8107 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 8108 PCI_ANY_ID, PCI_ANY_ID, }, 8109 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 8110 PCI_ANY_ID, PCI_ANY_ID, }, 8111 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 8112 
PCI_ANY_ID, PCI_ANY_ID, }, 8113 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 8114 PCI_ANY_ID, PCI_ANY_ID, }, 8115 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 8116 PCI_ANY_ID, PCI_ANY_ID, }, 8117 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 8118 PCI_ANY_ID, PCI_ANY_ID, }, 8119 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 8120 PCI_ANY_ID, PCI_ANY_ID, }, 8121 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 8122 PCI_ANY_ID, PCI_ANY_ID, }, 8123 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 8124 PCI_ANY_ID, PCI_ANY_ID, }, 8125 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 8126 PCI_ANY_ID, PCI_ANY_ID, }, 8127 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 8128 PCI_ANY_ID, PCI_ANY_ID, }, 8129 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 8130 PCI_ANY_ID, PCI_ANY_ID, }, 8131 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 8132 PCI_ANY_ID, PCI_ANY_ID, }, 8133 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 8134 PCI_ANY_ID, PCI_ANY_ID, }, 8135 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 8136 PCI_ANY_ID, PCI_ANY_ID, }, 8137 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 8138 PCI_ANY_ID, PCI_ANY_ID, }, 8139 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 8140 PCI_ANY_ID, PCI_ANY_ID, }, 8141 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 8142 PCI_ANY_ID, PCI_ANY_ID, }, 8143 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 8144 PCI_ANY_ID, PCI_ANY_ID, }, 8145 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 8146 PCI_ANY_ID, PCI_ANY_ID, }, 8147 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 8148 PCI_ANY_ID, PCI_ANY_ID, }, 8149 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 8150 PCI_ANY_ID, PCI_ANY_ID, }, 8151 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 8152 PCI_ANY_ID, PCI_ANY_ID, }, 8153 { 0 } 8154}; 8155 8156MODULE_DEVICE_TABLE(pci, lpfc_id_table); 8157 8158static struct pci_error_handlers lpfc_err_handler = { 8159 .error_detected = lpfc_io_error_detected, 8160 .slot_reset = lpfc_io_slot_reset, 8161 .resume = lpfc_io_resume, 8162}; 8163 8164static struct pci_driver lpfc_driver = { 8165 .name = LPFC_DRIVER_NAME, 8166 .id_table = lpfc_id_table, 8167 .probe = lpfc_pci_probe_one, 8168 .remove = __devexit_p(lpfc_pci_remove_one), 8169 .suspend = lpfc_pci_suspend_one, 8170 .resume = lpfc_pci_resume_one, 8171 .err_handler = &lpfc_err_handler, 8172}; 8173 8174/** 8175 * lpfc_init - lpfc module initialization routine 8176 * 8177 * This routine is to be invoked when the lpfc module is loaded into the 8178 * kernel. The special kernel macro module_init() is used to indicate the 8179 * role of this routine to the kernel as lpfc module entry point. 
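* Two FC transport templates may be attached here: one for physical ports * and, when lpfc_enable_npiv is set, a second for vports; both are released * again if driver registration fails.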
8180 * 8181 * Return codes 8182 * 0 - successful 8183 * -ENOMEM - FC attach transport failed 8184 * all others - failed 8185 */ 8186static int __init 8187lpfc_init(void) 8188{ 8189 int error = 0; 8190 8191 printk(LPFC_MODULE_DESC "\n"); 8192 printk(LPFC_COPYRIGHT "\n"); 8193 8194 if (lpfc_enable_npiv) { 8195 lpfc_transport_functions.vport_create = lpfc_vport_create; 8196 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 8197 } 8198 lpfc_transport_template = 8199 fc_attach_transport(&lpfc_transport_functions); 8200 if (lpfc_transport_template == NULL) 8201 return -ENOMEM; 8202 if (lpfc_enable_npiv) { 8203 lpfc_vport_transport_template = 8204 fc_attach_transport(&lpfc_vport_transport_functions); 8205 if (lpfc_vport_transport_template == NULL) { 8206 fc_release_transport(lpfc_transport_template); 8207 return -ENOMEM; 8208 } 8209 } 8210 error = pci_register_driver(&lpfc_driver); 8211 if (error) { 8212 fc_release_transport(lpfc_transport_template); 8213 if (lpfc_enable_npiv) 8214 fc_release_transport(lpfc_vport_transport_template); 8215 } 8216 8217 return error; 8218} 8219 8220/** 8221 * lpfc_exit - lpfc module removal routine 8222 * 8223 * This routine is invoked when the lpfc module is removed from the kernel. 8224 * The special kernel macro module_exit() is used to indicate the role of 8225 * this routine to the kernel as lpfc module exit point. 8226 */ 8227static void __exit 8228lpfc_exit(void) 8229{ 8230 pci_unregister_driver(&lpfc_driver); 8231 fc_release_transport(lpfc_transport_template); 8232 if (lpfc_enable_npiv) 8233 fc_release_transport(lpfc_vport_transport_template); 8234 if (_dump_buf_data) { 8235 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 8236 "_dump_buf_data at 0x%p\n", 8237 (1L << _dump_buf_data_order), _dump_buf_data); 8238 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 8239 } 8240 8241 if (_dump_buf_dif) { 8242 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 8243 "_dump_buf_dif at 0x%p\n", 8244 (1L << _dump_buf_dif_order), _dump_buf_dif); 8245 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 8246 } 8247} 8248 8249module_init(lpfc_init); 8250module_exit(lpfc_exit); 8251MODULE_LICENSE("GPL"); 8252MODULE_DESCRIPTION(LPFC_MODULE_DESC); 8253MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 8254MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 8255