lpfc_init.c revision 19a7b4aebf9ad435c69a7e39930338499af4d152
1/******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. 
* 20 *******************************************************************/ 21 22#include <linux/blkdev.h> 23#include <linux/delay.h> 24#include <linux/dma-mapping.h> 25#include <linux/idr.h> 26#include <linux/interrupt.h> 27#include <linux/kthread.h> 28#include <linux/pci.h> 29#include <linux/spinlock.h> 30 31#include <scsi/scsi.h> 32#include <scsi/scsi_device.h> 33#include <scsi/scsi_host.h> 34#include <scsi/scsi_transport_fc.h> 35 36#include "lpfc_hw.h" 37#include "lpfc_sli.h" 38#include "lpfc_disc.h" 39#include "lpfc_scsi.h" 40#include "lpfc.h" 41#include "lpfc_logmsg.h" 42#include "lpfc_crtn.h" 43#include "lpfc_version.h" 44 45static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *); 46static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 47static int lpfc_post_rcv_buf(struct lpfc_hba *); 48 49static struct scsi_transport_template *lpfc_transport_template = NULL; 50static DEFINE_IDR(lpfc_hba_index); 51 52/************************************************************************/ 53/* */ 54/* lpfc_config_port_prep */ 55/* This routine will do LPFC initialization prior to the */ 56/* CONFIG_PORT mailbox command. This will be initialized */ 57/* as a SLI layer callback routine. */ 58/* This routine returns 0 on success or -ERESTART if it wants */ 59/* the SLI layer to reset the HBA and try again. Any */ 60/* other return value indicates an error. 
 */
/*                                                                      */
/************************************************************************/
int
lpfc_config_port_prep(struct lpfc_hba * phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->hba_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		/* "LC" HBAs need the license string written back
		 * (byte-swapped to big-endian) through READ_NVPARM
		 * before further initialization.
		 */
		uint32_t *ptext = (uint32_t *) licensed;

		for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
			*ptext = cpu_to_be32(*ptext);

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX,
					"%d:0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof (mb->un.varRDnvp.nodename));
	}

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* The HBA's current state is provided by the ProgType and rr fields.
	 * Read and check the value of these fields before continuing to config
	 * this port.
	 */
	if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
		/* Old firmware */
		vp->rev.rBit = 0;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0440 Adapter failed to init, mbxCmd x%x "
				"READ_REV detected outdated firmware"
				"Data: x%x\n",
				phba->brd_no,
				mb->mbxCommand, 0);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	} else {
		vp->rev.rBit = 1;
		vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
		memcpy(vp->rev.sli1FwName,
			(char*)mb->un.varRdRev.sli1FwName, 16);
		vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
		memcpy(vp->rev.sli2FwName,
			(char *)mb->un.varRdRev.sli2FwName, 16);
	}

	/* Save information as VPD data */
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Get adapter VPD information */
	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
	if (!pmb->context2)
		goto out_free_mbox;
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_context2;

	/* Dump the VPD region chunk by chunk until the adapter reports a
	 * zero word count.  A DUMP failure is not fatal: the adapter
	 * simply has no VPD (lpfc_parse_vpd tolerates a NULL buffer).
	 */
	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"%d:0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			kfree(lpfc_vpd_data);
			lpfc_vpd_data = NULL;
			break;
		}

		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
							mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt);
	lpfc_parse_vpd(phba, lpfc_vpd_data);

	/* kfree(NULL) is safe on the partial-allocation error paths */
	kfree(lpfc_vpd_data);
out_free_context2:
	kfree(pmb->context2);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/************************************************************************/
/*                                                                      */
/* lpfc_config_port_post                                                */
/* This routine will do LPFC initialization after the                   */
/* CONFIG_PORT mailbox command. This will be initialized                */
/* as a SLI layer callback routine.                                     */
/* This routine returns 0 on success. Any other return value            */
/* indicates an error.                                                  */
/*                                                                      */
/************************************************************************/
int
lpfc_config_port_post(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j, rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->mb;

	lpfc_config_link(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0447 Adapter failed init, mbxCmd x%x "
				"CONFIG_LINK mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Get login parameters for NID.
 */
	lpfc_read_sparam(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		/* free the DMA buffer attached by lpfc_read_sparam too */
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free( pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		/* Render each nibble of the first 6 WWNN bytes as one
		 * alphanumeric character (0-9 then a-f).
		 */
		outptr = &phba->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	/* This should turn on DELAYED ABTS for ELS timeouts */
	lpfc_set_slim(phba, pmb, 0x052198, 0x1);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}


	lpfc_read_config(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;
	/* HBA is not 4GB capable, or HBA is not 2GB capable,
	   don't let link speed ask for it */
	if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
	     (phba->cfg_link_speed > LINK_SPEED_2G)) ||
	    (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
	     (phba->cfg_link_speed > LINK_SPEED_1G))) {
		/* Reset link speed to auto. 1G/2GB HBA cfg'd for 4G */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_LINK_EVENT,
				"%d:1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->brd_no,
				phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->hba_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ring 0 till hba_state is READY */
	if (psli->ring[psli->ip_ring].cmdringaddr)
		psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	lpfc_post_rcv_buf(phba);

	/* Enable appropriate host interrupts */
	spin_lock_irq(phba->host->host_lock);
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	/*
	 * Setup the ring 0 (els) timeout handler
	 */
	timeout = phba->fc_ratov << 1;
	phba->els_tmofunc.expires = jiffies + HZ * timeout;
	add_timer(&phba->els_tmofunc);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */

	/* Poll up to ~30 seconds for discovery to quiesce before
	 * returning; bail early if the link stays down past 15s.
	 */
	i = 0;
	while ((phba->hba_state != LPFC_HBA_READY) ||
	       (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
	       ((phba->fc_map_cnt == 0) && (i<2)) ||
	       (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
		/* Check every second for 30 retries. */
		i++;
		if (i > 30) {
			break;
		}
		if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
			/* The link is down. Set linkdown timeout */
			break;
		}

		/* Delay for 1 second to give discovery time to complete. */
		msleep(1000);

	}

	/* Since num_disc_nodes keys off of PLOGI, delay a bit to let
	 * any potential PRLIs to flush thru the SLI sub-system.
	 */
	msleep(50);

	return (0);
}

/************************************************************************/
/*                                                                      */
/* lpfc_hba_down_prep                                                   */
/* This routine will do LPFC uninitialization before the                */
/* HBA is reset when bringing down the SLI Layer. This will be          */
/* initialized as a SLI layer callback routine.                         */
/* This routine returns 0 on success. Any other return value            */
/* indicates an error.
 */
/*                                                                      */
/************************************************************************/
int
lpfc_hba_down_prep(struct lpfc_hba * phba)
{
	/* Disable interrupts */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Cleanup potential discovery resources */
	lpfc_els_flush_rscn(phba);
	lpfc_els_flush_cmd(phba);
	lpfc_disc_flush_list(phba);

	return (0);
}

/************************************************************************/
/*                                                                      */
/* lpfc_handle_eratt                                                    */
/* This routine will handle processing a Host Attention                 */
/* Error Status event. This will be initialized                         */
/* as a SLI layer callback routine.                                     */
/*                                                                      */
/************************************************************************/
void
lpfc_handle_eratt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/*
	 * If a reset is sent to the HBA restore PCI configuration registers.
	 */
	if ( phba->hba_state == LPFC_INIT_START ) {
		mdelay(1);
		readl(phba->HCregaddr); /* flush */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */

		/* Restore PCI cmd register */
		pci_write_config_word(phba->pcidev,
				      PCI_COMMAND, phba->pci_cfg_value);
	}

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_ESTABLISH_LINK;
		spin_unlock_irq(phba->host->host_lock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);


		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			/* arm the re-establish-link timeout (60s) */
			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
			return;
		}
	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		lpfc_offline(phba);

	}
}

/************************************************************************/
/*                                                                      */
/* lpfc_handle_latt                                                     */
/* This routine will handle processing a Host Attention                 */
/* Link Status event. This will be initialized                          */
/* as a SLI layer callback routine.
*/ 549/* */ 550/************************************************************************/ 551void 552lpfc_handle_latt(struct lpfc_hba * phba) 553{ 554 struct lpfc_sli *psli = &phba->sli; 555 LPFC_MBOXQ_t *pmb; 556 volatile uint32_t control; 557 struct lpfc_dmabuf *mp; 558 int rc = -ENOMEM; 559 560 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 561 if (!pmb) 562 goto lpfc_handle_latt_err_exit; 563 564 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 565 if (!mp) 566 goto lpfc_handle_latt_free_pmb; 567 568 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 569 if (!mp->virt) 570 goto lpfc_handle_latt_free_mp; 571 572 rc = -EIO; 573 574 575 psli->slistat.link_event++; 576 lpfc_read_la(phba, pmb, mp); 577 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 578 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 579 if (rc == MBX_NOT_FINISHED) 580 goto lpfc_handle_latt_free_mp; 581 582 /* Clear Link Attention in HA REG */ 583 spin_lock_irq(phba->host->host_lock); 584 writel(HA_LATT, phba->HAregaddr); 585 readl(phba->HAregaddr); /* flush */ 586 spin_unlock_irq(phba->host->host_lock); 587 588 return; 589 590lpfc_handle_latt_free_mp: 591 kfree(mp); 592lpfc_handle_latt_free_pmb: 593 kfree(pmb); 594lpfc_handle_latt_err_exit: 595 /* Enable Link attention interrupts */ 596 spin_lock_irq(phba->host->host_lock); 597 psli->sli_flag |= LPFC_PROCESS_LA; 598 control = readl(phba->HCregaddr); 599 control |= HC_LAINT_ENA; 600 writel(control, phba->HCregaddr); 601 readl(phba->HCregaddr); /* flush */ 602 603 /* Clear Link Attention in HA REG */ 604 writel(HA_LATT, phba->HAregaddr); 605 readl(phba->HAregaddr); /* flush */ 606 spin_unlock_irq(phba->host->host_lock); 607 lpfc_linkdown(phba); 608 phba->hba_state = LPFC_HBA_ERROR; 609 610 /* The other case is an error from issue_mbox */ 611 if (rc == -ENOMEM) 612 lpfc_printf_log(phba, 613 KERN_WARNING, 614 LOG_MBOX, 615 "%d:0300 READ_LA: no buffers\n", 616 phba->brd_no); 617 618 return; 619} 620 
/************************************************************************/
/*                                                                      */
/* lpfc_parse_vpd                                                       */
/* This routine will parse the VPD data                                 */
/*                                                                      */
/************************************************************************/
static int
lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
{
	uint8_t lenlo, lenhi;
	uint32_t Length;
	int i, j;
	int finished = 0;
	int index = 0;

	/* A NULL buffer means the adapter had no VPD; nothing to do */
	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_INIT,
			"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	/* Walk the VPD resource tags (0x82 identifier string, 0x90
	 * read-only keywords, 0x78 end tag), scanning at most 108 bytes.
	 */
	do {
		switch (vpd[index]) {
		case 0x82:
			/* Identifier string - skip over its payload */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			/* Read-only area: scan keyword entries */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);

			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				/* V1 = model description */
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				/* V2 = model name */
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				/* V3 = program type */
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				/* V4 = port identifier */
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				/* Unrecognized keyword - skip its payload */
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			/* End tag */
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	} while (!finished && (index < 108));

	return(1);
}

/* Derive the default model name and description string from the PCI
 * device id (and, for some devices, the BIU revision).  The scratch
 * string holds "<model> <link-speed-in-Gb>".
 */
static void
lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
{
	lpfc_vpd_t *vp;
	uint32_t id;
	char str[16];

	vp = &phba->vpd;
	pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);

	/* device id lives in the upper 16 bits of the dword */
	switch ((id >> 16) & 0xffff) {
	case PCI_DEVICE_ID_FIREFLY:
		strcpy(str, "LP6000 1");
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			strcpy(str, "LP7000 1");
		else
			strcpy(str, "LP7000E 1");
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		strcpy(str, "LP8000 1");
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			strcpy(str, "LP9002 2");
		else
			strcpy(str, "LP9000 1");
		break;
	case PCI_DEVICE_ID_RFLY:
		strcpy(str, "LP952 2");
		break;
	case PCI_DEVICE_ID_PEGASUS:
		strcpy(str, "LP9802 2");
		break;
	case PCI_DEVICE_ID_THOR:
		strcpy(str, "LP10000 2");
		break;
	case PCI_DEVICE_ID_VIPER:
		strcpy(str, "LPX1000 10");
		break;
	case PCI_DEVICE_ID_PFLY:
		strcpy(str, "LP982 2");
		break;
	case PCI_DEVICE_ID_TFLY:
		strcpy(str, "LP1050 2");
		break;
	case PCI_DEVICE_ID_HELIOS:
		strcpy(str, "LP11000 4");
		break;
	case PCI_DEVICE_ID_BMID:
		strcpy(str, "LP1150 4");
		break;
	case PCI_DEVICE_ID_BSMB:
		strcpy(str, "LP111 4");
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		strcpy(str, "LP11000e 4");
		break;
	case PCI_DEVICE_ID_ZMID:
		strcpy(str, "LP1150e 4");
		break;
	case PCI_DEVICE_ID_ZSMB:
		strcpy(str, "LP111e 4");
		break;
	case PCI_DEVICE_ID_LP101:
		strcpy(str, "LP101 2");
		break;
	case PCI_DEVICE_ID_LP10000S:
		strcpy(str, "LP10000-S 2");
		break;
	default:
		memset(str, 0, 16);
		break;
	}
	if (mdp)
		sscanf(str, "%s", mdp);
	if (descp)
		sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
			"Channel Adapter", str);
}

/**************************************************/
/* lpfc_post_buffer                               */
/*                                                */
/* This routine will post count buffers to the    */
/* ring with the QUE_RING_BUF_CN command. This    */
/* allows 3 buffers / command to be posted.       */
/* Returns the number of buffers NOT posted.
*/ 853/**************************************************/ 854int 855lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, 856 int type) 857{ 858 IOCB_t *icmd; 859 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 860 struct lpfc_iocbq *iocb = NULL; 861 struct lpfc_dmabuf *mp1, *mp2; 862 863 cnt += pring->missbufcnt; 864 865 /* While there are buffers to post */ 866 while (cnt > 0) { 867 /* Allocate buffer for command iocb */ 868 spin_lock_irq(phba->host->host_lock); 869 list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list); 870 spin_unlock_irq(phba->host->host_lock); 871 if (iocb == NULL) { 872 pring->missbufcnt = cnt; 873 return cnt; 874 } 875 memset(iocb, 0, sizeof (struct lpfc_iocbq)); 876 icmd = &iocb->iocb; 877 878 /* 2 buffers can be posted per command */ 879 /* Allocate buffer to post */ 880 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 881 if (mp1) 882 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 883 &mp1->phys); 884 if (mp1 == 0 || mp1->virt == 0) { 885 if (mp1) 886 kfree(mp1); 887 spin_lock_irq(phba->host->host_lock); 888 list_add_tail(&iocb->list, lpfc_iocb_list); 889 spin_unlock_irq(phba->host->host_lock); 890 pring->missbufcnt = cnt; 891 return cnt; 892 } 893 894 INIT_LIST_HEAD(&mp1->list); 895 /* Allocate buffer to post */ 896 if (cnt > 1) { 897 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 898 if (mp2) 899 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 900 &mp2->phys); 901 if (mp2 == 0 || mp2->virt == 0) { 902 if (mp2) 903 kfree(mp2); 904 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 905 kfree(mp1); 906 spin_lock_irq(phba->host->host_lock); 907 list_add_tail(&iocb->list, lpfc_iocb_list); 908 spin_unlock_irq(phba->host->host_lock); 909 pring->missbufcnt = cnt; 910 return cnt; 911 } 912 913 INIT_LIST_HEAD(&mp2->list); 914 } else { 915 mp2 = NULL; 916 } 917 918 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 919 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 920 
icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 921 icmd->ulpBdeCount = 1; 922 cnt--; 923 if (mp2) { 924 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 925 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 926 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 927 cnt--; 928 icmd->ulpBdeCount = 2; 929 } 930 931 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 932 icmd->ulpLe = 1; 933 934 spin_lock_irq(phba->host->host_lock); 935 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 936 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 937 kfree(mp1); 938 cnt++; 939 if (mp2) { 940 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 941 kfree(mp2); 942 cnt++; 943 } 944 list_add_tail(&iocb->list, lpfc_iocb_list); 945 pring->missbufcnt = cnt; 946 spin_unlock_irq(phba->host->host_lock); 947 return cnt; 948 } 949 spin_unlock_irq(phba->host->host_lock); 950 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 951 if (mp2) { 952 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 953 } 954 } 955 pring->missbufcnt = 0; 956 return 0; 957} 958 959/************************************************************************/ 960/* */ 961/* lpfc_post_rcv_buf */ 962/* This routine post initial rcv buffers to the configured rings */ 963/* */ 964/************************************************************************/ 965static int 966lpfc_post_rcv_buf(struct lpfc_hba * phba) 967{ 968 struct lpfc_sli *psli = &phba->sli; 969 970 /* Ring 0, ELS / CT buffers */ 971 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 972 /* Ring 2 - FCP no buffers needed */ 973 974 return 0; 975} 976 977#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 978 979/************************************************************************/ 980/* */ 981/* lpfc_sha_init */ 982/* */ 983/************************************************************************/ 984static void 985lpfc_sha_init(uint32_t * HashResultPointer) 986{ 987 HashResultPointer[0] = 0x67452301; 988 HashResultPointer[1] = 0xEFCDAB89; 989 HashResultPointer[2] 
= 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/************************************************************************/
/*                                                                      */
/* lpfc_sha_iterate                                                     */
/*                                                                      */
/************************************************************************/
/* One SHA-1 compression pass: expand the 16 words already loaded in
 * HashWorkingPointer[0..15] to 80 words, then fold them into the
 * 5-word digest in HashResultPointer.
 */
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	/* Message schedule expansion for words 16..79 */
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
								     8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		/* Round-dependent nonlinear function plus round constant */
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/************************************************************************/
/*                                                                      */
/* lpfc_challenge_key                                                   */
/*                                                                      */
/************************************************************************/
/* XOR one word of the adapter's random challenge into the working area */
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/************************************************************************/
/*                                                                      */
/* lpfc_hba_init                                                        */
/*                                                                      */
/************************************************************************/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = phba->wwnn;

	/* Build an 80-word SHA-1 working area seeded with the WWNN and
	 * XORed with the adapter's random challenge, then leave the
	 * resulting hash in hbainit.  Allocation failure skips the init.
	 */
	HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	memset(HashWorking, 0, (80 * sizeof(uint32_t)));
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

/* Empty every discovery node list and reset the per-list counters.
 * NOTE(review): save_bind is not referenced in the visible body.
 */
static void
lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	/* clean up phba - lpfc specific */
	lpfc_can_disctmo(phba);
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
	INIT_LIST_HEAD(&phba->fc_unused_list);
	INIT_LIST_HEAD(&phba->fc_plogi_list);
	INIT_LIST_HEAD(&phba->fc_adisc_list);
	INIT_LIST_HEAD(&phba->fc_reglogin_list);
	INIT_LIST_HEAD(&phba->fc_prli_list);
	INIT_LIST_HEAD(&phba->fc_npr_list);

	phba->fc_map_cnt = 0;
	phba->fc_unmap_cnt = 0;
	phba->fc_plogi_cnt = 0;
	phba->fc_adisc_cnt = 0;
	phba->fc_reglogin_cnt = 0;
	phba->fc_prli_cnt = 0;
	phba->fc_npr_cnt = 0;
	phba->fc_unused_cnt= 0;
	return;
}

/* Timer callback: stop waiting for the link to be re-established */
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;


	/* Re-establishing Link, timer expired */
	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"%d:1300 Re-establishing Link, timer expired "
			"Data: x%x x%x\n",
			phba->brd_no, phba->fc_flag, phba->hba_state);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag &= ~FC_ESTABLISH_LINK;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static int
lpfc_stop_timer(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Instead of a timer, this has been converted to a
	 * deferred processing list.
1176 */ 1177 while (!list_empty(&phba->freebufList)) { 1178 1179 struct lpfc_dmabuf *mp = NULL; 1180 1181 list_remove_head((&phba->freebufList), mp, 1182 struct lpfc_dmabuf, list); 1183 if (mp) { 1184 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1185 kfree(mp); 1186 } 1187 } 1188 1189 del_timer_sync(&phba->fc_estabtmo); 1190 del_timer_sync(&phba->fc_disctmo); 1191 del_timer_sync(&phba->fc_fdmitmo); 1192 del_timer_sync(&phba->els_tmofunc); 1193 psli = &phba->sli; 1194 del_timer_sync(&psli->mbox_tmo); 1195 return(1); 1196} 1197 1198int 1199lpfc_online(struct lpfc_hba * phba) 1200{ 1201 if (!phba) 1202 return 0; 1203 1204 if (!(phba->fc_flag & FC_OFFLINE_MODE)) 1205 return 0; 1206 1207 lpfc_printf_log(phba, 1208 KERN_WARNING, 1209 LOG_INIT, 1210 "%d:0458 Bring Adapter online\n", 1211 phba->brd_no); 1212 1213 if (!lpfc_sli_queue_setup(phba)) 1214 return 1; 1215 1216 if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */ 1217 return 1; 1218 1219 spin_lock_irq(phba->host->host_lock); 1220 phba->fc_flag &= ~FC_OFFLINE_MODE; 1221 spin_unlock_irq(phba->host->host_lock); 1222 1223 return 0; 1224} 1225 1226int 1227lpfc_offline(struct lpfc_hba * phba) 1228{ 1229 struct lpfc_sli_ring *pring; 1230 struct lpfc_sli *psli; 1231 unsigned long iflag; 1232 int i = 0; 1233 1234 if (!phba) 1235 return 0; 1236 1237 if (phba->fc_flag & FC_OFFLINE_MODE) 1238 return 0; 1239 1240 psli = &phba->sli; 1241 pring = &psli->ring[psli->fcp_ring]; 1242 1243 lpfc_linkdown(phba); 1244 1245 /* The linkdown event takes 30 seconds to timeout. */ 1246 while (pring->txcmplq_cnt) { 1247 mdelay(10); 1248 if (i++ > 3000) 1249 break; 1250 } 1251 1252 /* stop all timers associated with this hba */ 1253 lpfc_stop_timer(phba); 1254 phba->work_hba_events = 0; 1255 1256 lpfc_printf_log(phba, 1257 KERN_WARNING, 1258 LOG_INIT, 1259 "%d:0460 Bring Adapter offline\n", 1260 phba->brd_no); 1261 1262 /* Bring down the SLI Layer and cleanup. The HBA is offline 1263 now. 
*/ 1264 lpfc_sli_hba_down(phba); 1265 lpfc_cleanup(phba, 1); 1266 spin_lock_irqsave(phba->host->host_lock, iflag); 1267 phba->fc_flag |= FC_OFFLINE_MODE; 1268 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1269 return 0; 1270} 1271 1272/****************************************************************************** 1273* Function name: lpfc_scsi_free 1274* 1275* Description: Called from lpfc_pci_remove_one free internal driver resources 1276* 1277******************************************************************************/ 1278static int 1279lpfc_scsi_free(struct lpfc_hba * phba) 1280{ 1281 struct lpfc_scsi_buf *sb, *sb_next; 1282 struct lpfc_iocbq *io, *io_next; 1283 1284 spin_lock_irq(phba->host->host_lock); 1285 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1286 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1287 list_del(&sb->list); 1288 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1289 sb->dma_handle); 1290 kfree(sb); 1291 phba->total_scsi_bufs--; 1292 } 1293 1294 /* Release all the lpfc_iocbq entries maintained by this host. 
*/ 1295 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 1296 list_del(&io->list); 1297 kfree(io); 1298 phba->total_iocbq_bufs--; 1299 } 1300 1301 spin_unlock_irq(phba->host->host_lock); 1302 1303 return 0; 1304} 1305 1306 1307static int __devinit 1308lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1309{ 1310 struct Scsi_Host *host; 1311 struct lpfc_hba *phba; 1312 struct lpfc_sli *psli; 1313 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1314 unsigned long bar0map_len, bar2map_len; 1315 int error = -ENODEV, retval; 1316 int i; 1317 1318 if (pci_enable_device(pdev)) 1319 goto out; 1320 if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) 1321 goto out_disable_device; 1322 1323 host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba)); 1324 if (!host) 1325 goto out_release_regions; 1326 1327 phba = (struct lpfc_hba*)host->hostdata; 1328 memset(phba, 0, sizeof (struct lpfc_hba)); 1329 phba->host = host; 1330 1331 phba->fc_flag |= FC_LOADING; 1332 phba->pcidev = pdev; 1333 1334 /* Assign an unused board number */ 1335 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1336 goto out_put_host; 1337 1338 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no); 1339 if (error) 1340 goto out_put_host; 1341 1342 host->unique_id = phba->brd_no; 1343 1344 INIT_LIST_HEAD(&phba->ctrspbuflist); 1345 INIT_LIST_HEAD(&phba->rnidrspbuflist); 1346 INIT_LIST_HEAD(&phba->freebufList); 1347 1348 /* Initialize timers used by driver */ 1349 init_timer(&phba->fc_estabtmo); 1350 phba->fc_estabtmo.function = lpfc_establish_link_tmo; 1351 phba->fc_estabtmo.data = (unsigned long)phba; 1352 init_timer(&phba->fc_disctmo); 1353 phba->fc_disctmo.function = lpfc_disc_timeout; 1354 phba->fc_disctmo.data = (unsigned long)phba; 1355 1356 init_timer(&phba->fc_fdmitmo); 1357 phba->fc_fdmitmo.function = lpfc_fdmi_tmo; 1358 phba->fc_fdmitmo.data = (unsigned long)phba; 1359 init_timer(&phba->els_tmofunc); 1360 phba->els_tmofunc.function = 
lpfc_els_timeout; 1361 phba->els_tmofunc.data = (unsigned long)phba; 1362 psli = &phba->sli; 1363 init_timer(&psli->mbox_tmo); 1364 psli->mbox_tmo.function = lpfc_mbox_timeout; 1365 psli->mbox_tmo.data = (unsigned long)phba; 1366 1367 /* 1368 * Get all the module params for configuring this host and then 1369 * establish the host parameters. 1370 */ 1371 lpfc_get_cfgparam(phba); 1372 1373 host->max_id = LPFC_MAX_TARGET; 1374 host->max_lun = phba->cfg_max_luns; 1375 host->this_id = -1; 1376 1377 /* Initialize all internally managed lists. */ 1378 INIT_LIST_HEAD(&phba->fc_nlpmap_list); 1379 INIT_LIST_HEAD(&phba->fc_nlpunmap_list); 1380 INIT_LIST_HEAD(&phba->fc_unused_list); 1381 INIT_LIST_HEAD(&phba->fc_plogi_list); 1382 INIT_LIST_HEAD(&phba->fc_adisc_list); 1383 INIT_LIST_HEAD(&phba->fc_reglogin_list); 1384 INIT_LIST_HEAD(&phba->fc_prli_list); 1385 INIT_LIST_HEAD(&phba->fc_npr_list); 1386 1387 1388 pci_set_master(pdev); 1389 retval = pci_set_mwi(pdev); 1390 if (retval) 1391 dev_printk(KERN_WARNING, &pdev->dev, 1392 "Warning: pci_set_mwi returned %d\n", retval); 1393 1394 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 1395 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 1396 goto out_idr_remove; 1397 1398 /* 1399 * Get the bus address of Bar0 and Bar2 and the number of bytes 1400 * required by each mapping. 1401 */ 1402 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); 1403 bar0map_len = pci_resource_len(phba->pcidev, 0); 1404 1405 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 1406 bar2map_len = pci_resource_len(phba->pcidev, 2); 1407 1408 /* Map HBA SLIM and Control Registers to a kernel virtual address. 
*/ 1409 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 1410 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 1411 1412 /* Allocate memory for SLI-2 structures */ 1413 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 1414 &phba->slim2p_mapping, GFP_KERNEL); 1415 if (!phba->slim2p) 1416 goto out_iounmap; 1417 1418 1419 /* Initialize the SLI Layer to run with lpfc HBAs. */ 1420 lpfc_sli_setup(phba); 1421 lpfc_sli_queue_setup(phba); 1422 1423 error = lpfc_mem_alloc(phba); 1424 if (error) 1425 goto out_free_slim; 1426 1427 /* Initialize and populate the iocb list per host. */ 1428 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 1429 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { 1430 iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 1431 if (iocbq_entry == NULL) { 1432 printk(KERN_ERR "%s: only allocated %d iocbs of " 1433 "expected %d count. Unloading driver.\n", 1434 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 1435 error = -ENOMEM; 1436 goto out_free_iocbq; 1437 } 1438 1439 memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq)); 1440 spin_lock_irq(phba->host->host_lock); 1441 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 1442 phba->total_iocbq_bufs++; 1443 spin_unlock_irq(phba->host->host_lock); 1444 } 1445 1446 /* Initialize HBA structure */ 1447 phba->fc_edtov = FF_DEF_EDTOV; 1448 phba->fc_ratov = FF_DEF_RATOV; 1449 phba->fc_altov = FF_DEF_ALTOV; 1450 phba->fc_arbtov = FF_DEF_ARBTOV; 1451 1452 INIT_LIST_HEAD(&phba->work_list); 1453 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 1454 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 1455 1456 /* Startup the kernel thread for this host adapter. 
*/ 1457 phba->worker_thread = kthread_run(lpfc_do_work, phba, 1458 "lpfc_worker_%d", phba->brd_no); 1459 if (IS_ERR(phba->worker_thread)) { 1460 error = PTR_ERR(phba->worker_thread); 1461 goto out_free_iocbq; 1462 } 1463 1464 /* We can rely on a queue depth attribute only after SLI HBA setup */ 1465 host->can_queue = phba->cfg_hba_queue_depth - 10; 1466 1467 /* Tell the midlayer we support 16 byte commands */ 1468 host->max_cmd_len = 16; 1469 1470 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 1471 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 1472 1473 host->transportt = lpfc_transport_template; 1474 host->hostdata[0] = (unsigned long)phba; 1475 pci_set_drvdata(pdev, host); 1476 error = scsi_add_host(host, &pdev->dev); 1477 if (error) 1478 goto out_kthread_stop; 1479 1480 error = lpfc_alloc_sysfs_attr(phba); 1481 if (error) 1482 goto out_kthread_stop; 1483 1484 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ, 1485 LPFC_DRIVER_NAME, phba); 1486 if (error) { 1487 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1488 "%d:0451 Enable interrupt handler failed\n", 1489 phba->brd_no); 1490 goto out_free_sysfs_attr; 1491 } 1492 phba->MBslimaddr = phba->slim_memmap_p; 1493 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 1494 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 1495 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 1496 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 1497 1498 error = lpfc_sli_hba_setup(phba); 1499 if (error) 1500 goto out_free_irq; 1501 1502 /* 1503 * set fixed host attributes 1504 * Must done after lpfc_sli_hba_setup() 1505 */ 1506 1507 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn); 1508 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn); 1509 fc_host_supported_classes(host) = FC_COS_CLASS3; 1510 1511 memset(fc_host_supported_fc4s(host), 0, 1512 sizeof(fc_host_supported_fc4s(host))); 1513 fc_host_supported_fc4s(host)[2] = 1; 1514 
fc_host_supported_fc4s(host)[7] = 1; 1515 1516 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host)); 1517 1518 fc_host_supported_speeds(host) = 0; 1519 switch (FC_JEDEC_ID(phba->vpd.rev.biuRev)) { 1520 case VIPER_JEDEC_ID: 1521 fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT; 1522 break; 1523 case HELIOS_JEDEC_ID: 1524 fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT; 1525 /* Fall through */ 1526 case CENTAUR_2G_JEDEC_ID: 1527 case PEGASUS_JEDEC_ID: 1528 case THOR_JEDEC_ID: 1529 fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT; 1530 /* Fall through */ 1531 default: 1532 fc_host_supported_speeds(host) = FC_PORTSPEED_1GBIT; 1533 } 1534 1535 fc_host_maxframe_size(host) = 1536 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1537 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb); 1538 1539 /* This value is also unchanging */ 1540 memset(fc_host_active_fc4s(host), 0, 1541 sizeof(fc_host_active_fc4s(host))); 1542 fc_host_active_fc4s(host)[2] = 1; 1543 fc_host_active_fc4s(host)[7] = 1; 1544 1545 spin_lock_irq(phba->host->host_lock); 1546 phba->fc_flag &= ~FC_LOADING; 1547 spin_unlock_irq(phba->host->host_lock); 1548 return 0; 1549 1550out_free_irq: 1551 lpfc_stop_timer(phba); 1552 phba->work_hba_events = 0; 1553 free_irq(phba->pcidev->irq, phba); 1554out_free_sysfs_attr: 1555 lpfc_free_sysfs_attr(phba); 1556out_kthread_stop: 1557 kthread_stop(phba->worker_thread); 1558out_free_iocbq: 1559 list_for_each_entry_safe(iocbq_entry, iocbq_next, 1560 &phba->lpfc_iocb_list, list) { 1561 spin_lock_irq(phba->host->host_lock); 1562 kfree(iocbq_entry); 1563 phba->total_iocbq_bufs--; 1564 spin_unlock_irq(phba->host->host_lock); 1565 } 1566 lpfc_mem_free(phba); 1567out_free_slim: 1568 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 1569 phba->slim2p_mapping); 1570out_iounmap: 1571 iounmap(phba->ctrl_regs_memmap_p); 1572 iounmap(phba->slim_memmap_p); 1573out_idr_remove: 1574 idr_remove(&lpfc_hba_index, phba->brd_no); 1575out_put_host: 1576 
scsi_host_put(host); 1577out_release_regions: 1578 pci_release_regions(pdev); 1579out_disable_device: 1580 pci_disable_device(pdev); 1581out: 1582 return error; 1583} 1584 1585static void __devexit 1586lpfc_pci_remove_one(struct pci_dev *pdev) 1587{ 1588 struct Scsi_Host *host = pci_get_drvdata(pdev); 1589 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0]; 1590 unsigned long iflag; 1591 1592 lpfc_free_sysfs_attr(phba); 1593 1594 spin_lock_irqsave(phba->host->host_lock, iflag); 1595 phba->fc_flag |= FC_UNLOADING; 1596 1597 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1598 1599 fc_remove_host(phba->host); 1600 scsi_remove_host(phba->host); 1601 1602 kthread_stop(phba->worker_thread); 1603 1604 /* 1605 * Bring down the SLI Layer. This step disable all interrupts, 1606 * clears the rings, discards all mailbox commands, and resets 1607 * the HBA. 1608 */ 1609 lpfc_sli_hba_down(phba); 1610 1611 /* Release the irq reservation */ 1612 free_irq(phba->pcidev->irq, phba); 1613 1614 lpfc_cleanup(phba, 0); 1615 lpfc_stop_timer(phba); 1616 phba->work_hba_events = 0; 1617 1618 /* 1619 * Call scsi_free before mem_free since scsi bufs are released to their 1620 * corresponding pools here. 
1621 */ 1622 lpfc_scsi_free(phba); 1623 lpfc_mem_free(phba); 1624 1625 /* Free resources associated with SLI2 interface */ 1626 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 1627 phba->slim2p, phba->slim2p_mapping); 1628 1629 /* unmap adapter SLIM and Control Registers */ 1630 iounmap(phba->ctrl_regs_memmap_p); 1631 iounmap(phba->slim_memmap_p); 1632 1633 pci_release_regions(phba->pcidev); 1634 pci_disable_device(phba->pcidev); 1635 1636 idr_remove(&lpfc_hba_index, phba->brd_no); 1637 scsi_host_put(phba->host); 1638 1639 pci_set_drvdata(pdev, NULL); 1640} 1641 1642static struct pci_device_id lpfc_id_table[] = { 1643 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 1644 PCI_ANY_ID, PCI_ANY_ID, }, 1645 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 1646 PCI_ANY_ID, PCI_ANY_ID, }, 1647 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 1648 PCI_ANY_ID, PCI_ANY_ID, }, 1649 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 1650 PCI_ANY_ID, PCI_ANY_ID, }, 1651 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 1652 PCI_ANY_ID, PCI_ANY_ID, }, 1653 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 1654 PCI_ANY_ID, PCI_ANY_ID, }, 1655 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 1656 PCI_ANY_ID, PCI_ANY_ID, }, 1657 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 1658 PCI_ANY_ID, PCI_ANY_ID, }, 1659 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 1660 PCI_ANY_ID, PCI_ANY_ID, }, 1661 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 1662 PCI_ANY_ID, PCI_ANY_ID, }, 1663 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 1664 PCI_ANY_ID, PCI_ANY_ID, }, 1665 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 1666 PCI_ANY_ID, PCI_ANY_ID, }, 1667 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 1668 PCI_ANY_ID, PCI_ANY_ID, }, 1669 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 1670 PCI_ANY_ID, PCI_ANY_ID, }, 1671 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 1672 PCI_ANY_ID, PCI_ANY_ID, }, 1673 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 1674 PCI_ANY_ID, PCI_ANY_ID, }, 1675 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 1676 PCI_ANY_ID, PCI_ANY_ID, 
}, 1677 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 1678 PCI_ANY_ID, PCI_ANY_ID, }, 1679 { 0 } 1680}; 1681 1682MODULE_DEVICE_TABLE(pci, lpfc_id_table); 1683 1684static struct pci_driver lpfc_driver = { 1685 .name = LPFC_DRIVER_NAME, 1686 .id_table = lpfc_id_table, 1687 .probe = lpfc_pci_probe_one, 1688 .remove = __devexit_p(lpfc_pci_remove_one), 1689}; 1690 1691static int __init 1692lpfc_init(void) 1693{ 1694 int error = 0; 1695 1696 printk(LPFC_MODULE_DESC "\n"); 1697 printk(LPFC_COPYRIGHT "\n"); 1698 1699 lpfc_transport_template = 1700 fc_attach_transport(&lpfc_transport_functions); 1701 if (!lpfc_transport_template) 1702 return -ENOMEM; 1703 error = pci_register_driver(&lpfc_driver); 1704 if (error) 1705 fc_release_transport(lpfc_transport_template); 1706 1707 return error; 1708} 1709 1710static void __exit 1711lpfc_exit(void) 1712{ 1713 pci_unregister_driver(&lpfc_driver); 1714 fc_release_transport(lpfc_transport_template); 1715} 1716 1717module_init(lpfc_init); 1718module_exit(lpfc_exit); 1719MODULE_LICENSE("GPL"); 1720MODULE_DESCRIPTION(LPFC_MODULE_DESC); 1721MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 1722MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 1723