lpfc_init.c revision 694625c0b322905d6892fad873029f764cd4823f
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h>
35
36#include "lpfc_hw.h"
37#include "lpfc_sli.h"
38#include "lpfc_disc.h"
39#include "lpfc_scsi.h"
40#include "lpfc.h"
41#include "lpfc_logmsg.h"
42#include "lpfc_crtn.h"
43#include "lpfc_version.h"
44
45static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
46static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
47static int lpfc_post_rcv_buf(struct lpfc_hba *);
48
49static struct scsi_transport_template *lpfc_transport_template = NULL;
50static DEFINE_IDR(lpfc_hba_index);
51
52/************************************************************************/
53/*                                                                      */
54/*    lpfc_config_port_prep                                             */
55/*    This routine will do LPFC initialization prior to the             */
56/*    CONFIG_PORT mailbox command. This will be initialized             */
57/*    as a SLI layer callback routine.                                  */
58/*    This routine returns 0 on success or -ERESTART if it wants        */
59/*    the SLI layer to reset the HBA and try again. Any                 */
60/*    other return value indicates an error.                            */
61/*                                                                      */
62/************************************************************************/
63int
64lpfc_config_port_prep(struct lpfc_hba * phba)
65{
66	lpfc_vpd_t *vp = &phba->vpd;
67	int i = 0, rc;
68	LPFC_MBOXQ_t *pmb;
69	MAILBOX_t *mb;
70	char *lpfc_vpd_data = NULL;
71	uint16_t offset = 0;
72	static char licensed[56] =
73		    "key unlock for use with gnu public licensed code only\0";
74	static int init_key = 1;
75
76	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
77	if (!pmb) {
78		phba->hba_state = LPFC_HBA_ERROR;
79		return -ENOMEM;
80	}
81
82	mb = &pmb->mb;
83	phba->hba_state = LPFC_INIT_MBX_CMDS;
84
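	/*
	 * For LC HBAs (see lpfc_is_LC_HBA()), convert the GPL unlock key to
	 * big-endian once, pass it to the adapter in the READ_NVPARM mailbox,
	 * and save the WWNN returned by the command.  The adapter's random
	 * challenge data is captured from READ_REV further below.
	 */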
85	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
86		if (init_key) {
87			uint32_t *ptext = (uint32_t *) licensed;
88
89			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
90				*ptext = cpu_to_be32(*ptext);
91			init_key = 0;
92		}
93
94		lpfc_read_nv(phba, pmb);
95		memset((char*)mb->un.varRDnvp.rsvd3, 0,
96			sizeof (mb->un.varRDnvp.rsvd3));
97		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
98			 sizeof (licensed));
99
100		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
101
102		if (rc != MBX_SUCCESS) {
103			lpfc_printf_log(phba,
104					KERN_ERR,
105					LOG_MBOX,
106					"%d:0324 Config Port initialization "
107					"error, mbxCmd x%x READ_NVPARM, "
108					"mbxStatus x%x\n",
109					phba->brd_no,
110					mb->mbxCommand, mb->mbxStatus);
111			mempool_free(pmb, phba->mbox_mem_pool);
112			return -ERESTART;
113		}
114		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
115		       sizeof (mb->un.varRDnvp.nodename));
116	}
117
118	/* Setup and issue mailbox READ REV command */
119	lpfc_read_rev(phba, pmb);
120	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
121	if (rc != MBX_SUCCESS) {
122		lpfc_printf_log(phba,
123				KERN_ERR,
124				LOG_INIT,
125				"%d:0439 Adapter failed to init, mbxCmd x%x "
126				"READ_REV, mbxStatus x%x\n",
127				phba->brd_no,
128				mb->mbxCommand, mb->mbxStatus);
129		mempool_free( pmb, phba->mbox_mem_pool);
130		return -ERESTART;
131	}
132
133	/*
134	 * The value of rr must be 1 since the driver set the cv field to 1.
135	 * This setting requires the FW to set all revision fields.
136	 */
137	if (mb->un.varRdRev.rr == 0) {
138		vp->rev.rBit = 0;
139		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
140				"%d:0440 Adapter failed to init, READ_REV has "
141				"missing revision information.\n",
142				phba->brd_no);
143		mempool_free(pmb, phba->mbox_mem_pool);
144		return -ERESTART;
145	}
146
147	/* Save information as VPD data */
148	vp->rev.rBit = 1;
149	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
150	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
151	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
152	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
153	vp->rev.biuRev = mb->un.varRdRev.biuRev;
154	vp->rev.smRev = mb->un.varRdRev.smRev;
155	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
156	vp->rev.endecRev = mb->un.varRdRev.endecRev;
157	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
158	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
159	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
160	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
161	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
162	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
163
164	if (lpfc_is_LC_HBA(phba->pcidev->device))
165		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
166						sizeof (phba->RandomData));
167
168	/* Get adapter VPD information */
169	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
170	if (!pmb->context2)
171		goto out_free_mbox;
172	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
173	if (!lpfc_vpd_data)
174		goto out_free_context2;
175
176	do {
177		lpfc_dump_mem(phba, pmb, offset);
178		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
179
180		if (rc != MBX_SUCCESS) {
181			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
182					"%d:0441 VPD not present on adapter, "
183					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
184					phba->brd_no,
185					mb->mbxCommand, mb->mbxStatus);
186			mb->un.varDmp.word_cnt = 0;
187		}
188		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
189			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
190		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
191							mb->un.varDmp.word_cnt);
192		offset += mb->un.varDmp.word_cnt;
193	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
194	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
195
196	kfree(lpfc_vpd_data);
197out_free_context2:
198	kfree(pmb->context2);
199out_free_mbox:
200	mempool_free(pmb, phba->mbox_mem_pool);
201	return 0;
202}
203
204/************************************************************************/
205/*                                                                      */
206/*    lpfc_config_port_post                                             */
207/*    This routine will do LPFC initialization after the                */
208/*    CONFIG_PORT mailbox command. This will be initialized             */
209/*    as a SLI layer callback routine.                                  */
210/*    This routine returns 0 on success. Any other return value         */
211/*    indicates an error.                                               */
212/*                                                                      */
213/************************************************************************/
214int
215lpfc_config_port_post(struct lpfc_hba * phba)
216{
217	LPFC_MBOXQ_t *pmb;
218	MAILBOX_t *mb;
219	struct lpfc_dmabuf *mp;
220	struct lpfc_sli *psli = &phba->sli;
221	uint32_t status, timeout;
222	int i, j, rc;
223
224	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
225	if (!pmb) {
226		phba->hba_state = LPFC_HBA_ERROR;
227		return -ENOMEM;
228	}
229	mb = &pmb->mb;
230
231	lpfc_config_link(phba, pmb);
232	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
233	if (rc != MBX_SUCCESS) {
234		lpfc_printf_log(phba,
235				KERN_ERR,
236				LOG_INIT,
237				"%d:0447 Adapter failed init, mbxCmd x%x "
238				"CONFIG_LINK mbxStatus x%x\n",
239				phba->brd_no,
240				mb->mbxCommand, mb->mbxStatus);
241		phba->hba_state = LPFC_HBA_ERROR;
242		mempool_free( pmb, phba->mbox_mem_pool);
243		return -EIO;
244	}
245
246	/* Get login parameters for NID.  */
247	lpfc_read_sparam(phba, pmb);
248	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
249		lpfc_printf_log(phba,
250				KERN_ERR,
251				LOG_INIT,
252				"%d:0448 Adapter failed init, mbxCmd x%x "
253				"READ_SPARM mbxStatus x%x\n",
254				phba->brd_no,
255				mb->mbxCommand, mb->mbxStatus);
256		phba->hba_state = LPFC_HBA_ERROR;
257		mp = (struct lpfc_dmabuf *) pmb->context1;
258		mempool_free( pmb, phba->mbox_mem_pool);
259		lpfc_mbuf_free(phba, mp->virt, mp->phys);
260		kfree(mp);
261		return -EIO;
262	}
263
264	mp = (struct lpfc_dmabuf *) pmb->context1;
265
266	memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
267	lpfc_mbuf_free(phba, mp->virt, mp->phys);
268	kfree(mp);
269	pmb->context1 = NULL;
270
271	if (phba->cfg_soft_wwnn)
272		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
273	if (phba->cfg_soft_wwpn)
274		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
275	memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
276	       sizeof (struct lpfc_name));
277	memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
278	       sizeof (struct lpfc_name));
279	/* If no serial number in VPD data, use low 6 bytes of WWNN */
280	/* This should be consolidated into parse_vpd ? - mr */
281	if (phba->SerialNumber[0] == 0) {
282		uint8_t *outptr;
283
284		outptr = &phba->fc_nodename.u.s.IEEE[0];
285		for (i = 0; i < 12; i++) {
286			status = *outptr++;
287			j = ((status & 0xf0) >> 4);
288			if (j <= 9)
289				phba->SerialNumber[i] =
290				    (char)((uint8_t) 0x30 + (uint8_t) j);
291			else
292				phba->SerialNumber[i] =
293				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
294			i++;
295			j = (status & 0xf);
296			if (j <= 9)
297				phba->SerialNumber[i] =
298				    (char)((uint8_t) 0x30 + (uint8_t) j);
299			else
300				phba->SerialNumber[i] =
301				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
302		}
303	}
304
305	lpfc_read_config(phba, pmb);
306	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
307		lpfc_printf_log(phba,
308				KERN_ERR,
309				LOG_INIT,
310				"%d:0453 Adapter failed to init, mbxCmd x%x "
311				"READ_CONFIG, mbxStatus x%x\n",
312				phba->brd_no,
313				mb->mbxCommand, mb->mbxStatus);
314		phba->hba_state = LPFC_HBA_ERROR;
315		mempool_free( pmb, phba->mbox_mem_pool);
316		return -EIO;
317	}
318
319	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
320	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
321		phba->cfg_hba_queue_depth =
322			mb->un.varRdConfig.max_xri + 1;
323
324	phba->lmt = mb->un.varRdConfig.lmt;
325
326	/* Get the default values for Model Name and Description */
327	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
328
329	if ((phba->cfg_link_speed > LINK_SPEED_10G)
330	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
331		&& !(phba->lmt & LMT_1Gb))
332	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
333		&& !(phba->lmt & LMT_2Gb))
334	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
335		&& !(phba->lmt & LMT_4Gb))
336	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
337		&& !(phba->lmt & LMT_8Gb))
338	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
339		&& !(phba->lmt & LMT_10Gb))) {
340		/* Reset link speed to auto */
341		lpfc_printf_log(phba,
342			KERN_WARNING,
343			LOG_LINK_EVENT,
344			"%d:1302 Invalid speed for this board: "
345			"Reset link speed to auto: x%x\n",
346			phba->brd_no,
347			phba->cfg_link_speed);
348		phba->cfg_link_speed = LINK_SPEED_AUTO;
349	}
350
351	phba->hba_state = LPFC_LINK_DOWN;
352
353	/* Only process IOCBs on ring 0 till hba_state is READY */
354	if (psli->ring[psli->extra_ring].cmdringaddr)
355		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
356	if (psli->ring[psli->fcp_ring].cmdringaddr)
357		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
358	if (psli->ring[psli->next_ring].cmdringaddr)
359		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
360
361	/* Post receive buffers for desired rings */
362	lpfc_post_rcv_buf(phba);
363
364	/* Enable appropriate host interrupts */
365	spin_lock_irq(phba->host->host_lock);
366	status = readl(phba->HCregaddr);
367	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
368	if (psli->num_rings > 0)
369		status |= HC_R0INT_ENA;
370	if (psli->num_rings > 1)
371		status |= HC_R1INT_ENA;
372	if (psli->num_rings > 2)
373		status |= HC_R2INT_ENA;
374	if (psli->num_rings > 3)
375		status |= HC_R3INT_ENA;
376
377	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
378	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
379		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
380
381	writel(status, phba->HCregaddr);
382	readl(phba->HCregaddr); /* flush */
383	spin_unlock_irq(phba->host->host_lock);
384
385	/*
386	 * Set up the ring 0 (ELS) timeout handler
387	 */
388	timeout = phba->fc_ratov << 1;
389	mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
390
391	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
392	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
393	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
394	lpfc_set_loopback_flag(phba);
395	if (rc != MBX_SUCCESS) {
396		lpfc_printf_log(phba,
397				KERN_ERR,
398				LOG_INIT,
399				"%d:0454 Adapter failed to init, mbxCmd x%x "
400				"INIT_LINK, mbxStatus x%x\n",
401				phba->brd_no,
402				mb->mbxCommand, mb->mbxStatus);
403
404		/* Clear all interrupt enable conditions */
405		writel(0, phba->HCregaddr);
406		readl(phba->HCregaddr); /* flush */
407		/* Clear all pending interrupts */
408		writel(0xffffffff, phba->HAregaddr);
409		readl(phba->HAregaddr); /* flush */
410
411		phba->hba_state = LPFC_HBA_ERROR;
412		if (rc != MBX_BUSY)
413			mempool_free(pmb, phba->mbox_mem_pool);
414		return -EIO;
415	}
416	/* MBOX buffer will be freed in mbox compl */
417
418	return (0);
419}
420
421/************************************************************************/
422/*                                                                      */
423/*    lpfc_hba_down_prep                                                */
424/*    This routine will do LPFC uninitialization before the             */
425/*    HBA is reset when bringing down the SLI Layer. This will be       */
426/*    initialized as a SLI layer callback routine.                      */
427/*    This routine returns 0 on success. Any other return value         */
428/*    indicates an error.                                               */
429/*                                                                      */
430/************************************************************************/
431int
432lpfc_hba_down_prep(struct lpfc_hba * phba)
433{
434	/* Disable interrupts */
435	writel(0, phba->HCregaddr);
436	readl(phba->HCregaddr); /* flush */
437
438	/* Cleanup potential discovery resources */
439	lpfc_els_flush_rscn(phba);
440	lpfc_els_flush_cmd(phba);
441	lpfc_disc_flush_list(phba);
442
443	return (0);
444}
445
446/************************************************************************/
447/*                                                                      */
448/*    lpfc_hba_down_post                                                */
449/*    This routine will do uninitialization after the HBA is reset      */
450/*    when bringing down the SLI Layer.                                 */
451/*    This routine returns 0 on success. Any other return value         */
452/*    indicates an error.                                               */
453/*                                                                      */
454/************************************************************************/
455int
456lpfc_hba_down_post(struct lpfc_hba * phba)
457{
458	struct lpfc_sli *psli = &phba->sli;
459	struct lpfc_sli_ring *pring;
460	struct lpfc_dmabuf *mp, *next_mp;
461	int i;
462
463	/* Cleanup preposted buffers on the ELS ring */
464	pring = &psli->ring[LPFC_ELS_RING];
465	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
466		list_del(&mp->list);
467		pring->postbufq_cnt--;
468		lpfc_mbuf_free(phba, mp->virt, mp->phys);
469		kfree(mp);
470	}
471
472	for (i = 0; i < psli->num_rings; i++) {
473		pring = &psli->ring[i];
474		lpfc_sli_abort_iocb_ring(phba, pring);
475	}
476
477	return 0;
478}
479
480/************************************************************************/
481/*                                                                      */
482/*    lpfc_handle_eratt                                                 */
483/*    This routine will handle processing a Host Attention              */
484/*    Error Status event. This will be initialized                      */
485/*    as a SLI layer callback routine.                                  */
486/*                                                                      */
487/************************************************************************/
488void
489lpfc_handle_eratt(struct lpfc_hba * phba)
490{
491	struct lpfc_sli *psli = &phba->sli;
492	struct lpfc_sli_ring  *pring;
493	uint32_t event_data;
494	/* If the pci channel is offline, ignore possible errors,
495	 * since we cannot communicate with the pci card anyway. */
496	if (pci_channel_offline(phba->pcidev))
497		return;
498
499	if (phba->work_hs & HS_FFER6 ||
500	    phba->work_hs & HS_FFER5) {
501		/* Re-establishing Link */
502		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
503				"%d:1301 Re-establishing Link "
504				"Data: x%x x%x x%x\n",
505				phba->brd_no, phba->work_hs,
506				phba->work_status[0], phba->work_status[1]);
507		spin_lock_irq(phba->host->host_lock);
508		phba->fc_flag |= FC_ESTABLISH_LINK;
509		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
510		spin_unlock_irq(phba->host->host_lock);
511
512		/*
513		 * Firmware stops when it triggers the error attention with
514		 * HS_FFER6, which can cause I/Os to be dropped by the firmware.
515		 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI
516		 * layer retry them after re-establishing the link.
517		 */
518		pring = &psli->ring[psli->fcp_ring];
519		lpfc_sli_abort_iocb_ring(phba, pring);
520
521
522		/*
523		 * There was a firmware error.  Take the hba offline and then
524		 * attempt to restart it.
525		 */
526		lpfc_offline_prep(phba);
527		lpfc_offline(phba);
528		lpfc_sli_brdrestart(phba);
529		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
530			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
531			lpfc_unblock_mgmt_io(phba);
532			return;
533		}
534		lpfc_unblock_mgmt_io(phba);
535	} else {
536		/* The if clause above forces this code path when the status
537		 * failure is a value other than FFER6.  Do not call the offline
538		 * path twice.  This is the adapter hardware error path.
539		 */
540		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
541				"%d:0457 Adapter Hardware Error "
542				"Data: x%x x%x x%x\n",
543				phba->brd_no, phba->work_hs,
544				phba->work_status[0], phba->work_status[1]);
545
546		event_data = FC_REG_DUMP_EVENT;
547		fc_host_post_vendor_event(phba->host, fc_get_event_number(),
548				sizeof(event_data), (char *) &event_data,
549				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
550
551		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
552		lpfc_offline_prep(phba);
553		lpfc_offline(phba);
554		lpfc_unblock_mgmt_io(phba);
555		phba->hba_state = LPFC_HBA_ERROR;
556		lpfc_hba_down_post(phba);
557	}
558}
559
560/************************************************************************/
561/*                                                                      */
562/*    lpfc_handle_latt                                                  */
563/*    This routine will handle processing a Host Attention              */
564/*    Link Status event. This will be initialized                       */
565/*    as a SLI layer callback routine.                                  */
566/*                                                                      */
567/************************************************************************/
568void
569lpfc_handle_latt(struct lpfc_hba * phba)
570{
571	struct lpfc_sli *psli = &phba->sli;
572	LPFC_MBOXQ_t *pmb;
573	volatile uint32_t control;
574	struct lpfc_dmabuf *mp;
575	int rc = -ENOMEM;
576
577	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
578	if (!pmb)
579		goto lpfc_handle_latt_err_exit;
580
581	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
582	if (!mp)
583		goto lpfc_handle_latt_free_pmb;
584
585	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
586	if (!mp->virt)
587		goto lpfc_handle_latt_free_mp;
588
589	rc = -EIO;
590
591	/* Cleanup any outstanding ELS commands */
592	lpfc_els_flush_cmd(phba);
593
594	psli->slistat.link_event++;
595	lpfc_read_la(phba, pmb, mp);
596	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
597	rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
598	if (rc == MBX_NOT_FINISHED)
599		goto lpfc_handle_latt_free_mbuf;
600
601	/* Clear Link Attention in HA REG */
602	spin_lock_irq(phba->host->host_lock);
603	writel(HA_LATT, phba->HAregaddr);
604	readl(phba->HAregaddr); /* flush */
605	spin_unlock_irq(phba->host->host_lock);
606
607	return;
608
609lpfc_handle_latt_free_mbuf:
610	lpfc_mbuf_free(phba, mp->virt, mp->phys);
611lpfc_handle_latt_free_mp:
612	kfree(mp);
613lpfc_handle_latt_free_pmb:
614	mempool_free(pmb, phba->mbox_mem_pool);
615lpfc_handle_latt_err_exit:
616	/* Enable Link attention interrupts */
617	spin_lock_irq(phba->host->host_lock);
618	psli->sli_flag |= LPFC_PROCESS_LA;
619	control = readl(phba->HCregaddr);
620	control |= HC_LAINT_ENA;
621	writel(control, phba->HCregaddr);
622	readl(phba->HCregaddr); /* flush */
623
624	/* Clear Link Attention in HA REG */
625	writel(HA_LATT, phba->HAregaddr);
626	readl(phba->HAregaddr); /* flush */
627	spin_unlock_irq(phba->host->host_lock);
628	lpfc_linkdown(phba);
629	phba->hba_state = LPFC_HBA_ERROR;
630
631	/* The other case is an error from issue_mbox */
632	if (rc == -ENOMEM)
633		lpfc_printf_log(phba,
634				KERN_WARNING,
635				LOG_MBOX,
636			        "%d:0300 READ_LA: no buffers\n",
637				phba->brd_no);
638
639	return;
640}
641
642/************************************************************************/
643/*                                                                      */
644/*   lpfc_parse_vpd                                                     */
645/*   This routine will parse the VPD data                               */
646/*                                                                      */
647/************************************************************************/
648static int
649lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
650{
651	uint8_t lenlo, lenhi;
652	int Length;
653	int i, j;
654	int finished = 0;
655	int index = 0;
656
657	if (!vpd)
658		return 0;
659
660	/* Vital Product */
661	lpfc_printf_log(phba,
662			KERN_INFO,
663			LOG_INIT,
664			"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
665			phba->brd_no,
666			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
667			(uint32_t) vpd[3]);
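	/*
	 * Walk the standard PCI VPD resource tags: 0x82 (identifier string)
	 * and 0x91 (read/write VPD-W) are skipped, 0x90 (read-only VPD-R)
	 * holds the SN and Vx keywords parsed below, and 0x78 is the end tag.
	 */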
668	while (!finished && (index < (len - 4))) {
669		switch (vpd[index]) {
670		case 0x82:
671		case 0x91:
672			index += 1;
673			lenlo = vpd[index];
674			index += 1;
675			lenhi = vpd[index];
676			index += 1;
677			i = ((((unsigned short)lenhi) << 8) + lenlo);
678			index += i;
679			break;
680		case 0x90:
681			index += 1;
682			lenlo = vpd[index];
683			index += 1;
684			lenhi = vpd[index];
685			index += 1;
686			Length = ((((unsigned short)lenhi) << 8) + lenlo);
687			if (Length > len - index)
688				Length = len - index;
689			while (Length > 0) {
690			/* Look for Serial Number */
691			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
692				index += 2;
693				i = vpd[index];
694				index += 1;
695				j = 0;
696				Length -= (3+i);
697				while(i--) {
698					phba->SerialNumber[j++] = vpd[index++];
699					if (j == 31)
700						break;
701				}
702				phba->SerialNumber[j] = 0;
703				continue;
704			}
705			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
706				phba->vpd_flag |= VPD_MODEL_DESC;
707				index += 2;
708				i = vpd[index];
709				index += 1;
710				j = 0;
711				Length -= (3+i);
712				while(i--) {
713					phba->ModelDesc[j++] = vpd[index++];
714					if (j == 255)
715						break;
716				}
717				phba->ModelDesc[j] = 0;
718				continue;
719			}
720			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
721				phba->vpd_flag |= VPD_MODEL_NAME;
722				index += 2;
723				i = vpd[index];
724				index += 1;
725				j = 0;
726				Length -= (3+i);
727				while(i--) {
728					phba->ModelName[j++] = vpd[index++];
729					if (j == 79)
730						break;
731				}
732				phba->ModelName[j] = 0;
733				continue;
734			}
735			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
736				phba->vpd_flag |= VPD_PROGRAM_TYPE;
737				index += 2;
738				i = vpd[index];
739				index += 1;
740				j = 0;
741				Length -= (3+i);
742				while(i--) {
743					phba->ProgramType[j++] = vpd[index++];
744					if (j == 255)
745						break;
746				}
747				phba->ProgramType[j] = 0;
748				continue;
749			}
750			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
751				phba->vpd_flag |= VPD_PORT;
752				index += 2;
753				i = vpd[index];
754				index += 1;
755				j = 0;
756				Length -= (3+i);
757				while(i--) {
758				phba->Port[j++] = vpd[index++];
759				if (j == 19)
760					break;
761				}
762				phba->Port[j] = 0;
763				continue;
764			}
765			else {
766				index += 2;
767				i = vpd[index];
768				index += 1;
769				index += i;
770				Length -= (3 + i);
771			}
772		}
773		finished = 0;
774		break;
775		case 0x78:
776			finished = 1;
777			break;
778		default:
779			index ++;
780			break;
781		}
782	}
783
784	return(1);
785}
786
787static void
788lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
789{
790	lpfc_vpd_t *vp;
791	uint16_t dev_id = phba->pcidev->device;
792	int max_speed;
793	struct {
794		char * name;
795		int    max_speed;
796		char * bus;
797	} m = {"<Unknown>", 0, ""};
798
799	if (mdp && mdp[0] != '\0'
800		&& descp && descp[0] != '\0')
801		return;
802
803	if (phba->lmt & LMT_10Gb)
804		max_speed = 10;
805	else if (phba->lmt & LMT_8Gb)
806		max_speed = 8;
807	else if (phba->lmt & LMT_4Gb)
808		max_speed = 4;
809	else if (phba->lmt & LMT_2Gb)
810		max_speed = 2;
811	else
812		max_speed = 1;
813
814	vp = &phba->vpd;
815
816	switch (dev_id) {
817	case PCI_DEVICE_ID_FIREFLY:
818		m = (typeof(m)){"LP6000", max_speed, "PCI"};
819		break;
820	case PCI_DEVICE_ID_SUPERFLY:
821		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
822			m = (typeof(m)){"LP7000", max_speed,  "PCI"};
823		else
824			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
825		break;
826	case PCI_DEVICE_ID_DRAGONFLY:
827		m = (typeof(m)){"LP8000", max_speed, "PCI"};
828		break;
829	case PCI_DEVICE_ID_CENTAUR:
830		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
831			m = (typeof(m)){"LP9002", max_speed, "PCI"};
832		else
833			m = (typeof(m)){"LP9000", max_speed, "PCI"};
834		break;
835	case PCI_DEVICE_ID_RFLY:
836		m = (typeof(m)){"LP952", max_speed, "PCI"};
837		break;
838	case PCI_DEVICE_ID_PEGASUS:
839		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
840		break;
841	case PCI_DEVICE_ID_THOR:
842		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
843		break;
844	case PCI_DEVICE_ID_VIPER:
845		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
846		break;
847	case PCI_DEVICE_ID_PFLY:
848		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
849		break;
850	case PCI_DEVICE_ID_TFLY:
851		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
852		break;
853	case PCI_DEVICE_ID_HELIOS:
854		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
855		break;
856	case PCI_DEVICE_ID_HELIOS_SCSP:
857		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
858		break;
859	case PCI_DEVICE_ID_HELIOS_DCSP:
860		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
861		break;
862	case PCI_DEVICE_ID_NEPTUNE:
863		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
864		break;
865	case PCI_DEVICE_ID_NEPTUNE_SCSP:
866		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
867		break;
868	case PCI_DEVICE_ID_NEPTUNE_DCSP:
869		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
870		break;
871	case PCI_DEVICE_ID_BMID:
872		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
873		break;
874	case PCI_DEVICE_ID_BSMB:
875		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
876		break;
877	case PCI_DEVICE_ID_ZEPHYR:
878		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
879		break;
880	case PCI_DEVICE_ID_ZEPHYR_SCSP:
881		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
882		break;
883	case PCI_DEVICE_ID_ZEPHYR_DCSP:
884		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
885		break;
886	case PCI_DEVICE_ID_ZMID:
887		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
888		break;
889	case PCI_DEVICE_ID_ZSMB:
890		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
891		break;
892	case PCI_DEVICE_ID_LP101:
893		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
894		break;
895	case PCI_DEVICE_ID_LP10000S:
896		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
897		break;
898	case PCI_DEVICE_ID_LP11000S:
899		m = (typeof(m)){"LP11000-S", max_speed,
900			"PCI-X2"};
901		break;
902	case PCI_DEVICE_ID_LPE11000S:
903		m = (typeof(m)){"LPe11000-S", max_speed,
904			"PCIe"};
905		break;
906	case PCI_DEVICE_ID_SAT:
907		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
908		break;
909	case PCI_DEVICE_ID_SAT_MID:
910		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
911		break;
912	case PCI_DEVICE_ID_SAT_SMB:
913		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
914		break;
915	case PCI_DEVICE_ID_SAT_DCSP:
916		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
917		break;
918	case PCI_DEVICE_ID_SAT_SCSP:
919		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
920		break;
921	case PCI_DEVICE_ID_SAT_S:
922		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
923		break;
924	default:
925		m = (typeof(m)){ NULL };
926		break;
927	}
928
929	if (mdp && mdp[0] == '\0')
930		snprintf(mdp, 79,"%s", m.name);
931	if (descp && descp[0] == '\0')
932		snprintf(descp, 255,
933			 "Emulex %s %dGb %s Fibre Channel Adapter",
934			 m.name, m.max_speed, m.bus);
935}
936
937/**************************************************/
938/*   lpfc_post_buffer                             */
939/*                                                */
940/*   This routine will post count buffers to the  */
941/*   ring with the QUE_RING_BUF_CN command. This  */
942/*   allows 2 buffers / command to be posted.     */
943/*   Returns the number of buffers NOT posted.    */
944/**************************************************/
945int
946lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
947		 int type)
948{
949	IOCB_t *icmd;
950	struct lpfc_iocbq *iocb;
951	struct lpfc_dmabuf *mp1, *mp2;
952
953	cnt += pring->missbufcnt;
954
955	/* While there are buffers to post */
956	while (cnt > 0) {
957		/* Allocate buffer for  command iocb */
958		spin_lock_irq(phba->host->host_lock);
959		iocb = lpfc_sli_get_iocbq(phba);
960		spin_unlock_irq(phba->host->host_lock);
961		if (iocb == NULL) {
962			pring->missbufcnt = cnt;
963			return cnt;
964		}
965		icmd = &iocb->iocb;
966
967		/* 2 buffers can be posted per command */
968		/* Allocate buffer to post */
969		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
970		if (mp1)
971		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
972						&mp1->phys);
973		if (!mp1 || !mp1->virt) {
974			kfree(mp1);
975			spin_lock_irq(phba->host->host_lock);
976			lpfc_sli_release_iocbq(phba, iocb);
977			spin_unlock_irq(phba->host->host_lock);
978			pring->missbufcnt = cnt;
979			return cnt;
980		}
981
982		INIT_LIST_HEAD(&mp1->list);
983		/* Allocate buffer to post */
984		if (cnt > 1) {
985			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
986			if (mp2)
987				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
988							    &mp2->phys);
989			if (!mp2 || !mp2->virt) {
990				kfree(mp2);
991				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
992				kfree(mp1);
993				spin_lock_irq(phba->host->host_lock);
994				lpfc_sli_release_iocbq(phba, iocb);
995				spin_unlock_irq(phba->host->host_lock);
996				pring->missbufcnt = cnt;
997				return cnt;
998			}
999
1000			INIT_LIST_HEAD(&mp2->list);
1001		} else {
1002			mp2 = NULL;
1003		}
1004
1005		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1006		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1007		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1008		icmd->ulpBdeCount = 1;
1009		cnt--;
1010		if (mp2) {
1011			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1012			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1013			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1014			cnt--;
1015			icmd->ulpBdeCount = 2;
1016		}
1017
1018		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1019		icmd->ulpLe = 1;
1020
1021		spin_lock_irq(phba->host->host_lock);
1022		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
1023			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1024			kfree(mp1);
1025			cnt++;
1026			if (mp2) {
1027				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1028				kfree(mp2);
1029				cnt++;
1030			}
1031			lpfc_sli_release_iocbq(phba, iocb);
1032			pring->missbufcnt = cnt;
1033			spin_unlock_irq(phba->host->host_lock);
1034			return cnt;
1035		}
1036		spin_unlock_irq(phba->host->host_lock);
1037		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1038		if (mp2) {
1039			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1040		}
1041	}
1042	pring->missbufcnt = 0;
1043	return 0;
1044}
1045
1046/************************************************************************/
1047/*                                                                      */
1048/*   lpfc_post_rcv_buf                                                  */
1049/*   This routine posts initial rcv buffers to the configured rings     */
1050/*                                                                      */
1051/************************************************************************/
1052static int
1053lpfc_post_rcv_buf(struct lpfc_hba * phba)
1054{
1055	struct lpfc_sli *psli = &phba->sli;
1056
1057	/* Ring 0, ELS / CT buffers */
1058	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
1059	/* Ring 2 - FCP no buffers needed */
1060
1061	return 0;
1062}
1063
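/* S(N,V): rotate the 32-bit value V left by N bits (SHA-1 round helper) */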
1064#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1065
1066/************************************************************************/
1067/*                                                                      */
1068/*   lpfc_sha_init                                                      */
1069/*                                                                      */
1070/************************************************************************/
1071static void
1072lpfc_sha_init(uint32_t * HashResultPointer)
1073{
1074	HashResultPointer[0] = 0x67452301;
1075	HashResultPointer[1] = 0xEFCDAB89;
1076	HashResultPointer[2] = 0x98BADCFE;
1077	HashResultPointer[3] = 0x10325476;
1078	HashResultPointer[4] = 0xC3D2E1F0;
1079}
1080
1081/************************************************************************/
1082/*                                                                      */
1083/*   lpfc_sha_iterate                                                   */
1084/*                                                                      */
1085/************************************************************************/
1086static void
1087lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1088{
1089	int t;
1090	uint32_t TEMP;
1091	uint32_t A, B, C, D, E;
1092	t = 16;
1093	do {
1094		HashWorkingPointer[t] =
1095		    S(1,
1096		      HashWorkingPointer[t - 3] ^
1097		      HashWorkingPointer[t - 8] ^
1098		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1099	} while (++t <= 79);
1100	t = 0;
1101	A = HashResultPointer[0];
1102	B = HashResultPointer[1];
1103	C = HashResultPointer[2];
1104	D = HashResultPointer[3];
1105	E = HashResultPointer[4];
1106
1107	do {
1108		if (t < 20) {
1109			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1110		} else if (t < 40) {
1111			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1112		} else if (t < 60) {
1113			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1114		} else {
1115			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1116		}
1117		TEMP += S(5, A) + E + HashWorkingPointer[t];
1118		E = D;
1119		D = C;
1120		C = S(30, B);
1121		B = A;
1122		A = TEMP;
1123	} while (++t <= 79);
1124
1125	HashResultPointer[0] += A;
1126	HashResultPointer[1] += B;
1127	HashResultPointer[2] += C;
1128	HashResultPointer[3] += D;
1129	HashResultPointer[4] += E;
1130
1131}
1132
1133/************************************************************************/
1134/*                                                                      */
1135/*   lpfc_challenge_key                                                 */
1136/*                                                                      */
1137/************************************************************************/
1138static void
1139lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1140{
1141	*HashWorking = (*RandomChallenge ^ *HashWorking);
1142}
1143
1144/************************************************************************/
1145/*                                                                      */
1146/*   lpfc_hba_init                                                      */
1147/*                                                                      */
1148/************************************************************************/
1149void
1150lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1151{
1152	int t;
1153	uint32_t *HashWorking;
1154	uint32_t *pwwnn = phba->wwnn;
1155
1156	HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
1157	if (!HashWorking)
1158		return;
1159
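	/*
	 * Seed the 80-word SHA-1 working buffer with the node WWN (words 0-1
	 * and 78-79), fold in the adapter's random challenge data, then run
	 * the SHA-1 rounds to produce the hbainit value.
	 */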
1160	memset(HashWorking, 0, (80 * sizeof(uint32_t)));
1161	HashWorking[0] = HashWorking[78] = *pwwnn++;
1162	HashWorking[1] = HashWorking[79] = *pwwnn;
1163
1164	for (t = 0; t < 7; t++)
1165		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1166
1167	lpfc_sha_init(hbainit);
1168	lpfc_sha_iterate(hbainit, HashWorking);
1169	kfree(HashWorking);
1170}
1171
1172static void
1173lpfc_cleanup(struct lpfc_hba * phba)
1174{
1175	struct lpfc_nodelist *ndlp, *next_ndlp;
1176
1177	/* clean up phba - lpfc specific */
1178	lpfc_can_disctmo(phba);
1179	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
1180		lpfc_nlp_put(ndlp);
1181
1182	INIT_LIST_HEAD(&phba->fc_nodes);
1183
1184	return;
1185}
1186
1187static void
1188lpfc_establish_link_tmo(unsigned long ptr)
1189{
1190	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
1191	unsigned long iflag;
1192
1193
1194	/* Re-establishing Link, timer expired */
1195	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1196			"%d:1300 Re-establishing Link, timer expired "
1197			"Data: x%x x%x\n",
1198			phba->brd_no, phba->fc_flag, phba->hba_state);
1199	spin_lock_irqsave(phba->host->host_lock, iflag);
1200	phba->fc_flag &= ~FC_ESTABLISH_LINK;
1201	spin_unlock_irqrestore(phba->host->host_lock, iflag);
1202}
1203
1204static int
1205lpfc_stop_timer(struct lpfc_hba * phba)
1206{
1207	struct lpfc_sli *psli = &phba->sli;
1208
1209	del_timer_sync(&phba->fcp_poll_timer);
1210	del_timer_sync(&phba->fc_estabtmo);
1211	del_timer_sync(&phba->fc_disctmo);
1212	del_timer_sync(&phba->fc_fdmitmo);
1213	del_timer_sync(&phba->els_tmofunc);
1214	psli = &phba->sli;
1215	del_timer_sync(&psli->mbox_tmo);
1216	return(1);
1217}
1218
1219int
1220lpfc_online(struct lpfc_hba * phba)
1221{
1222	if (!phba)
1223		return 0;
1224
1225	if (!(phba->fc_flag & FC_OFFLINE_MODE))
1226		return 0;
1227
1228	lpfc_printf_log(phba,
1229		       KERN_WARNING,
1230		       LOG_INIT,
1231		       "%d:0458 Bring Adapter online\n",
1232		       phba->brd_no);
1233
1234	lpfc_block_mgmt_io(phba);
1235
1236	if (!lpfc_sli_queue_setup(phba)) {
1237		lpfc_unblock_mgmt_io(phba);
1238		return 1;
1239	}
1240
1241	if (lpfc_sli_hba_setup(phba)) {	/* Initialize the HBA */
1242		lpfc_unblock_mgmt_io(phba);
1243		return 1;
1244	}
1245
1246	spin_lock_irq(phba->host->host_lock);
1247	phba->fc_flag &= ~FC_OFFLINE_MODE;
1248	spin_unlock_irq(phba->host->host_lock);
1249
1250	lpfc_unblock_mgmt_io(phba);
1251	return 0;
1252}
1253
1254void
1255lpfc_block_mgmt_io(struct lpfc_hba * phba)
1256{
1257	unsigned long iflag;
1258
1259	spin_lock_irqsave(phba->host->host_lock, iflag);
1260	phba->fc_flag |= FC_BLOCK_MGMT_IO;
1261	spin_unlock_irqrestore(phba->host->host_lock, iflag);
1262}
1263
1264void
1265lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1266{
1267	unsigned long iflag;
1268
1269	spin_lock_irqsave(phba->host->host_lock, iflag);
1270	phba->fc_flag &= ~FC_BLOCK_MGMT_IO;
1271	spin_unlock_irqrestore(phba->host->host_lock, iflag);
1272}
1273
1274void
1275lpfc_offline_prep(struct lpfc_hba * phba)
1276{
1277	struct lpfc_nodelist  *ndlp, *next_ndlp;
1278
1279	if (phba->fc_flag & FC_OFFLINE_MODE)
1280		return;
1281
1282	lpfc_block_mgmt_io(phba);
1283
1284	lpfc_linkdown(phba);
1285
1286	/* Issue an unreg_login to all nodes */
1287	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
1288		if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
1289			lpfc_unreg_rpi(phba, ndlp);
1290
1291	lpfc_sli_flush_mbox_queue(phba);
1292}
1293
1294void
1295lpfc_offline(struct lpfc_hba * phba)
1296{
1297	unsigned long iflag;
1298
1299	if (phba->fc_flag & FC_OFFLINE_MODE)
1300		return;
1301
1302	/* stop all timers associated with this hba */
1303	lpfc_stop_timer(phba);
1304
1305	lpfc_printf_log(phba,
1306		       KERN_WARNING,
1307		       LOG_INIT,
1308		       "%d:0460 Bring Adapter offline\n",
1309		       phba->brd_no);
1310
1311	/* Bring down the SLI Layer and cleanup.  The HBA is offline
1312	   now.  */
1313	lpfc_sli_hba_down(phba);
1314	lpfc_cleanup(phba);
1315	spin_lock_irqsave(phba->host->host_lock, iflag);
1316	phba->work_hba_events = 0;
1317	phba->work_ha = 0;
1318	phba->fc_flag |= FC_OFFLINE_MODE;
1319	spin_unlock_irqrestore(phba->host->host_lock, iflag);
1320}
1321
1322/******************************************************************************
1323* Function name: lpfc_scsi_free
1324*
1325* Description: Called from lpfc_pci_remove_one to free internal driver resources
1326*
1327******************************************************************************/
1328static int
1329lpfc_scsi_free(struct lpfc_hba * phba)
1330{
1331	struct lpfc_scsi_buf *sb, *sb_next;
1332	struct lpfc_iocbq *io, *io_next;
1333
1334	spin_lock_irq(phba->host->host_lock);
1335	/* Release all the lpfc_scsi_bufs maintained by this host. */
1336	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1337		list_del(&sb->list);
1338		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1339								sb->dma_handle);
1340		kfree(sb);
1341		phba->total_scsi_bufs--;
1342	}
1343
1344	/* Release all the lpfc_iocbq entries maintained by this host. */
1345	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
1346		list_del(&io->list);
1347		kfree(io);
1348		phba->total_iocbq_bufs--;
1349	}
1350
1351	spin_unlock_irq(phba->host->host_lock);
1352
1353	return 0;
1354}
1355
1356void lpfc_remove_device(struct lpfc_hba *phba)
1357{
1358	unsigned long iflag;
1359
1360	lpfc_free_sysfs_attr(phba);
1361
1362	spin_lock_irqsave(phba->host->host_lock, iflag);
1363	phba->fc_flag |= FC_UNLOADING;
1364
1365	spin_unlock_irqrestore(phba->host->host_lock, iflag);
1366
1367	fc_remove_host(phba->host);
1368	scsi_remove_host(phba->host);
1369
1370	kthread_stop(phba->worker_thread);
1371
1372	/*
1373	 * Bring down the SLI Layer. This step disables all interrupts,
1374	 * clears the rings, discards all mailbox commands, and resets
1375	 * the HBA.
1376	 */
1377	lpfc_sli_hba_down(phba);
1378	lpfc_sli_brdrestart(phba);
1379
1380	/* Release the irq reservation */
1381	free_irq(phba->pcidev->irq, phba);
1382	pci_disable_msi(phba->pcidev);
1383
1384	lpfc_cleanup(phba);
1385	lpfc_stop_timer(phba);
1386	phba->work_hba_events = 0;
1387
1388	/*
1389	 * Call scsi_free before mem_free since scsi bufs are released to their
1390	 * corresponding pools here.
1391	 */
1392	lpfc_scsi_free(phba);
1393	lpfc_mem_free(phba);
1394
1395	/* Free resources associated with SLI2 interface */
1396	dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
1397			  phba->slim2p, phba->slim2p_mapping);
1398
1399	/* unmap adapter SLIM and Control Registers */
1400	iounmap(phba->ctrl_regs_memmap_p);
1401	iounmap(phba->slim_memmap_p);
1402
1403	pci_release_regions(phba->pcidev);
1404	pci_disable_device(phba->pcidev);
1405
1406	idr_remove(&lpfc_hba_index, phba->brd_no);
1407	scsi_host_put(phba->host);
1408}
1409
1410void lpfc_scan_start(struct Scsi_Host *host)
1411{
1412	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
1413
1414	if (lpfc_alloc_sysfs_attr(phba))
1415		goto error;
1416
1417	phba->MBslimaddr = phba->slim_memmap_p;
1418	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1419	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1420	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1421	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1422
1423	if (lpfc_sli_hba_setup(phba))
1424		goto error;
1425
1426	/*
1427	 * hba setup may have changed the hba_queue_depth so we need to adjust
1428	 * the value of can_queue.
1429	 */
1430	host->can_queue = phba->cfg_hba_queue_depth - 10;
1431	return;
1432
1433error:
1434	lpfc_remove_device(phba);
1435}
1436
1437int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
1438{
1439	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
1440
1441	if (!phba->host)
1442		return 1;
1443	if (time >= 30 * HZ)
1444		goto finished;
1445
1446	if (phba->hba_state != LPFC_HBA_READY)
1447		return 0;
1448	if (phba->num_disc_nodes || phba->fc_prli_sent)
1449		return 0;
1450	if ((phba->fc_map_cnt == 0) && (time < 2 * HZ))
1451		return 0;
1452	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)
1453		return 0;
1454	if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ))
1455		return 0;
1456
1457finished:
1458	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1459		spin_lock_irq(shost->host_lock);
1460		lpfc_poll_start_timer(phba);
1461		spin_unlock_irq(shost->host_lock);
1462	}
1463
1464	/*
1465	 * Set fixed host attributes.
1466	 * Must be done after lpfc_sli_hba_setup().
1467	 */
1468
1469	fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn);
1470	fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn);
1471	fc_host_supported_classes(shost) = FC_COS_CLASS3;
1472
1473	memset(fc_host_supported_fc4s(shost), 0,
1474		sizeof(fc_host_supported_fc4s(shost)));
1475	fc_host_supported_fc4s(shost)[2] = 1;
1476	fc_host_supported_fc4s(shost)[7] = 1;
1477
1478	lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
1479
1480	fc_host_supported_speeds(shost) = 0;
1481	if (phba->lmt & LMT_10Gb)
1482		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
1483	if (phba->lmt & LMT_4Gb)
1484		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
1485	if (phba->lmt & LMT_2Gb)
1486		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
1487	if (phba->lmt & LMT_1Gb)
1488		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
1489
1490	fc_host_maxframe_size(shost) =
1491		((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1492		 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
1493
1494	/* This value is also unchanging */
1495	memset(fc_host_active_fc4s(shost), 0,
1496		sizeof(fc_host_active_fc4s(shost)));
1497	fc_host_active_fc4s(shost)[2] = 1;
1498	fc_host_active_fc4s(shost)[7] = 1;
1499
1500	spin_lock_irq(shost->host_lock);
1501	phba->fc_flag &= ~FC_LOADING;
1502	spin_unlock_irq(shost->host_lock);
1503
1504	return 1;
1505}
1506
1507static int __devinit
1508lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1509{
1510	struct Scsi_Host *host;
1511	struct lpfc_hba  *phba;
1512	struct lpfc_sli  *psli;
1513	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
1514	unsigned long bar0map_len, bar2map_len;
1515	int error = -ENODEV, retval;
1516	int i;
1517	uint16_t iotag;
1518
1519	if (pci_enable_device(pdev))
1520		goto out;
1521	if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
1522		goto out_disable_device;
1523
1524	host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
1525	if (!host)
1526		goto out_release_regions;
1527
1528	phba = (struct lpfc_hba*)host->hostdata;
1529	memset(phba, 0, sizeof (struct lpfc_hba));
1530	phba->host = host;
1531
1532	phba->fc_flag |= FC_LOADING;
1533	phba->pcidev = pdev;
1534
1535	/* Assign an unused board number */
1536	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
1537		goto out_put_host;
1538
1539	error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
1540	if (error)
1541		goto out_put_host;
1542
1543	host->unique_id = phba->brd_no;
1544
1545	/* Initialize timers used by driver */
1546	init_timer(&phba->fc_estabtmo);
1547	phba->fc_estabtmo.function = lpfc_establish_link_tmo;
1548	phba->fc_estabtmo.data = (unsigned long)phba;
1549	init_timer(&phba->fc_disctmo);
1550	phba->fc_disctmo.function = lpfc_disc_timeout;
1551	phba->fc_disctmo.data = (unsigned long)phba;
1552
1553	init_timer(&phba->fc_fdmitmo);
1554	phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
1555	phba->fc_fdmitmo.data = (unsigned long)phba;
1556	init_timer(&phba->els_tmofunc);
1557	phba->els_tmofunc.function = lpfc_els_timeout;
1558	phba->els_tmofunc.data = (unsigned long)phba;
1559	psli = &phba->sli;
1560	init_timer(&psli->mbox_tmo);
1561	psli->mbox_tmo.function = lpfc_mbox_timeout;
1562	psli->mbox_tmo.data = (unsigned long)phba;
1563
1564	init_timer(&phba->fcp_poll_timer);
1565	phba->fcp_poll_timer.function = lpfc_poll_timeout;
1566	phba->fcp_poll_timer.data = (unsigned long)phba;
1567
1568	/*
1569	 * Get all the module params for configuring this host and then
1570	 * establish the host parameters.
1571	 */
1572	lpfc_get_cfgparam(phba);
1573
1574	host->max_id = LPFC_MAX_TARGET;
1575	host->max_lun = phba->cfg_max_luns;
1576	host->this_id = -1;
1577
1578	INIT_LIST_HEAD(&phba->fc_nodes);
1579
1580	pci_set_master(pdev);
1581	pci_try_set_mwi(pdev);
1582
1583	if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
1584		if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
1585			goto out_idr_remove;
1586
1587	/*
1588	 * Get the bus address of Bar0 and Bar2 and the number of bytes
1589	 * required by each mapping.
1590	 */
1591	phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
1592	bar0map_len        = pci_resource_len(phba->pcidev, 0);
1593
1594	phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
1595	bar2map_len        = pci_resource_len(phba->pcidev, 2);
1596
1597	/* Map HBA SLIM to a kernel virtual address. */
1598	phba->slim_memmap_p      = ioremap(phba->pci_bar0_map, bar0map_len);
1599	if (!phba->slim_memmap_p) {
1600		error = -ENODEV;
1601		dev_printk(KERN_ERR, &pdev->dev,
1602			   "ioremap failed for SLIM memory.\n");
1603		goto out_idr_remove;
1604	}
1605
1606	/* Map HBA Control Registers to a kernel virtual address. */
1607	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
1608	if (!phba->ctrl_regs_memmap_p) {
1609		error = -ENODEV;
1610		dev_printk(KERN_ERR, &pdev->dev,
1611			   "ioremap failed for HBA control registers.\n");
1612		goto out_iounmap_slim;
1613	}
1614
1615	/* Allocate memory for SLI-2 structures */
1616	phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
1617					  &phba->slim2p_mapping, GFP_KERNEL);
1618	if (!phba->slim2p)
1619		goto out_iounmap;
1620
1621	memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
1622
1623	/* Initialize the SLI Layer to run with lpfc HBAs. */
1624	lpfc_sli_setup(phba);
1625	lpfc_sli_queue_setup(phba);
1626
1627	error = lpfc_mem_alloc(phba);
1628	if (error)
1629		goto out_free_slim;
1630
1631	/* Initialize and populate the iocb list per host.  */
1632	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
1633	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
1634		iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
1635		if (iocbq_entry == NULL) {
1636			printk(KERN_ERR "%s: only allocated %d iocbs of "
1637				"expected %d count. Unloading driver.\n",
1638				__FUNCTION__, i, LPFC_IOCB_LIST_CNT);
1639			error = -ENOMEM;
1640			goto out_free_iocbq;
1641		}
1642
1643		memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
1644		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
1645		if (iotag == 0) {
1646			kfree (iocbq_entry);
1647			printk(KERN_ERR "%s: failed to allocate IOTAG. "
1648			       "Unloading driver.\n",
1649				__FUNCTION__);
1650			error = -ENOMEM;
1651			goto out_free_iocbq;
1652		}
1653		spin_lock_irq(phba->host->host_lock);
1654		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
1655		phba->total_iocbq_bufs++;
1656		spin_unlock_irq(phba->host->host_lock);
1657	}
1658
1659	/* Initialize HBA structure */
1660	phba->fc_edtov = FF_DEF_EDTOV;
1661	phba->fc_ratov = FF_DEF_RATOV;
1662	phba->fc_altov = FF_DEF_ALTOV;
1663	phba->fc_arbtov = FF_DEF_ARBTOV;
1664
1665	INIT_LIST_HEAD(&phba->work_list);
1666	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
1667	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
1668
1669	/* Startup the kernel thread for this host adapter. */
1670	phba->worker_thread = kthread_run(lpfc_do_work, phba,
1671				       "lpfc_worker_%d", phba->brd_no);
1672	if (IS_ERR(phba->worker_thread)) {
1673		error = PTR_ERR(phba->worker_thread);
1674		goto out_free_iocbq;
1675	}
1676
1677	/*
1678	 * Set initial can_queue value since 0 is no longer supported and
1679	 * scsi_add_host will fail. This will be adjusted later based on the
1680	 * max xri value determined in hba setup.
1681	 */
1682	host->can_queue = phba->cfg_hba_queue_depth - 10;
1683
1684	/* Tell the midlayer we support 16 byte commands */
1685	host->max_cmd_len = 16;
1686
1687	/* Initialize the list of scsi buffers used by driver for scsi IO. */
1688	spin_lock_init(&phba->scsi_buf_list_lock);
1689	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
1690
1691	host->transportt = lpfc_transport_template;
1692	pci_set_drvdata(pdev, host);
1693
1694	if (phba->cfg_use_msi) {
1695		error = pci_enable_msi(phba->pcidev);
1696		if (error)
1697			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
1698					"Enable MSI failed, continuing with "
1699					"IRQ\n", phba->brd_no);
1700	}
1701
1702	error =	request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
1703							LPFC_DRIVER_NAME, phba);
1704	if (error) {
1705		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1706			"%d:0451 Enable interrupt handler failed\n",
1707			phba->brd_no);
1708		goto out_kthread_stop;
1709	}
1710
1711	error = scsi_add_host(host, &pdev->dev);
1712	if (error)
1713		goto out_free_irq;
1714
1715	scsi_scan_host(host);
1716
1717	return 0;
1718
1719out_free_irq:
1720	lpfc_stop_timer(phba);
1721	phba->work_hba_events = 0;
1722	free_irq(phba->pcidev->irq, phba);
1723	pci_disable_msi(phba->pcidev);
1724out_kthread_stop:
1725	kthread_stop(phba->worker_thread);
1726out_free_iocbq:
1727	list_for_each_entry_safe(iocbq_entry, iocbq_next,
1728						&phba->lpfc_iocb_list, list) {
1729		spin_lock_irq(phba->host->host_lock);
1730		kfree(iocbq_entry);
1731		phba->total_iocbq_bufs--;
1732		spin_unlock_irq(phba->host->host_lock);
1733	}
	lpfc_mem_free(phba);
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
							phba->slim2p_mapping);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out_idr_remove:
	idr_remove(&lpfc_hba_index, phba->brd_no);
out_put_host:
	phba->host = NULL;
	scsi_host_put(host);
out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	return error;
}

static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host   *host = pci_get_drvdata(pdev);
	struct lpfc_hba    *phba = (struct lpfc_hba *)host->hostdata;

	lpfc_remove_device(phba);

	pci_set_drvdata(pdev, NULL);
}

/**
 * lpfc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);
	/*
	 * The firmware may have dropped I/Os.  Error out the iocbs on the
	 * FCP ring's txcmplq so the SCSI layer can retry them after the
	 * link has been re-established.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_bars(pdev, bars)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

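	/* Restore bus mastering, which the slot reset clears. */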
	pci_set_master(pdev);

	/* Re-establishing Link */
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_ESTABLISH_LINK;
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(phba->host->host_lock);

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;

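	/*
	 * Bring the HBA back online; if that succeeds, re-arm the
	 * link-establishment timer.
	 */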
	if (lpfc_online(phba) == 0)
		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
}

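/*
 * PCI IDs of the Emulex HBAs claimed by this driver.  The all-zero entry
 * terminates the table.
 */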
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

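/*
 * PCI error recovery callbacks, invoked by the PCI error recovery core:
 * error_detected, then slot_reset once the slot has been reset, then
 * resume when traffic may start flowing again.
 */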
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.err_handler	= &lpfc_err_handler,
};

static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

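	/*
	 * Attach the FC transport template first; each host probed by the
	 * PCI driver below is registered against it.
	 */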
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENOMEM;
	error = pci_register_driver(&lpfc_driver);
	if (error)
		fc_release_transport(lpfc_transport_template);

	return error;
}

static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
