lpfc_init.c revision b92938b41ee84b83347b62baa6daa0d06a742e94
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30#include <linux/ctype.h>
31#include <linux/aer.h>
32#include <linux/slab.h>
33
34#include <scsi/scsi.h>
35#include <scsi/scsi_device.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_transport_fc.h>
38
39#include "lpfc_hw4.h"
40#include "lpfc_hw.h"
41#include "lpfc_sli.h"
42#include "lpfc_sli4.h"
43#include "lpfc_nl.h"
44#include "lpfc_disc.h"
45#include "lpfc_scsi.h"
46#include "lpfc.h"
47#include "lpfc_logmsg.h"
48#include "lpfc_crtn.h"
49#include "lpfc_vport.h"
50#include "lpfc_version.h"
51
52char *_dump_buf_data;
53unsigned long _dump_buf_data_order;
54char *_dump_buf_dif;
55unsigned long _dump_buf_dif_order;
56spinlock_t _dump_buf_lock;
57
58static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
59static int lpfc_post_rcv_buf(struct lpfc_hba *);
60static int lpfc_sli4_queue_create(struct lpfc_hba *);
61static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
62static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
63static int lpfc_setup_endian_order(struct lpfc_hba *);
64static int lpfc_sli4_read_config(struct lpfc_hba *);
65static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
66static void lpfc_free_sgl_list(struct lpfc_hba *);
67static int lpfc_init_sgl_list(struct lpfc_hba *);
68static int lpfc_init_active_sgl_array(struct lpfc_hba *);
69static void lpfc_free_active_sgl(struct lpfc_hba *);
70static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
71static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
72static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
73static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
74static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
75
76static struct scsi_transport_template *lpfc_transport_template = NULL;
77static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
78static DEFINE_IDR(lpfc_hba_index);
79
80/**
81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
82 * @phba: pointer to lpfc hba data structure.
83 *
84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
85 * mailbox command. It retrieves the revision information from the HBA and
86 * collects the Vital Product Data (VPD) about the HBA for preparing the
87 * configuration of the HBA.
88 *
89 * Return codes:
90 *   0 - success.
91 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
92 *   Any other value - indicates an error.
93 **/
94int
95lpfc_config_port_prep(struct lpfc_hba *phba)
96{
97	lpfc_vpd_t *vp = &phba->vpd;
98	int i = 0, rc;
99	LPFC_MBOXQ_t *pmb;
100	MAILBOX_t *mb;
101	char *lpfc_vpd_data = NULL;
102	uint16_t offset = 0;
103	static char licensed[56] =
104		    "key unlock for use with gnu public licensed code only\0";
105	static int init_key = 1;
106
107	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
108	if (!pmb) {
109		phba->link_state = LPFC_HBA_ERROR;
110		return -ENOMEM;
111	}
112
113	mb = &pmb->u.mb;
114	phba->link_state = LPFC_INIT_MBX_CMDS;
115
116	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
117		if (init_key) {
118			uint32_t *ptext = (uint32_t *) licensed;
119
120			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
121				*ptext = cpu_to_be32(*ptext);
122			init_key = 0;
123		}
124
125		lpfc_read_nv(phba, pmb);
126		memset((char*)mb->un.varRDnvp.rsvd3, 0,
127			sizeof (mb->un.varRDnvp.rsvd3));
128		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
129			 sizeof (licensed));
130
131		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
132
133		if (rc != MBX_SUCCESS) {
134			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
135					"0324 Config Port initialization "
136					"error, mbxCmd x%x READ_NVPARM, "
137					"mbxStatus x%x\n",
138					mb->mbxCommand, mb->mbxStatus);
139			mempool_free(pmb, phba->mbox_mem_pool);
140			return -ERESTART;
141		}
142		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
143		       sizeof(phba->wwnn));
144		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
145		       sizeof(phba->wwpn));
146	}
147
148	phba->sli3_options = 0x0;
149
150	/* Setup and issue mailbox READ REV command */
151	lpfc_read_rev(phba, pmb);
152	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153	if (rc != MBX_SUCCESS) {
154		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
155				"0439 Adapter failed to init, mbxCmd x%x "
156				"READ_REV, mbxStatus x%x\n",
157				mb->mbxCommand, mb->mbxStatus);
158		mempool_free( pmb, phba->mbox_mem_pool);
159		return -ERESTART;
160	}
161
162
163	/*
164	 * The value of rr must be 1 since the driver set the cv field to 1.
165	 * This setting requires the FW to set all revision fields.
166	 */
167	if (mb->un.varRdRev.rr == 0) {
168		vp->rev.rBit = 0;
169		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
170				"0440 Adapter failed to init, READ_REV has "
171				"missing revision information.\n");
172		mempool_free(pmb, phba->mbox_mem_pool);
173		return -ERESTART;
174	}
175
176	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
177		mempool_free(pmb, phba->mbox_mem_pool);
178		return -EINVAL;
179	}
180
181	/* Save information as VPD data */
182	vp->rev.rBit = 1;
183	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
184	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
185	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
186	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
187	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
188	vp->rev.biuRev = mb->un.varRdRev.biuRev;
189	vp->rev.smRev = mb->un.varRdRev.smRev;
190	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
191	vp->rev.endecRev = mb->un.varRdRev.endecRev;
192	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
193	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
194	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
195	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
196	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
197	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
198
199	/* If the sli feature level is less then 9, we must
200	 * tear down all RPIs and VPIs on link down if NPIV
201	 * is enabled.
202	 */
203	if (vp->rev.feaLevelHigh < 9)
204		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
205
206	if (lpfc_is_LC_HBA(phba->pcidev->device))
207		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
208						sizeof (phba->RandomData));
209
210	/* Get adapter VPD information */
211	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
212	if (!lpfc_vpd_data)
213		goto out_free_mbox;
214
215	do {
216		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
218
219		if (rc != MBX_SUCCESS) {
220			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
221					"0441 VPD not present on adapter, "
222					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
223					mb->mbxCommand, mb->mbxStatus);
224			mb->un.varDmp.word_cnt = 0;
225		}
226		/* dump mem may return a zero when finished or we got a
227		 * mailbox error, either way we are done.
228		 */
229		if (mb->un.varDmp.word_cnt == 0)
230			break;
231		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
232			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
233		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
234				      lpfc_vpd_data + offset,
235				      mb->un.varDmp.word_cnt);
236		offset += mb->un.varDmp.word_cnt;
237	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
238	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
239
240	kfree(lpfc_vpd_data);
241out_free_mbox:
242	mempool_free(pmb, phba->mbox_mem_pool);
243	return 0;
244}
245
246/**
247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
248 * @phba: pointer to lpfc hba data structure.
249 * @pmboxq: pointer to the driver internal queue element for mailbox command.
250 *
251 * This is the completion handler for driver's configuring asynchronous event
252 * mailbox command to the device. If the mailbox command returns successfully,
253 * it will set internal async event support flag to 1; otherwise, it will
254 * set internal async event support flag to 0.
255 **/
256static void
257lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
258{
259	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
260		phba->temp_sensor_support = 1;
261	else
262		phba->temp_sensor_support = 0;
263	mempool_free(pmboxq, phba->mbox_mem_pool);
264	return;
265}
266
267/**
268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
269 * @phba: pointer to lpfc hba data structure.
270 * @pmboxq: pointer to the driver internal queue element for mailbox command.
271 *
272 * This is the completion handler for dump mailbox command for getting
273 * wake up parameters. When this command complete, the response contain
274 * Option rom version of the HBA. This function translate the version number
275 * into a human readable string and store it in OptionROMVersion.
276 **/
277static void
278lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
279{
280	struct prog_id *prg;
281	uint32_t prog_id_word;
282	char dist = ' ';
283	/* character array used for decoding dist type. */
284	char dist_char[] = "nabx";
285
286	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
287		mempool_free(pmboxq, phba->mbox_mem_pool);
288		return;
289	}
290
291	prg = (struct prog_id *) &prog_id_word;
292
293	/* word 7 contain option rom version */
294	prog_id_word = pmboxq->u.mb.un.varWords[7];
295
296	/* Decode the Option rom version word to a readable string */
297	if (prg->dist < 4)
298		dist = dist_char[prg->dist];
299
300	if ((prg->dist == 3) && (prg->num == 0))
301		sprintf(phba->OptionROMVersion, "%d.%d%d",
302			prg->ver, prg->rev, prg->lev);
303	else
304		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305			prg->ver, prg->rev, prg->lev,
306			dist, prg->num);
307	mempool_free(pmboxq, phba->mbox_mem_pool);
308	return;
309}
310
311/**
312 * lpfc_config_port_post - Perform lpfc initialization after config port
313 * @phba: pointer to lpfc hba data structure.
314 *
315 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
316 * command call. It performs all internal resource and state setups on the
317 * port: post IOCB buffers, enable appropriate host interrupt attentions,
318 * ELS ring timers, etc.
319 *
320 * Return codes
321 *   0 - success.
322 *   Any other value - error.
323 **/
324int
325lpfc_config_port_post(struct lpfc_hba *phba)
326{
327	struct lpfc_vport *vport = phba->pport;
328	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
329	LPFC_MBOXQ_t *pmb;
330	MAILBOX_t *mb;
331	struct lpfc_dmabuf *mp;
332	struct lpfc_sli *psli = &phba->sli;
333	uint32_t status, timeout;
334	int i, j;
335	int rc;
336
337	spin_lock_irq(&phba->hbalock);
338	/*
339	 * If the Config port completed correctly the HBA is not
340	 * over heated any more.
341	 */
342	if (phba->over_temp_state == HBA_OVER_TEMP)
343		phba->over_temp_state = HBA_NORMAL_TEMP;
344	spin_unlock_irq(&phba->hbalock);
345
346	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
347	if (!pmb) {
348		phba->link_state = LPFC_HBA_ERROR;
349		return -ENOMEM;
350	}
351	mb = &pmb->u.mb;
352
353	/* Get login parameters for NID.  */
354	rc = lpfc_read_sparam(phba, pmb, 0);
355	if (rc) {
356		mempool_free(pmb, phba->mbox_mem_pool);
357		return -ENOMEM;
358	}
359
360	pmb->vport = vport;
361	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
362		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
363				"0448 Adapter failed init, mbxCmd x%x "
364				"READ_SPARM mbxStatus x%x\n",
365				mb->mbxCommand, mb->mbxStatus);
366		phba->link_state = LPFC_HBA_ERROR;
367		mp = (struct lpfc_dmabuf *) pmb->context1;
368		mempool_free(pmb, phba->mbox_mem_pool);
369		lpfc_mbuf_free(phba, mp->virt, mp->phys);
370		kfree(mp);
371		return -EIO;
372	}
373
374	mp = (struct lpfc_dmabuf *) pmb->context1;
375
376	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
377	lpfc_mbuf_free(phba, mp->virt, mp->phys);
378	kfree(mp);
379	pmb->context1 = NULL;
380
381	if (phba->cfg_soft_wwnn)
382		u64_to_wwn(phba->cfg_soft_wwnn,
383			   vport->fc_sparam.nodeName.u.wwn);
384	if (phba->cfg_soft_wwpn)
385		u64_to_wwn(phba->cfg_soft_wwpn,
386			   vport->fc_sparam.portName.u.wwn);
387	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388	       sizeof (struct lpfc_name));
389	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390	       sizeof (struct lpfc_name));
391
392	/* Update the fc_host data structures with new wwn. */
393	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
394	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
395	fc_host_max_npiv_vports(shost) = phba->max_vpi;
396
397	/* If no serial number in VPD data, use low 6 bytes of WWNN */
398	/* This should be consolidated into parse_vpd ? - mr */
399	if (phba->SerialNumber[0] == 0) {
400		uint8_t *outptr;
401
402		outptr = &vport->fc_nodename.u.s.IEEE[0];
403		for (i = 0; i < 12; i++) {
404			status = *outptr++;
405			j = ((status & 0xf0) >> 4);
406			if (j <= 9)
407				phba->SerialNumber[i] =
408				    (char)((uint8_t) 0x30 + (uint8_t) j);
409			else
410				phba->SerialNumber[i] =
411				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
412			i++;
413			j = (status & 0xf);
414			if (j <= 9)
415				phba->SerialNumber[i] =
416				    (char)((uint8_t) 0x30 + (uint8_t) j);
417			else
418				phba->SerialNumber[i] =
419				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
420		}
421	}
422
423	lpfc_read_config(phba, pmb);
424	pmb->vport = vport;
425	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
426		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
427				"0453 Adapter failed to init, mbxCmd x%x "
428				"READ_CONFIG, mbxStatus x%x\n",
429				mb->mbxCommand, mb->mbxStatus);
430		phba->link_state = LPFC_HBA_ERROR;
431		mempool_free( pmb, phba->mbox_mem_pool);
432		return -EIO;
433	}
434
435	/* Check if the port is disabled */
436	lpfc_sli_read_link_ste(phba);
437
438	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
439	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
440		phba->cfg_hba_queue_depth =
441			(mb->un.varRdConfig.max_xri + 1) -
442					lpfc_sli4_get_els_iocb_cnt(phba);
443
444	phba->lmt = mb->un.varRdConfig.lmt;
445
446	/* Get the default values for Model Name and Description */
447	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
448
449	if ((phba->cfg_link_speed > LINK_SPEED_10G)
450	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
451		&& !(phba->lmt & LMT_1Gb))
452	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
453		&& !(phba->lmt & LMT_2Gb))
454	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
455		&& !(phba->lmt & LMT_4Gb))
456	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
457		&& !(phba->lmt & LMT_8Gb))
458	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
459		&& !(phba->lmt & LMT_10Gb))) {
460		/* Reset link speed to auto */
461		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
462			"1302 Invalid speed for this board: "
463			"Reset link speed to auto: x%x\n",
464			phba->cfg_link_speed);
465			phba->cfg_link_speed = LINK_SPEED_AUTO;
466	}
467
468	phba->link_state = LPFC_LINK_DOWN;
469
470	/* Only process IOCBs on ELS ring till hba_state is READY */
471	if (psli->ring[psli->extra_ring].cmdringaddr)
472		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
473	if (psli->ring[psli->fcp_ring].cmdringaddr)
474		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
475	if (psli->ring[psli->next_ring].cmdringaddr)
476		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
477
478	/* Post receive buffers for desired rings */
479	if (phba->sli_rev != 3)
480		lpfc_post_rcv_buf(phba);
481
482	/*
483	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
484	 */
485	if (phba->intr_type == MSIX) {
486		rc = lpfc_config_msi(phba, pmb);
487		if (rc) {
488			mempool_free(pmb, phba->mbox_mem_pool);
489			return -EIO;
490		}
491		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
492		if (rc != MBX_SUCCESS) {
493			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
494					"0352 Config MSI mailbox command "
495					"failed, mbxCmd x%x, mbxStatus x%x\n",
496					pmb->u.mb.mbxCommand,
497					pmb->u.mb.mbxStatus);
498			mempool_free(pmb, phba->mbox_mem_pool);
499			return -EIO;
500		}
501	}
502
503	spin_lock_irq(&phba->hbalock);
504	/* Initialize ERATT handling flag */
505	phba->hba_flag &= ~HBA_ERATT_HANDLED;
506
507	/* Enable appropriate host interrupts */
508	status = readl(phba->HCregaddr);
509	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
510	if (psli->num_rings > 0)
511		status |= HC_R0INT_ENA;
512	if (psli->num_rings > 1)
513		status |= HC_R1INT_ENA;
514	if (psli->num_rings > 2)
515		status |= HC_R2INT_ENA;
516	if (psli->num_rings > 3)
517		status |= HC_R3INT_ENA;
518
519	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
520	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
521		status &= ~(HC_R0INT_ENA);
522
523	writel(status, phba->HCregaddr);
524	readl(phba->HCregaddr); /* flush */
525	spin_unlock_irq(&phba->hbalock);
526
527	/* Set up ring-0 (ELS) timer */
528	timeout = phba->fc_ratov * 2;
529	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
530	/* Set up heart beat (HB) timer */
531	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
532	phba->hb_outstanding = 0;
533	phba->last_completion_time = jiffies;
534	/* Set up error attention (ERATT) polling timer */
535	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
536
537	if (phba->hba_flag & LINK_DISABLED) {
538		lpfc_printf_log(phba,
539			KERN_ERR, LOG_INIT,
540			"2598 Adapter Link is disabled.\n");
541		lpfc_down_link(phba, pmb);
542		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
543		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
544		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
545			lpfc_printf_log(phba,
546			KERN_ERR, LOG_INIT,
547			"2599 Adapter failed to issue DOWN_LINK"
548			" mbox command rc 0x%x\n", rc);
549
550			mempool_free(pmb, phba->mbox_mem_pool);
551			return -EIO;
552		}
553	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
554		lpfc_init_link(phba, pmb, phba->cfg_topology,
555			phba->cfg_link_speed);
556		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
557		lpfc_set_loopback_flag(phba);
558		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
559		if (rc != MBX_SUCCESS) {
560			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
561				"0454 Adapter failed to init, mbxCmd x%x "
562				"INIT_LINK, mbxStatus x%x\n",
563				mb->mbxCommand, mb->mbxStatus);
564
565			/* Clear all interrupt enable conditions */
566			writel(0, phba->HCregaddr);
567			readl(phba->HCregaddr); /* flush */
568			/* Clear all pending interrupts */
569			writel(0xffffffff, phba->HAregaddr);
570			readl(phba->HAregaddr); /* flush */
571
572			phba->link_state = LPFC_HBA_ERROR;
573			if (rc != MBX_BUSY)
574				mempool_free(pmb, phba->mbox_mem_pool);
575			return -EIO;
576		}
577	}
578	/* MBOX buffer will be freed in mbox compl */
579	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
580	if (!pmb) {
581		phba->link_state = LPFC_HBA_ERROR;
582		return -ENOMEM;
583	}
584
585	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
586	pmb->mbox_cmpl = lpfc_config_async_cmpl;
587	pmb->vport = phba->pport;
588	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
589
590	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
591		lpfc_printf_log(phba,
592				KERN_ERR,
593				LOG_INIT,
594				"0456 Adapter failed to issue "
595				"ASYNCEVT_ENABLE mbox status x%x\n",
596				rc);
597		mempool_free(pmb, phba->mbox_mem_pool);
598	}
599
600	/* Get Option rom version */
601	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
602	if (!pmb) {
603		phba->link_state = LPFC_HBA_ERROR;
604		return -ENOMEM;
605	}
606
607	lpfc_dump_wakeup_param(phba, pmb);
608	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
609	pmb->vport = phba->pport;
610	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
611
612	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
613		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
614				"to get Option ROM version status x%x\n", rc);
615		mempool_free(pmb, phba->mbox_mem_pool);
616	}
617
618	return 0;
619}
620
621/**
622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure.
624 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
625 *
626 * This routine will issue the INIT_LINK mailbox command call.
627 * It is available to other drivers through the lpfc_hba data
628 * structure for use as a delayed link up mechanism with the
629 * module parameter lpfc_suppress_link_up.
630 *
631 * Return code
632 *		0 - success
633 *		Any other value - error
634 **/
635int
636lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
637{
638	struct lpfc_vport *vport = phba->pport;
639	LPFC_MBOXQ_t *pmb;
640	MAILBOX_t *mb;
641	int rc;
642
643	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
644	if (!pmb) {
645		phba->link_state = LPFC_HBA_ERROR;
646		return -ENOMEM;
647	}
648	mb = &pmb->u.mb;
649	pmb->vport = vport;
650
651	lpfc_init_link(phba, pmb, phba->cfg_topology,
652		phba->cfg_link_speed);
653	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
654	lpfc_set_loopback_flag(phba);
655	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
656	if (rc != MBX_SUCCESS) {
657		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
658			"0498 Adapter failed to init, mbxCmd x%x "
659			"INIT_LINK, mbxStatus x%x\n",
660			mb->mbxCommand, mb->mbxStatus);
661		/* Clear all interrupt enable conditions */
662		writel(0, phba->HCregaddr);
663		readl(phba->HCregaddr); /* flush */
664		/* Clear all pending interrupts */
665		writel(0xffffffff, phba->HAregaddr);
666		readl(phba->HAregaddr); /* flush */
667		phba->link_state = LPFC_HBA_ERROR;
668		if (rc != MBX_BUSY || flag == MBX_POLL)
669			mempool_free(pmb, phba->mbox_mem_pool);
670		return -EIO;
671	}
672	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
673	if (flag == MBX_POLL)
674		mempool_free(pmb, phba->mbox_mem_pool);
675
676	return 0;
677}
678
679/**
680 * lpfc_hba_down_link - this routine downs the FC link
681 * @phba: pointer to lpfc hba data structure.
682 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
683 *
684 * This routine will issue the DOWN_LINK mailbox command call.
685 * It is available to other drivers through the lpfc_hba data
686 * structure for use to stop the link.
687 *
688 * Return code
689 *		0 - success
690 *		Any other value - error
691 **/
692int
693lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
694{
695	LPFC_MBOXQ_t *pmb;
696	int rc;
697
698	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
699	if (!pmb) {
700		phba->link_state = LPFC_HBA_ERROR;
701		return -ENOMEM;
702	}
703
704	lpfc_printf_log(phba,
705		KERN_ERR, LOG_INIT,
706		"0491 Adapter Link is disabled.\n");
707	lpfc_down_link(phba, pmb);
708	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
709	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
710	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
711		lpfc_printf_log(phba,
712		KERN_ERR, LOG_INIT,
713		"2522 Adapter failed to issue DOWN_LINK"
714		" mbox command rc 0x%x\n", rc);
715
716		mempool_free(pmb, phba->mbox_mem_pool);
717		return -EIO;
718	}
719	if (flag == MBX_POLL)
720		mempool_free(pmb, phba->mbox_mem_pool);
721
722	return 0;
723}
724
725/**
726 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
727 * @phba: pointer to lpfc HBA data structure.
728 *
729 * This routine will do LPFC uninitialization before the HBA is reset when
730 * bringing down the SLI Layer.
731 *
732 * Return codes
733 *   0 - success.
734 *   Any other value - error.
735 **/
736int
737lpfc_hba_down_prep(struct lpfc_hba *phba)
738{
739	struct lpfc_vport **vports;
740	int i;
741
742	if (phba->sli_rev <= LPFC_SLI_REV3) {
743		/* Disable interrupts */
744		writel(0, phba->HCregaddr);
745		readl(phba->HCregaddr); /* flush */
746	}
747
748	if (phba->pport->load_flag & FC_UNLOADING)
749		lpfc_cleanup_discovery_resources(phba->pport);
750	else {
751		vports = lpfc_create_vport_work_array(phba);
752		if (vports != NULL)
753			for (i = 0; i <= phba->max_vports &&
754				vports[i] != NULL; i++)
755				lpfc_cleanup_discovery_resources(vports[i]);
756		lpfc_destroy_vport_work_array(phba, vports);
757	}
758	return 0;
759}
760
761/**
762 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
763 * @phba: pointer to lpfc HBA data structure.
764 *
765 * This routine will do uninitialization after the HBA is reset when bring
766 * down the SLI Layer.
767 *
768 * Return codes
769 *   0 - success.
770 *   Any other value - error.
771 **/
772static int
773lpfc_hba_down_post_s3(struct lpfc_hba *phba)
774{
775	struct lpfc_sli *psli = &phba->sli;
776	struct lpfc_sli_ring *pring;
777	struct lpfc_dmabuf *mp, *next_mp;
778	LIST_HEAD(completions);
779	int i;
780
781	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
782		lpfc_sli_hbqbuf_free_all(phba);
783	else {
784		/* Cleanup preposted buffers on the ELS ring */
785		pring = &psli->ring[LPFC_ELS_RING];
786		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
787			list_del(&mp->list);
788			pring->postbufq_cnt--;
789			lpfc_mbuf_free(phba, mp->virt, mp->phys);
790			kfree(mp);
791		}
792	}
793
794	spin_lock_irq(&phba->hbalock);
795	for (i = 0; i < psli->num_rings; i++) {
796		pring = &psli->ring[i];
797
798		/* At this point in time the HBA is either reset or DOA. Either
799		 * way, nothing should be on txcmplq as it will NEVER complete.
800		 */
801		list_splice_init(&pring->txcmplq, &completions);
802		pring->txcmplq_cnt = 0;
803		spin_unlock_irq(&phba->hbalock);
804
805		/* Cancel all the IOCBs from the completions list */
806		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
807				      IOERR_SLI_ABORTED);
808
809		lpfc_sli_abort_iocb_ring(phba, pring);
810		spin_lock_irq(&phba->hbalock);
811	}
812	spin_unlock_irq(&phba->hbalock);
813
814	return 0;
815}
816/**
817 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
818 * @phba: pointer to lpfc HBA data structure.
819 *
820 * This routine will do uninitialization after the HBA is reset when bring
821 * down the SLI Layer.
822 *
823 * Return codes
824 *   0 - success.
825 *   Any other value - error.
826 **/
827static int
828lpfc_hba_down_post_s4(struct lpfc_hba *phba)
829{
830	struct lpfc_scsi_buf *psb, *psb_next;
831	LIST_HEAD(aborts);
832	int ret;
833	unsigned long iflag = 0;
834	struct lpfc_sglq *sglq_entry = NULL;
835
836	ret = lpfc_hba_down_post_s3(phba);
837	if (ret)
838		return ret;
839	/* At this point in time the HBA is either reset or DOA. Either
840	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
841	 * on the lpfc_sgl_list so that it can either be freed if the
842	 * driver is unloading or reposted if the driver is restarting
843	 * the port.
844	 */
845	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
846					/* scsl_buf_list */
847	/* abts_sgl_list_lock required because worker thread uses this
848	 * list.
849	 */
850	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
851	list_for_each_entry(sglq_entry,
852		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
853		sglq_entry->state = SGL_FREED;
854
855	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
856			&phba->sli4_hba.lpfc_sgl_list);
857	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
858	/* abts_scsi_buf_list_lock required because worker thread uses this
859	 * list.
860	 */
861	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
862	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
863			&aborts);
864	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
865	spin_unlock_irq(&phba->hbalock);
866
867	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
868		psb->pCmd = NULL;
869		psb->status = IOSTAT_SUCCESS;
870	}
871	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
872	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
873	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
874	return 0;
875}
876
877/**
878 * lpfc_hba_down_post - Wrapper func for hba down post routine
879 * @phba: pointer to lpfc HBA data structure.
880 *
881 * This routine wraps the actual SLI3 or SLI4 routine for performing
882 * uninitialization after the HBA is reset when bring down the SLI Layer.
883 *
884 * Return codes
885 *   0 - success.
886 *   Any other value - error.
887 **/
888int
889lpfc_hba_down_post(struct lpfc_hba *phba)
890{
891	return (*phba->lpfc_hba_down_post)(phba);
892}
893
894/**
895 * lpfc_hb_timeout - The HBA-timer timeout handler
896 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
897 *
898 * This is the HBA-timer timeout handler registered to the lpfc driver. When
899 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
900 * work-port-events bitmap and the worker thread is notified. This timeout
901 * event will be used by the worker thread to invoke the actual timeout
902 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
903 * be performed in the timeout handler and the HBA timeout event bit shall
904 * be cleared by the worker thread after it has taken the event bitmap out.
905 **/
906static void
907lpfc_hb_timeout(unsigned long ptr)
908{
909	struct lpfc_hba *phba;
910	uint32_t tmo_posted;
911	unsigned long iflag;
912
913	phba = (struct lpfc_hba *)ptr;
914
915	/* Check for heart beat timeout conditions */
916	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
917	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
918	if (!tmo_posted)
919		phba->pport->work_port_events |= WORKER_HB_TMO;
920	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
921
922	/* Tell the worker thread there is work to do */
923	if (!tmo_posted)
924		lpfc_worker_wake_up(phba);
925	return;
926}
927
928/**
929 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
930 * @phba: pointer to lpfc hba data structure.
931 * @pmboxq: pointer to the driver internal queue element for mailbox command.
932 *
933 * This is the callback function to the lpfc heart-beat mailbox command.
934 * If configured, the lpfc driver issues the heart-beat mailbox command to
935 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
936 * heart-beat mailbox command is issued, the driver shall set up heart-beat
937 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
938 * heart-beat outstanding state. Once the mailbox command comes back and
939 * no error conditions detected, the heart-beat mailbox command timer is
940 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
941 * state is cleared for the next heart-beat. If the timer expired with the
942 * heart-beat outstanding state set, the driver will put the HBA offline.
943 **/
944static void
945lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
946{
947	unsigned long drvr_flag;
948
949	spin_lock_irqsave(&phba->hbalock, drvr_flag);
950	phba->hb_outstanding = 0;
951	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
952
953	/* Check and reset heart-beat timer is necessary */
954	mempool_free(pmboxq, phba->mbox_mem_pool);
955	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
956		!(phba->link_state == LPFC_HBA_ERROR) &&
957		!(phba->pport->load_flag & FC_UNLOADING))
958		mod_timer(&phba->hb_tmofunc,
959			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
960	return;
961}
962
963/**
964 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
965 * @phba: pointer to lpfc hba data structure.
966 *
967 * This is the actual HBA-timer timeout handler to be invoked by the worker
968 * thread whenever the HBA timer fired and HBA-timeout event posted. This
969 * handler performs any periodic operations needed for the device. If such
970 * periodic event has already been attended to either in the interrupt handler
971 * or by processing slow-ring or fast-ring events within the HBA-timer
972 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
973 * the timer for the next timeout period. If lpfc heart-beat mailbox command
974 * is configured and there is no heart-beat mailbox command outstanding, a
975 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
976 * has been a heart-beat mailbox command outstanding, the HBA shall be put
977 * to offline.
978 **/
979void
980lpfc_hb_timeout_handler(struct lpfc_hba *phba)
981{
982	struct lpfc_vport **vports;
983	LPFC_MBOXQ_t *pmboxq;
984	struct lpfc_dmabuf *buf_ptr;
985	int retval, i;
986	struct lpfc_sli *psli = &phba->sli;
987	LIST_HEAD(completions);
988
989	vports = lpfc_create_vport_work_array(phba);
990	if (vports != NULL)
991		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
992			lpfc_rcv_seq_check_edtov(vports[i]);
993	lpfc_destroy_vport_work_array(phba, vports);
994
995	if ((phba->link_state == LPFC_HBA_ERROR) ||
996		(phba->pport->load_flag & FC_UNLOADING) ||
997		(phba->pport->fc_flag & FC_OFFLINE_MODE))
998		return;
999
1000	spin_lock_irq(&phba->pport->work_port_lock);
1001
1002	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
1003		jiffies)) {
1004		spin_unlock_irq(&phba->pport->work_port_lock);
1005		if (!phba->hb_outstanding)
1006			mod_timer(&phba->hb_tmofunc,
1007				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1008		else
1009			mod_timer(&phba->hb_tmofunc,
1010				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1011		return;
1012	}
1013	spin_unlock_irq(&phba->pport->work_port_lock);
1014
1015	if (phba->elsbuf_cnt &&
1016		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1017		spin_lock_irq(&phba->hbalock);
1018		list_splice_init(&phba->elsbuf, &completions);
1019		phba->elsbuf_cnt = 0;
1020		phba->elsbuf_prev_cnt = 0;
1021		spin_unlock_irq(&phba->hbalock);
1022
1023		while (!list_empty(&completions)) {
1024			list_remove_head(&completions, buf_ptr,
1025				struct lpfc_dmabuf, list);
1026			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1027			kfree(buf_ptr);
1028		}
1029	}
1030	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1031
1032	/* If there is no heart beat outstanding, issue a heartbeat command */
1033	if (phba->cfg_enable_hba_heartbeat) {
1034		if (!phba->hb_outstanding) {
1035			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
1036			if (!pmboxq) {
1037				mod_timer(&phba->hb_tmofunc,
1038					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1039				return;
1040			}
1041
1042			lpfc_heart_beat(phba, pmboxq);
1043			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1044			pmboxq->vport = phba->pport;
1045			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1046
1047			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1048				mempool_free(pmboxq, phba->mbox_mem_pool);
1049				mod_timer(&phba->hb_tmofunc,
1050					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1051				return;
1052			}
1053			mod_timer(&phba->hb_tmofunc,
1054				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1055			phba->hb_outstanding = 1;
1056			return;
1057		} else {
1058			/*
1059			* If heart beat timeout called with hb_outstanding set
1060			* we need to take the HBA offline.
1061			*/
1062			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1063					"0459 Adapter heartbeat failure, "
1064					"taking this port offline.\n");
1065
1066			spin_lock_irq(&phba->hbalock);
1067			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1068			spin_unlock_irq(&phba->hbalock);
1069
1070			lpfc_offline_prep(phba);
1071			lpfc_offline(phba);
1072			lpfc_unblock_mgmt_io(phba);
1073			phba->link_state = LPFC_HBA_ERROR;
1074			lpfc_hba_down_post(phba);
1075		}
1076	}
1077}
1078
1079/**
1080 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1081 * @phba: pointer to lpfc hba data structure.
1082 *
1083 * This routine is called to bring the HBA offline when HBA hardware error
1084 * other than Port Error 6 has been detected.
1085 **/
1086static void
1087lpfc_offline_eratt(struct lpfc_hba *phba)
1088{
1089	struct lpfc_sli   *psli = &phba->sli;
1090
1091	spin_lock_irq(&phba->hbalock);
1092	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1093	spin_unlock_irq(&phba->hbalock);
1094	lpfc_offline_prep(phba);
1095
1096	lpfc_offline(phba);
1097	lpfc_reset_barrier(phba);
1098	spin_lock_irq(&phba->hbalock);
1099	lpfc_sli_brdreset(phba);
1100	spin_unlock_irq(&phba->hbalock);
1101	lpfc_hba_down_post(phba);
1102	lpfc_sli_brdready(phba, HS_MBRDY);
1103	lpfc_unblock_mgmt_io(phba);
1104	phba->link_state = LPFC_HBA_ERROR;
1105	return;
1106}
1107
1108/**
1109 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1110 * @phba: pointer to lpfc hba data structure.
1111 *
1112 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1113 * other than Port Error 6 has been detected.
1114 **/
1115static void
1116lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1117{
1118	lpfc_offline_prep(phba);
1119	lpfc_offline(phba);
1120	lpfc_sli4_brdreset(phba);
1121	lpfc_hba_down_post(phba);
1122	lpfc_sli4_post_status_check(phba);
1123	lpfc_unblock_mgmt_io(phba);
1124	phba->link_state = LPFC_HBA_ERROR;
1125}
1126
1127/**
1128 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1129 * @phba: pointer to lpfc hba data structure.
1130 *
1131 * This routine is invoked to handle the deferred HBA hardware error
1132 * conditions. This type of error is indicated by HBA by setting ER1
1133 * and another ER bit in the host status register. The driver will
1134 * wait until the ER1 bit clears before handling the error condition.
1135 **/
1136static void
1137lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1138{
1139	uint32_t old_host_status = phba->work_hs;
1140	struct lpfc_sli_ring  *pring;
1141	struct lpfc_sli *psli = &phba->sli;
1142
1143	/* If the pci channel is offline, ignore possible errors,
1144	 * since we cannot communicate with the pci card anyway.
1145	 */
1146	if (pci_channel_offline(phba->pcidev)) {
1147		spin_lock_irq(&phba->hbalock);
1148		phba->hba_flag &= ~DEFER_ERATT;
1149		spin_unlock_irq(&phba->hbalock);
1150		return;
1151	}
1152
1153	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1154		"0479 Deferred Adapter Hardware Error "
1155		"Data: x%x x%x x%x\n",
1156		phba->work_hs,
1157		phba->work_status[0], phba->work_status[1]);
1158
1159	spin_lock_irq(&phba->hbalock);
1160	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1161	spin_unlock_irq(&phba->hbalock);
1162
1163
1164	/*
1165	 * Firmware stops when it triggred erratt. That could cause the I/Os
1166	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1167	 * SCSI layer retry it after re-establishing link.
1168	 */
1169	pring = &psli->ring[psli->fcp_ring];
1170	lpfc_sli_abort_iocb_ring(phba, pring);
1171
1172	/*
1173	 * There was a firmware error. Take the hba offline and then
1174	 * attempt to restart it.
1175	 */
1176	lpfc_offline_prep(phba);
1177	lpfc_offline(phba);
1178
1179	/* Wait for the ER1 bit to clear.*/
1180	while (phba->work_hs & HS_FFER1) {
1181		msleep(100);
1182		phba->work_hs = readl(phba->HSregaddr);
1183		/* If driver is unloading let the worker thread continue */
1184		if (phba->pport->load_flag & FC_UNLOADING) {
1185			phba->work_hs = 0;
1186			break;
1187		}
1188	}
1189
1190	/*
1191	 * This is to ptrotect against a race condition in which
1192	 * first write to the host attention register clear the
1193	 * host status register.
1194	 */
1195	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1196		phba->work_hs = old_host_status & ~HS_FFER1;
1197
1198	spin_lock_irq(&phba->hbalock);
1199	phba->hba_flag &= ~DEFER_ERATT;
1200	spin_unlock_irq(&phba->hbalock);
1201	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1202	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1203}
1204
1205static void
1206lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1207{
1208	struct lpfc_board_event_header board_event;
1209	struct Scsi_Host *shost;
1210
1211	board_event.event_type = FC_REG_BOARD_EVENT;
1212	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1213	shost = lpfc_shost_from_vport(phba->pport);
1214	fc_host_post_vendor_event(shost, fc_get_event_number(),
1215				  sizeof(board_event),
1216				  (char *) &board_event,
1217				  LPFC_NL_VENDOR_ID);
1218}
1219
1220/**
1221 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1222 * @phba: pointer to lpfc hba data structure.
1223 *
1224 * This routine is invoked to handle the following HBA hardware error
1225 * conditions:
1226 * 1 - HBA error attention interrupt
1227 * 2 - DMA ring index out of range
1228 * 3 - Mailbox command came back as unknown
1229 **/
1230static void
1231lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1232{
1233	struct lpfc_vport *vport = phba->pport;
1234	struct lpfc_sli   *psli = &phba->sli;
1235	struct lpfc_sli_ring  *pring;
1236	uint32_t event_data;
1237	unsigned long temperature;
1238	struct temp_event temp_event_data;
1239	struct Scsi_Host  *shost;
1240
1241	/* If the pci channel is offline, ignore possible errors,
1242	 * since we cannot communicate with the pci card anyway.
1243	 */
1244	if (pci_channel_offline(phba->pcidev)) {
1245		spin_lock_irq(&phba->hbalock);
1246		phba->hba_flag &= ~DEFER_ERATT;
1247		spin_unlock_irq(&phba->hbalock);
1248		return;
1249	}
1250
1251	/* If resets are disabled then leave the HBA alone and return */
1252	if (!phba->cfg_enable_hba_reset)
1253		return;
1254
1255	/* Send an internal error event to mgmt application */
1256	lpfc_board_errevt_to_mgmt(phba);
1257
1258	if (phba->hba_flag & DEFER_ERATT)
1259		lpfc_handle_deferred_eratt(phba);
1260
1261	if (phba->work_hs & HS_FFER6) {
1262		/* Re-establishing Link */
1263		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1264				"1301 Re-establishing Link "
1265				"Data: x%x x%x x%x\n",
1266				phba->work_hs,
1267				phba->work_status[0], phba->work_status[1]);
1268
1269		spin_lock_irq(&phba->hbalock);
1270		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1271		spin_unlock_irq(&phba->hbalock);
1272
1273		/*
1274		* Firmware stops when it triggled erratt with HS_FFER6.
1275		* That could cause the I/Os dropped by the firmware.
1276		* Error iocb (I/O) on txcmplq and let the SCSI layer
1277		* retry it after re-establishing link.
1278		*/
1279		pring = &psli->ring[psli->fcp_ring];
1280		lpfc_sli_abort_iocb_ring(phba, pring);
1281
1282		/*
1283		 * There was a firmware error.  Take the hba offline and then
1284		 * attempt to restart it.
1285		 */
1286		lpfc_offline_prep(phba);
1287		lpfc_offline(phba);
1288		lpfc_sli_brdrestart(phba);
1289		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1290			lpfc_unblock_mgmt_io(phba);
1291			return;
1292		}
1293		lpfc_unblock_mgmt_io(phba);
1294	} else if (phba->work_hs & HS_CRIT_TEMP) {
1295		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1296		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1297		temp_event_data.event_code = LPFC_CRIT_TEMP;
1298		temp_event_data.data = (uint32_t)temperature;
1299
1300		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1301				"0406 Adapter maximum temperature exceeded "
1302				"(%ld), taking this port offline "
1303				"Data: x%x x%x x%x\n",
1304				temperature, phba->work_hs,
1305				phba->work_status[0], phba->work_status[1]);
1306
1307		shost = lpfc_shost_from_vport(phba->pport);
1308		fc_host_post_vendor_event(shost, fc_get_event_number(),
1309					  sizeof(temp_event_data),
1310					  (char *) &temp_event_data,
1311					  SCSI_NL_VID_TYPE_PCI
1312					  | PCI_VENDOR_ID_EMULEX);
1313
1314		spin_lock_irq(&phba->hbalock);
1315		phba->over_temp_state = HBA_OVER_TEMP;
1316		spin_unlock_irq(&phba->hbalock);
1317		lpfc_offline_eratt(phba);
1318
1319	} else {
1320		/* The if clause above forces this code path when the status
1321		 * failure is a value other than FFER6. Do not call the offline
1322		 * twice. This is the adapter hardware error path.
1323		 */
1324		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1325				"0457 Adapter Hardware Error "
1326				"Data: x%x x%x x%x\n",
1327				phba->work_hs,
1328				phba->work_status[0], phba->work_status[1]);
1329
1330		event_data = FC_REG_DUMP_EVENT;
1331		shost = lpfc_shost_from_vport(vport);
1332		fc_host_post_vendor_event(shost, fc_get_event_number(),
1333				sizeof(event_data), (char *) &event_data,
1334				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1335
1336		lpfc_offline_eratt(phba);
1337	}
1338	return;
1339}
1340
1341/**
1342 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1343 * @phba: pointer to lpfc hba data structure.
1344 *
1345 * This routine is invoked to handle the SLI4 HBA hardware error attention
1346 * conditions.
1347 **/
1348static void
1349lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1350{
1351	struct lpfc_vport *vport = phba->pport;
1352	uint32_t event_data;
1353	struct Scsi_Host *shost;
1354
1355	/* If the pci channel is offline, ignore possible errors, since
1356	 * we cannot communicate with the pci card anyway.
1357	 */
1358	if (pci_channel_offline(phba->pcidev))
1359		return;
1360	/* If resets are disabled then leave the HBA alone and return */
1361	if (!phba->cfg_enable_hba_reset)
1362		return;
1363
1364	/* Send an internal error event to mgmt application */
1365	lpfc_board_errevt_to_mgmt(phba);
1366
1367	/* For now, the actual action for SLI4 device handling is not
1368	 * specified yet, just treated it as adaptor hardware failure
1369	 */
1370	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1371			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1372			phba->work_status[0], phba->work_status[1]);
1373
1374	event_data = FC_REG_DUMP_EVENT;
1375	shost = lpfc_shost_from_vport(vport);
1376	fc_host_post_vendor_event(shost, fc_get_event_number(),
1377				  sizeof(event_data), (char *) &event_data,
1378				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1379
1380	lpfc_sli4_offline_eratt(phba);
1381}
1382
1383/**
1384 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1385 * @phba: pointer to lpfc HBA data structure.
1386 *
1387 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1388 * routine from the API jump table function pointer from the lpfc_hba struct.
1389 *
1390 * Return codes
1391 *   0 - success.
1392 *   Any other value - error.
1393 **/
1394void
1395lpfc_handle_eratt(struct lpfc_hba *phba)
1396{
1397	(*phba->lpfc_handle_eratt)(phba);
1398}
1399
1400/**
1401 * lpfc_handle_latt - The HBA link event handler
1402 * @phba: pointer to lpfc hba data structure.
1403 *
1404 * This routine is invoked from the worker thread to handle a HBA host
1405 * attention link event.
1406 **/
1407void
1408lpfc_handle_latt(struct lpfc_hba *phba)
1409{
1410	struct lpfc_vport *vport = phba->pport;
1411	struct lpfc_sli   *psli = &phba->sli;
1412	LPFC_MBOXQ_t *pmb;
1413	volatile uint32_t control;
1414	struct lpfc_dmabuf *mp;
1415	int rc = 0;
1416
1417	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1418	if (!pmb) {
1419		rc = 1;
1420		goto lpfc_handle_latt_err_exit;
1421	}
1422
1423	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1424	if (!mp) {
1425		rc = 2;
1426		goto lpfc_handle_latt_free_pmb;
1427	}
1428
1429	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1430	if (!mp->virt) {
1431		rc = 3;
1432		goto lpfc_handle_latt_free_mp;
1433	}
1434
1435	/* Cleanup any outstanding ELS commands */
1436	lpfc_els_flush_all_cmd(phba);
1437
1438	psli->slistat.link_event++;
1439	lpfc_read_la(phba, pmb, mp);
1440	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1441	pmb->vport = vport;
1442	/* Block ELS IOCBs until we have processed this mbox command */
1443	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1444	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1445	if (rc == MBX_NOT_FINISHED) {
1446		rc = 4;
1447		goto lpfc_handle_latt_free_mbuf;
1448	}
1449
1450	/* Clear Link Attention in HA REG */
1451	spin_lock_irq(&phba->hbalock);
1452	writel(HA_LATT, phba->HAregaddr);
1453	readl(phba->HAregaddr); /* flush */
1454	spin_unlock_irq(&phba->hbalock);
1455
1456	return;
1457
1458lpfc_handle_latt_free_mbuf:
1459	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1460	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1461lpfc_handle_latt_free_mp:
1462	kfree(mp);
1463lpfc_handle_latt_free_pmb:
1464	mempool_free(pmb, phba->mbox_mem_pool);
1465lpfc_handle_latt_err_exit:
1466	/* Enable Link attention interrupts */
1467	spin_lock_irq(&phba->hbalock);
1468	psli->sli_flag |= LPFC_PROCESS_LA;
1469	control = readl(phba->HCregaddr);
1470	control |= HC_LAINT_ENA;
1471	writel(control, phba->HCregaddr);
1472	readl(phba->HCregaddr); /* flush */
1473
1474	/* Clear Link Attention in HA REG */
1475	writel(HA_LATT, phba->HAregaddr);
1476	readl(phba->HAregaddr); /* flush */
1477	spin_unlock_irq(&phba->hbalock);
1478	lpfc_linkdown(phba);
1479	phba->link_state = LPFC_HBA_ERROR;
1480
1481	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1482		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1483
1484	return;
1485}
1486
1487/**
1488 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1489 * @phba: pointer to lpfc hba data structure.
1490 * @vpd: pointer to the vital product data.
1491 * @len: length of the vital product data in bytes.
1492 *
1493 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1494 * an array of characters. In this routine, the ModelName, ProgramType, and
1495 * ModelDesc, etc. fields of the phba data structure will be populated.
1496 *
1497 * Return codes
1498 *   0 - pointer to the VPD passed in is NULL
1499 *   1 - success
1500 **/
1501int
1502lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1503{
1504	uint8_t lenlo, lenhi;
1505	int Length;
1506	int i, j;
1507	int finished = 0;
1508	int index = 0;
1509
1510	if (!vpd)
1511		return 0;
1512
1513	/* Vital Product */
1514	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1515			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1516			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1517			(uint32_t) vpd[3]);
1518	while (!finished && (index < (len - 4))) {
1519		switch (vpd[index]) {
1520		case 0x82:
1521		case 0x91:
1522			index += 1;
1523			lenlo = vpd[index];
1524			index += 1;
1525			lenhi = vpd[index];
1526			index += 1;
1527			i = ((((unsigned short)lenhi) << 8) + lenlo);
1528			index += i;
1529			break;
1530		case 0x90:
1531			index += 1;
1532			lenlo = vpd[index];
1533			index += 1;
1534			lenhi = vpd[index];
1535			index += 1;
1536			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1537			if (Length > len - index)
1538				Length = len - index;
1539			while (Length > 0) {
1540			/* Look for Serial Number */
1541			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1542				index += 2;
1543				i = vpd[index];
1544				index += 1;
1545				j = 0;
1546				Length -= (3+i);
1547				while(i--) {
1548					phba->SerialNumber[j++] = vpd[index++];
1549					if (j == 31)
1550						break;
1551				}
1552				phba->SerialNumber[j] = 0;
1553				continue;
1554			}
1555			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1556				phba->vpd_flag |= VPD_MODEL_DESC;
1557				index += 2;
1558				i = vpd[index];
1559				index += 1;
1560				j = 0;
1561				Length -= (3+i);
1562				while(i--) {
1563					phba->ModelDesc[j++] = vpd[index++];
1564					if (j == 255)
1565						break;
1566				}
1567				phba->ModelDesc[j] = 0;
1568				continue;
1569			}
1570			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1571				phba->vpd_flag |= VPD_MODEL_NAME;
1572				index += 2;
1573				i = vpd[index];
1574				index += 1;
1575				j = 0;
1576				Length -= (3+i);
1577				while(i--) {
1578					phba->ModelName[j++] = vpd[index++];
1579					if (j == 79)
1580						break;
1581				}
1582				phba->ModelName[j] = 0;
1583				continue;
1584			}
1585			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1586				phba->vpd_flag |= VPD_PROGRAM_TYPE;
1587				index += 2;
1588				i = vpd[index];
1589				index += 1;
1590				j = 0;
1591				Length -= (3+i);
1592				while(i--) {
1593					phba->ProgramType[j++] = vpd[index++];
1594					if (j == 255)
1595						break;
1596				}
1597				phba->ProgramType[j] = 0;
1598				continue;
1599			}
1600			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1601				phba->vpd_flag |= VPD_PORT;
1602				index += 2;
1603				i = vpd[index];
1604				index += 1;
1605				j = 0;
1606				Length -= (3+i);
1607				while(i--) {
1608				phba->Port[j++] = vpd[index++];
1609				if (j == 19)
1610					break;
1611				}
1612				phba->Port[j] = 0;
1613				continue;
1614			}
1615			else {
1616				index += 2;
1617				i = vpd[index];
1618				index += 1;
1619				index += i;
1620				Length -= (3 + i);
1621			}
1622		}
1623		finished = 0;
1624		break;
1625		case 0x78:
1626			finished = 1;
1627			break;
1628		default:
1629			index ++;
1630			break;
1631		}
1632	}
1633
1634	return(1);
1635}
1636
1637/**
1638 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1639 * @phba: pointer to lpfc hba data structure.
1640 * @mdp: pointer to the data structure to hold the derived model name.
1641 * @descp: pointer to the data structure to hold the derived description.
1642 *
1643 * This routine retrieves HBA's description based on its registered PCI device
1644 * ID. The @descp passed into this function points to an array of 256 chars. It
1645 * shall be returned with the model name, maximum speed, and the host bus type.
1646 * The @mdp passed into this function points to an array of 80 chars. When the
1647 * function returns, the @mdp will be filled with the model name.
1648 **/
1649static void
1650lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1651{
1652	lpfc_vpd_t *vp;
1653	uint16_t dev_id = phba->pcidev->device;
1654	int max_speed;
1655	int GE = 0;
1656	int oneConnect = 0; /* default is not a oneConnect */
1657	struct {
1658		char *name;
1659		char *bus;
1660		char *function;
1661	} m = {"<Unknown>", "", ""};
1662
1663	if (mdp && mdp[0] != '\0'
1664		&& descp && descp[0] != '\0')
1665		return;
1666
1667	if (phba->lmt & LMT_10Gb)
1668		max_speed = 10;
1669	else if (phba->lmt & LMT_8Gb)
1670		max_speed = 8;
1671	else if (phba->lmt & LMT_4Gb)
1672		max_speed = 4;
1673	else if (phba->lmt & LMT_2Gb)
1674		max_speed = 2;
1675	else
1676		max_speed = 1;
1677
1678	vp = &phba->vpd;
1679
1680	switch (dev_id) {
1681	case PCI_DEVICE_ID_FIREFLY:
1682		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1683		break;
1684	case PCI_DEVICE_ID_SUPERFLY:
1685		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1686			m = (typeof(m)){"LP7000", "PCI",
1687					"Fibre Channel Adapter"};
1688		else
1689			m = (typeof(m)){"LP7000E", "PCI",
1690					"Fibre Channel Adapter"};
1691		break;
1692	case PCI_DEVICE_ID_DRAGONFLY:
1693		m = (typeof(m)){"LP8000", "PCI",
1694				"Fibre Channel Adapter"};
1695		break;
1696	case PCI_DEVICE_ID_CENTAUR:
1697		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1698			m = (typeof(m)){"LP9002", "PCI",
1699					"Fibre Channel Adapter"};
1700		else
1701			m = (typeof(m)){"LP9000", "PCI",
1702					"Fibre Channel Adapter"};
1703		break;
1704	case PCI_DEVICE_ID_RFLY:
1705		m = (typeof(m)){"LP952", "PCI",
1706				"Fibre Channel Adapter"};
1707		break;
1708	case PCI_DEVICE_ID_PEGASUS:
1709		m = (typeof(m)){"LP9802", "PCI-X",
1710				"Fibre Channel Adapter"};
1711		break;
1712	case PCI_DEVICE_ID_THOR:
1713		m = (typeof(m)){"LP10000", "PCI-X",
1714				"Fibre Channel Adapter"};
1715		break;
1716	case PCI_DEVICE_ID_VIPER:
1717		m = (typeof(m)){"LPX1000",  "PCI-X",
1718				"Fibre Channel Adapter"};
1719		break;
1720	case PCI_DEVICE_ID_PFLY:
1721		m = (typeof(m)){"LP982", "PCI-X",
1722				"Fibre Channel Adapter"};
1723		break;
1724	case PCI_DEVICE_ID_TFLY:
1725		m = (typeof(m)){"LP1050", "PCI-X",
1726				"Fibre Channel Adapter"};
1727		break;
1728	case PCI_DEVICE_ID_HELIOS:
1729		m = (typeof(m)){"LP11000", "PCI-X2",
1730				"Fibre Channel Adapter"};
1731		break;
1732	case PCI_DEVICE_ID_HELIOS_SCSP:
1733		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1734				"Fibre Channel Adapter"};
1735		break;
1736	case PCI_DEVICE_ID_HELIOS_DCSP:
1737		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1738				"Fibre Channel Adapter"};
1739		break;
1740	case PCI_DEVICE_ID_NEPTUNE:
1741		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1742		break;
1743	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1744		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1745		break;
1746	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1747		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1748		break;
1749	case PCI_DEVICE_ID_BMID:
1750		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1751		break;
1752	case PCI_DEVICE_ID_BSMB:
1753		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1754		break;
1755	case PCI_DEVICE_ID_ZEPHYR:
1756		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1757		break;
1758	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1759		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1760		break;
1761	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1762		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1763		GE = 1;
1764		break;
1765	case PCI_DEVICE_ID_ZMID:
1766		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1767		break;
1768	case PCI_DEVICE_ID_ZSMB:
1769		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1770		break;
1771	case PCI_DEVICE_ID_LP101:
1772		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1773		break;
1774	case PCI_DEVICE_ID_LP10000S:
1775		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1776		break;
1777	case PCI_DEVICE_ID_LP11000S:
1778		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1779		break;
1780	case PCI_DEVICE_ID_LPE11000S:
1781		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1782		break;
1783	case PCI_DEVICE_ID_SAT:
1784		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1785		break;
1786	case PCI_DEVICE_ID_SAT_MID:
1787		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1788		break;
1789	case PCI_DEVICE_ID_SAT_SMB:
1790		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1791		break;
1792	case PCI_DEVICE_ID_SAT_DCSP:
1793		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1794		break;
1795	case PCI_DEVICE_ID_SAT_SCSP:
1796		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1797		break;
1798	case PCI_DEVICE_ID_SAT_S:
1799		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1800		break;
1801	case PCI_DEVICE_ID_HORNET:
1802		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1803		GE = 1;
1804		break;
1805	case PCI_DEVICE_ID_PROTEUS_VF:
1806		m = (typeof(m)){"LPev12000", "PCIe IOV",
1807				"Fibre Channel Adapter"};
1808		break;
1809	case PCI_DEVICE_ID_PROTEUS_PF:
1810		m = (typeof(m)){"LPev12000", "PCIe IOV",
1811				"Fibre Channel Adapter"};
1812		break;
1813	case PCI_DEVICE_ID_PROTEUS_S:
1814		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1815				"Fibre Channel Adapter"};
1816		break;
1817	case PCI_DEVICE_ID_TIGERSHARK:
1818		oneConnect = 1;
1819		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1820		break;
1821	case PCI_DEVICE_ID_TOMCAT:
1822		oneConnect = 1;
1823		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1824		break;
1825	case PCI_DEVICE_ID_FALCON:
1826		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1827				"EmulexSecure Fibre"};
1828		break;
1829	default:
1830		m = (typeof(m)){"Unknown", "", ""};
1831		break;
1832	}
1833
1834	if (mdp && mdp[0] == '\0')
1835		snprintf(mdp, 79,"%s", m.name);
1836	/* oneConnect hba requires special processing, they are all initiators
1837	 * and we put the port number on the end
1838	 */
1839	if (descp && descp[0] == '\0') {
1840		if (oneConnect)
1841			snprintf(descp, 255,
1842				"Emulex OneConnect %s, %s Initiator, Port %s",
1843				m.name, m.function,
1844				phba->Port);
1845		else
1846			snprintf(descp, 255,
1847				"Emulex %s %d%s %s %s",
1848				m.name, max_speed, (GE) ? "GE" : "Gb",
1849				m.bus, m.function);
1850	}
1851}
1852
1853/**
1854 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1855 * @phba: pointer to lpfc hba data structure.
1856 * @pring: pointer to a IOCB ring.
1857 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1858 *
1859 * This routine posts a given number of IOCBs with the associated DMA buffer
1860 * descriptors specified by the cnt argument to the given IOCB ring.
1861 *
1862 * Return codes
1863 *   The number of IOCBs NOT able to be posted to the IOCB ring.
1864 **/
1865int
1866lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1867{
1868	IOCB_t *icmd;
1869	struct lpfc_iocbq *iocb;
1870	struct lpfc_dmabuf *mp1, *mp2;
1871
1872	cnt += pring->missbufcnt;
1873
1874	/* While there are buffers to post */
1875	while (cnt > 0) {
1876		/* Allocate buffer for  command iocb */
1877		iocb = lpfc_sli_get_iocbq(phba);
1878		if (iocb == NULL) {
1879			pring->missbufcnt = cnt;
1880			return cnt;
1881		}
1882		icmd = &iocb->iocb;
1883
1884		/* 2 buffers can be posted per command */
1885		/* Allocate buffer to post */
1886		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1887		if (mp1)
1888		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1889		if (!mp1 || !mp1->virt) {
1890			kfree(mp1);
1891			lpfc_sli_release_iocbq(phba, iocb);
1892			pring->missbufcnt = cnt;
1893			return cnt;
1894		}
1895
1896		INIT_LIST_HEAD(&mp1->list);
1897		/* Allocate buffer to post */
1898		if (cnt > 1) {
1899			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1900			if (mp2)
1901				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1902							    &mp2->phys);
1903			if (!mp2 || !mp2->virt) {
1904				kfree(mp2);
1905				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1906				kfree(mp1);
1907				lpfc_sli_release_iocbq(phba, iocb);
1908				pring->missbufcnt = cnt;
1909				return cnt;
1910			}
1911
1912			INIT_LIST_HEAD(&mp2->list);
1913		} else {
1914			mp2 = NULL;
1915		}
1916
1917		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1918		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1919		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1920		icmd->ulpBdeCount = 1;
1921		cnt--;
1922		if (mp2) {
1923			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1924			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1925			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1926			cnt--;
1927			icmd->ulpBdeCount = 2;
1928		}
1929
1930		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1931		icmd->ulpLe = 1;
1932
1933		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1934		    IOCB_ERROR) {
1935			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1936			kfree(mp1);
1937			cnt++;
1938			if (mp2) {
1939				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1940				kfree(mp2);
1941				cnt++;
1942			}
1943			lpfc_sli_release_iocbq(phba, iocb);
1944			pring->missbufcnt = cnt;
1945			return cnt;
1946		}
1947		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1948		if (mp2)
1949			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1950	}
1951	pring->missbufcnt = 0;
1952	return 0;
1953}
1954
1955/**
1956 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1957 * @phba: pointer to lpfc hba data structure.
1958 *
1959 * This routine posts initial receive IOCB buffers to the ELS ring. The
1960 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1961 * set to 64 IOCBs.
1962 *
1963 * Return codes
1964 *   0 - success (currently always success)
1965 **/
1966static int
1967lpfc_post_rcv_buf(struct lpfc_hba *phba)
1968{
1969	struct lpfc_sli *psli = &phba->sli;
1970
1971	/* Ring 0, ELS / CT buffers */
1972	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1973	/* Ring 2 - FCP no buffers needed */
1974
1975	return 0;
1976}
1977
1978#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1979
1980/**
1981 * lpfc_sha_init - Set up initial array of hash table entries
1982 * @HashResultPointer: pointer to an array as hash table.
1983 *
1984 * This routine sets up the initial values to the array of hash table entries
1985 * for the LC HBAs.
1986 **/
1987static void
1988lpfc_sha_init(uint32_t * HashResultPointer)
1989{
1990	HashResultPointer[0] = 0x67452301;
1991	HashResultPointer[1] = 0xEFCDAB89;
1992	HashResultPointer[2] = 0x98BADCFE;
1993	HashResultPointer[3] = 0x10325476;
1994	HashResultPointer[4] = 0xC3D2E1F0;
1995}
1996
1997/**
1998 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1999 * @HashResultPointer: pointer to an initial/result hash table.
2000 * @HashWorkingPointer: pointer to an working hash table.
2001 *
2002 * This routine iterates an initial hash table pointed by @HashResultPointer
2003 * with the values from the working hash table pointeed by @HashWorkingPointer.
2004 * The results are putting back to the initial hash table, returned through
2005 * the @HashResultPointer as the result hash table.
2006 **/
2007static void
2008lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2009{
2010	int t;
2011	uint32_t TEMP;
2012	uint32_t A, B, C, D, E;
2013	t = 16;
2014	do {
2015		HashWorkingPointer[t] =
2016		    S(1,
2017		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2018								     8] ^
2019		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2020	} while (++t <= 79);
2021	t = 0;
2022	A = HashResultPointer[0];
2023	B = HashResultPointer[1];
2024	C = HashResultPointer[2];
2025	D = HashResultPointer[3];
2026	E = HashResultPointer[4];
2027
2028	do {
2029		if (t < 20) {
2030			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2031		} else if (t < 40) {
2032			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2033		} else if (t < 60) {
2034			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2035		} else {
2036			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2037		}
2038		TEMP += S(5, A) + E + HashWorkingPointer[t];
2039		E = D;
2040		D = C;
2041		C = S(30, B);
2042		B = A;
2043		A = TEMP;
2044	} while (++t <= 79);
2045
2046	HashResultPointer[0] += A;
2047	HashResultPointer[1] += B;
2048	HashResultPointer[2] += C;
2049	HashResultPointer[3] += D;
2050	HashResultPointer[4] += E;
2051
2052}
2053
2054/**
2055 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2056 * @RandomChallenge: pointer to the entry of host challenge random number array.
2057 * @HashWorking: pointer to the entry of the working hash array.
2058 *
2059 * This routine calculates the working hash array referred by @HashWorking
2060 * from the challenge random numbers associated with the host, referred by
2061 * @RandomChallenge. The result is put into the entry of the working hash
2062 * array and returned by reference through @HashWorking.
2063 **/
2064static void
2065lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2066{
2067	*HashWorking = (*RandomChallenge ^ *HashWorking);
2068}
2069
2070/**
2071 * lpfc_hba_init - Perform special handling for LC HBA initialization
2072 * @phba: pointer to lpfc hba data structure.
2073 * @hbainit: pointer to an array of unsigned 32-bit integers.
2074 *
2075 * This routine performs the special handling for LC HBA initialization.
2076 **/
2077void
2078lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2079{
2080	int t;
2081	uint32_t *HashWorking;
2082	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2083
2084	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2085	if (!HashWorking)
2086		return;
2087
2088	HashWorking[0] = HashWorking[78] = *pwwnn++;
2089	HashWorking[1] = HashWorking[79] = *pwwnn;
2090
2091	for (t = 0; t < 7; t++)
2092		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2093
2094	lpfc_sha_init(hbainit);
2095	lpfc_sha_iterate(hbainit, HashWorking);
2096	kfree(HashWorking);
2097}
2098
2099/**
2100 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2101 * @vport: pointer to a virtual N_Port data structure.
2102 *
2103 * This routine performs the necessary cleanups before deleting the @vport.
2104 * It invokes the discovery state machine to perform necessary state
2105 * transitions and to release the ndlps associated with the @vport. Note,
2106 * the physical port is treated as @vport 0.
2107 **/
2108void
2109lpfc_cleanup(struct lpfc_vport *vport)
2110{
2111	struct lpfc_hba   *phba = vport->phba;
2112	struct lpfc_nodelist *ndlp, *next_ndlp;
2113	int i = 0;
2114
2115	if (phba->link_state > LPFC_LINK_DOWN)
2116		lpfc_port_link_failure(vport);
2117
2118	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2119		if (!NLP_CHK_NODE_ACT(ndlp)) {
2120			ndlp = lpfc_enable_node(vport, ndlp,
2121						NLP_STE_UNUSED_NODE);
2122			if (!ndlp)
2123				continue;
2124			spin_lock_irq(&phba->ndlp_lock);
2125			NLP_SET_FREE_REQ(ndlp);
2126			spin_unlock_irq(&phba->ndlp_lock);
2127			/* Trigger the release of the ndlp memory */
2128			lpfc_nlp_put(ndlp);
2129			continue;
2130		}
2131		spin_lock_irq(&phba->ndlp_lock);
2132		if (NLP_CHK_FREE_REQ(ndlp)) {
2133			/* The ndlp should not be in memory free mode already */
2134			spin_unlock_irq(&phba->ndlp_lock);
2135			continue;
2136		} else
2137			/* Indicate request for freeing ndlp memory */
2138			NLP_SET_FREE_REQ(ndlp);
2139		spin_unlock_irq(&phba->ndlp_lock);
2140
2141		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2142		    ndlp->nlp_DID == Fabric_DID) {
2143			/* Just free up ndlp with Fabric_DID for vports */
2144			lpfc_nlp_put(ndlp);
2145			continue;
2146		}
2147
2148		if (ndlp->nlp_type & NLP_FABRIC)
2149			lpfc_disc_state_machine(vport, ndlp, NULL,
2150					NLP_EVT_DEVICE_RECOVERY);
2151
2152		lpfc_disc_state_machine(vport, ndlp, NULL,
2153					     NLP_EVT_DEVICE_RM);
2154
2155	}
2156
2157	/* At this point, ALL ndlp's should be gone
2158	 * because of the previous NLP_EVT_DEVICE_RM.
2159	 * Lets wait for this to happen, if needed.
2160	 */
2161	while (!list_empty(&vport->fc_nodes)) {
2162		if (i++ > 3000) {
2163			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2164				"0233 Nodelist not empty\n");
2165			list_for_each_entry_safe(ndlp, next_ndlp,
2166						&vport->fc_nodes, nlp_listp) {
2167				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2168						LOG_NODE,
2169						"0282 did:x%x ndlp:x%p "
2170						"usgmap:x%x refcnt:%d\n",
2171						ndlp->nlp_DID, (void *)ndlp,
2172						ndlp->nlp_usg_map,
2173						atomic_read(
2174							&ndlp->kref.refcount));
2175			}
2176			break;
2177		}
2178
2179		/* Wait for any activity on ndlps to settle */
2180		msleep(10);
2181	}
2182}
2183
2184/**
2185 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2186 * @vport: pointer to a virtual N_Port data structure.
2187 *
2188 * This routine stops all the timers associated with a @vport. This function
2189 * is invoked before disabling or deleting a @vport. Note that the physical
2190 * port is treated as @vport 0.
2191 **/
2192void
2193lpfc_stop_vport_timers(struct lpfc_vport *vport)
2194{
2195	del_timer_sync(&vport->els_tmofunc);
2196	del_timer_sync(&vport->fc_fdmitmo);
2197	lpfc_can_disctmo(vport);
2198	return;
2199}
2200
2201/**
2202 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2203 * @phba: pointer to lpfc hba data structure.
2204 *
2205 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2206 * caller of this routine should already hold the host lock.
2207 **/
2208void
2209__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2210{
2211	/* Clear pending FCF rediscovery wait and failover in progress flags */
2212	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2213				FCF_DEAD_DISC |
2214				FCF_ACVL_DISC);
2215	/* Now, try to stop the timer */
2216	del_timer(&phba->fcf.redisc_wait);
2217}
2218
2219/**
2220 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2221 * @phba: pointer to lpfc hba data structure.
2222 *
2223 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2224 * checks whether the FCF rediscovery wait timer is pending with the host
2225 * lock held before proceeding with disabling the timer and clearing the
2226 * wait timer pendig flag.
2227 **/
2228void
2229lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2230{
2231	spin_lock_irq(&phba->hbalock);
2232	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2233		/* FCF rediscovery timer already fired or stopped */
2234		spin_unlock_irq(&phba->hbalock);
2235		return;
2236	}
2237	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2238	spin_unlock_irq(&phba->hbalock);
2239}
2240
2241/**
2242 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2243 * @phba: pointer to lpfc hba data structure.
2244 *
2245 * This routine stops all the timers associated with a HBA. This function is
2246 * invoked before either putting a HBA offline or unloading the driver.
2247 **/
2248void
2249lpfc_stop_hba_timers(struct lpfc_hba *phba)
2250{
2251	lpfc_stop_vport_timers(phba->pport);
2252	del_timer_sync(&phba->sli.mbox_tmo);
2253	del_timer_sync(&phba->fabric_block_timer);
2254	del_timer_sync(&phba->eratt_poll);
2255	del_timer_sync(&phba->hb_tmofunc);
2256	phba->hb_outstanding = 0;
2257
2258	switch (phba->pci_dev_grp) {
2259	case LPFC_PCI_DEV_LP:
2260		/* Stop any LightPulse device specific driver timers */
2261		del_timer_sync(&phba->fcp_poll_timer);
2262		break;
2263	case LPFC_PCI_DEV_OC:
2264		/* Stop any OneConnect device sepcific driver timers */
2265		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2266		break;
2267	default:
2268		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2269				"0297 Invalid device group (x%x)\n",
2270				phba->pci_dev_grp);
2271		break;
2272	}
2273	return;
2274}
2275
2276/**
2277 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2278 * @phba: pointer to lpfc hba data structure.
2279 *
2280 * This routine marks a HBA's management interface as blocked. Once the HBA's
2281 * management interface is marked as blocked, all the user space access to
2282 * the HBA, whether they are from sysfs interface or libdfc interface will
2283 * all be blocked. The HBA is set to block the management interface when the
2284 * driver prepares the HBA interface for online or offline.
2285 **/
2286static void
2287lpfc_block_mgmt_io(struct lpfc_hba * phba)
2288{
2289	unsigned long iflag;
2290	uint8_t actcmd = MBX_HEARTBEAT;
2291	unsigned long timeout;
2292
2293
2294	spin_lock_irqsave(&phba->hbalock, iflag);
2295	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2296	if (phba->sli.mbox_active)
2297		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2298	spin_unlock_irqrestore(&phba->hbalock, iflag);
2299	/* Determine how long we might wait for the active mailbox
2300	 * command to be gracefully completed by firmware.
2301	 */
2302	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2303			jiffies;
2304	/* Wait for the outstnading mailbox command to complete */
2305	while (phba->sli.mbox_active) {
2306		/* Check active mailbox complete status every 2ms */
2307		msleep(2);
2308		if (time_after(jiffies, timeout)) {
2309			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2310				"2813 Mgmt IO is Blocked %x "
2311				"- mbox cmd %x still active\n",
2312				phba->sli.sli_flag, actcmd);
2313			break;
2314		}
2315	}
2316}
2317
2318/**
2319 * lpfc_online - Initialize and bring a HBA online
2320 * @phba: pointer to lpfc hba data structure.
2321 *
2322 * This routine initializes the HBA and brings a HBA online. During this
2323 * process, the management interface is blocked to prevent user space access
2324 * to the HBA interfering with the driver initialization.
2325 *
2326 * Return codes
2327 *   0 - successful
2328 *   1 - failed
2329 **/
2330int
2331lpfc_online(struct lpfc_hba *phba)
2332{
2333	struct lpfc_vport *vport;
2334	struct lpfc_vport **vports;
2335	int i;
2336
2337	if (!phba)
2338		return 0;
2339	vport = phba->pport;
2340
2341	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2342		return 0;
2343
2344	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2345			"0458 Bring Adapter online\n");
2346
2347	lpfc_block_mgmt_io(phba);
2348
2349	if (!lpfc_sli_queue_setup(phba)) {
2350		lpfc_unblock_mgmt_io(phba);
2351		return 1;
2352	}
2353
2354	if (phba->sli_rev == LPFC_SLI_REV4) {
2355		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2356			lpfc_unblock_mgmt_io(phba);
2357			return 1;
2358		}
2359	} else {
2360		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2361			lpfc_unblock_mgmt_io(phba);
2362			return 1;
2363		}
2364	}
2365
2366	vports = lpfc_create_vport_work_array(phba);
2367	if (vports != NULL)
2368		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2369			struct Scsi_Host *shost;
2370			shost = lpfc_shost_from_vport(vports[i]);
2371			spin_lock_irq(shost->host_lock);
2372			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2373			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2374				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2375			if (phba->sli_rev == LPFC_SLI_REV4)
2376				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2377			spin_unlock_irq(shost->host_lock);
2378		}
2379		lpfc_destroy_vport_work_array(phba, vports);
2380
2381	lpfc_unblock_mgmt_io(phba);
2382	return 0;
2383}
2384
2385/**
2386 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2387 * @phba: pointer to lpfc hba data structure.
2388 *
2389 * This routine marks a HBA's management interface as not blocked. Once the
2390 * HBA's management interface is marked as not blocked, all the user space
2391 * access to the HBA, whether they are from sysfs interface or libdfc
2392 * interface will be allowed. The HBA is set to block the management interface
2393 * when the driver prepares the HBA interface for online or offline and then
2394 * set to unblock the management interface afterwards.
2395 **/
2396void
2397lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2398{
2399	unsigned long iflag;
2400
2401	spin_lock_irqsave(&phba->hbalock, iflag);
2402	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2403	spin_unlock_irqrestore(&phba->hbalock, iflag);
2404}
2405
2406/**
2407 * lpfc_offline_prep - Prepare a HBA to be brought offline
2408 * @phba: pointer to lpfc hba data structure.
2409 *
2410 * This routine is invoked to prepare a HBA to be brought offline. It performs
2411 * unregistration login to all the nodes on all vports and flushes the mailbox
2412 * queue to make it ready to be brought offline.
2413 **/
2414void
2415lpfc_offline_prep(struct lpfc_hba * phba)
2416{
2417	struct lpfc_vport *vport = phba->pport;
2418	struct lpfc_nodelist  *ndlp, *next_ndlp;
2419	struct lpfc_vport **vports;
2420	struct Scsi_Host *shost;
2421	int i;
2422
2423	if (vport->fc_flag & FC_OFFLINE_MODE)
2424		return;
2425
2426	lpfc_block_mgmt_io(phba);
2427
2428	lpfc_linkdown(phba);
2429
2430	/* Issue an unreg_login to all nodes on all vports */
2431	vports = lpfc_create_vport_work_array(phba);
2432	if (vports != NULL) {
2433		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2434			if (vports[i]->load_flag & FC_UNLOADING)
2435				continue;
2436			shost = lpfc_shost_from_vport(vports[i]);
2437			spin_lock_irq(shost->host_lock);
2438			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2439			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2440			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2441			spin_unlock_irq(shost->host_lock);
2442
2443			shost =	lpfc_shost_from_vport(vports[i]);
2444			list_for_each_entry_safe(ndlp, next_ndlp,
2445						 &vports[i]->fc_nodes,
2446						 nlp_listp) {
2447				if (!NLP_CHK_NODE_ACT(ndlp))
2448					continue;
2449				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2450					continue;
2451				if (ndlp->nlp_type & NLP_FABRIC) {
2452					lpfc_disc_state_machine(vports[i], ndlp,
2453						NULL, NLP_EVT_DEVICE_RECOVERY);
2454					lpfc_disc_state_machine(vports[i], ndlp,
2455						NULL, NLP_EVT_DEVICE_RM);
2456				}
2457				spin_lock_irq(shost->host_lock);
2458				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2459				spin_unlock_irq(shost->host_lock);
2460				lpfc_unreg_rpi(vports[i], ndlp);
2461			}
2462		}
2463	}
2464	lpfc_destroy_vport_work_array(phba, vports);
2465
2466	lpfc_sli_mbox_sys_shutdown(phba);
2467}
2468
2469/**
2470 * lpfc_offline - Bring a HBA offline
2471 * @phba: pointer to lpfc hba data structure.
2472 *
2473 * This routine actually brings a HBA offline. It stops all the timers
2474 * associated with the HBA, brings down the SLI layer, and eventually
2475 * marks the HBA as in offline state for the upper layer protocol.
2476 **/
2477void
2478lpfc_offline(struct lpfc_hba *phba)
2479{
2480	struct Scsi_Host  *shost;
2481	struct lpfc_vport **vports;
2482	int i;
2483
2484	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2485		return;
2486
2487	/* stop port and all timers associated with this hba */
2488	lpfc_stop_port(phba);
2489	vports = lpfc_create_vport_work_array(phba);
2490	if (vports != NULL)
2491		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2492			lpfc_stop_vport_timers(vports[i]);
2493	lpfc_destroy_vport_work_array(phba, vports);
2494	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2495			"0460 Bring Adapter offline\n");
2496	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2497	   now.  */
2498	lpfc_sli_hba_down(phba);
2499	spin_lock_irq(&phba->hbalock);
2500	phba->work_ha = 0;
2501	spin_unlock_irq(&phba->hbalock);
2502	vports = lpfc_create_vport_work_array(phba);
2503	if (vports != NULL)
2504		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2505			shost = lpfc_shost_from_vport(vports[i]);
2506			spin_lock_irq(shost->host_lock);
2507			vports[i]->work_port_events = 0;
2508			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2509			spin_unlock_irq(shost->host_lock);
2510		}
2511	lpfc_destroy_vport_work_array(phba, vports);
2512}
2513
2514/**
2515 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2516 * @phba: pointer to lpfc hba data structure.
2517 *
2518 * This routine is to free all the SCSI buffers and IOCBs from the driver
2519 * list back to kernel. It is called from lpfc_pci_remove_one to free
2520 * the internal resources before the device is removed from the system.
2521 *
2522 * Return codes
2523 *   0 - successful (for now, it always returns 0)
2524 **/
2525static int
2526lpfc_scsi_free(struct lpfc_hba *phba)
2527{
2528	struct lpfc_scsi_buf *sb, *sb_next;
2529	struct lpfc_iocbq *io, *io_next;
2530
2531	spin_lock_irq(&phba->hbalock);
2532	/* Release all the lpfc_scsi_bufs maintained by this host. */
2533	spin_lock(&phba->scsi_buf_list_lock);
2534	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2535		list_del(&sb->list);
2536		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2537			      sb->dma_handle);
2538		kfree(sb);
2539		phba->total_scsi_bufs--;
2540	}
2541	spin_unlock(&phba->scsi_buf_list_lock);
2542
2543	/* Release all the lpfc_iocbq entries maintained by this host. */
2544	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2545		list_del(&io->list);
2546		kfree(io);
2547		phba->total_iocbq_bufs--;
2548	}
2549	spin_unlock_irq(&phba->hbalock);
2550	return 0;
2551}
2552
2553/**
2554 * lpfc_create_port - Create an FC port
2555 * @phba: pointer to lpfc hba data structure.
2556 * @instance: a unique integer ID to this FC port.
2557 * @dev: pointer to the device data structure.
2558 *
2559 * This routine creates a FC port for the upper layer protocol. The FC port
2560 * can be created on top of either a physical port or a virtual port provided
2561 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2562 * and associates the FC port created before adding the shost into the SCSI
2563 * layer.
2564 *
2565 * Return codes
2566 *   @vport - pointer to the virtual N_Port data structure.
2567 *   NULL - port create failed.
2568 **/
2569struct lpfc_vport *
2570lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2571{
2572	struct lpfc_vport *vport;
2573	struct Scsi_Host  *shost;
2574	int error = 0;
2575
2576	if (dev != &phba->pcidev->dev)
2577		shost = scsi_host_alloc(&lpfc_vport_template,
2578					sizeof(struct lpfc_vport));
2579	else
2580		shost = scsi_host_alloc(&lpfc_template,
2581					sizeof(struct lpfc_vport));
2582	if (!shost)
2583		goto out;
2584
2585	vport = (struct lpfc_vport *) shost->hostdata;
2586	vport->phba = phba;
2587	vport->load_flag |= FC_LOADING;
2588	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2589	vport->fc_rscn_flush = 0;
2590
2591	lpfc_get_vport_cfgparam(vport);
2592	shost->unique_id = instance;
2593	shost->max_id = LPFC_MAX_TARGET;
2594	shost->max_lun = vport->cfg_max_luns;
2595	shost->this_id = -1;
2596	shost->max_cmd_len = 16;
2597	if (phba->sli_rev == LPFC_SLI_REV4) {
2598		shost->dma_boundary =
2599			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2600		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2601	}
2602
2603	/*
2604	 * Set initial can_queue value since 0 is no longer supported and
2605	 * scsi_add_host will fail. This will be adjusted later based on the
2606	 * max xri value determined in hba setup.
2607	 */
2608	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2609	if (dev != &phba->pcidev->dev) {
2610		shost->transportt = lpfc_vport_transport_template;
2611		vport->port_type = LPFC_NPIV_PORT;
2612	} else {
2613		shost->transportt = lpfc_transport_template;
2614		vport->port_type = LPFC_PHYSICAL_PORT;
2615	}
2616
2617	/* Initialize all internally managed lists. */
2618	INIT_LIST_HEAD(&vport->fc_nodes);
2619	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2620	spin_lock_init(&vport->work_port_lock);
2621
2622	init_timer(&vport->fc_disctmo);
2623	vport->fc_disctmo.function = lpfc_disc_timeout;
2624	vport->fc_disctmo.data = (unsigned long)vport;
2625
2626	init_timer(&vport->fc_fdmitmo);
2627	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2628	vport->fc_fdmitmo.data = (unsigned long)vport;
2629
2630	init_timer(&vport->els_tmofunc);
2631	vport->els_tmofunc.function = lpfc_els_timeout;
2632	vport->els_tmofunc.data = (unsigned long)vport;
2633	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2634	if (error)
2635		goto out_put_shost;
2636
2637	spin_lock_irq(&phba->hbalock);
2638	list_add_tail(&vport->listentry, &phba->port_list);
2639	spin_unlock_irq(&phba->hbalock);
2640	return vport;
2641
2642out_put_shost:
2643	scsi_host_put(shost);
2644out:
2645	return NULL;
2646}
2647
2648/**
2649 * destroy_port -  destroy an FC port
2650 * @vport: pointer to an lpfc virtual N_Port data structure.
2651 *
2652 * This routine destroys a FC port from the upper layer protocol. All the
2653 * resources associated with the port are released.
2654 **/
2655void
2656destroy_port(struct lpfc_vport *vport)
2657{
2658	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2659	struct lpfc_hba  *phba = vport->phba;
2660
2661	lpfc_debugfs_terminate(vport);
2662	fc_remove_host(shost);
2663	scsi_remove_host(shost);
2664
2665	spin_lock_irq(&phba->hbalock);
2666	list_del_init(&vport->listentry);
2667	spin_unlock_irq(&phba->hbalock);
2668
2669	lpfc_cleanup(vport);
2670	return;
2671}
2672
2673/**
2674 * lpfc_get_instance - Get a unique integer ID
2675 *
2676 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2677 * uses the kernel idr facility to perform the task.
2678 *
2679 * Return codes:
2680 *   instance - a unique integer ID allocated as the new instance.
2681 *   -1 - lpfc get instance failed.
2682 **/
2683int
2684lpfc_get_instance(void)
2685{
2686	int instance = 0;
2687
2688	/* Assign an unused number */
2689	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2690		return -1;
2691	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2692		return -1;
2693	return instance;
2694}
2695
2696/**
2697 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2698 * @shost: pointer to SCSI host data structure.
2699 * @time: elapsed time of the scan in jiffies.
2700 *
2701 * This routine is called by the SCSI layer with a SCSI host to determine
2702 * whether the scan host is finished.
2703 *
2704 * Note: there is no scan_start function as adapter initialization will have
2705 * asynchronously kicked off the link initialization.
2706 *
2707 * Return codes
2708 *   0 - SCSI host scan is not over yet.
2709 *   1 - SCSI host scan is over.
2710 **/
2711int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2712{
2713	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2714	struct lpfc_hba   *phba = vport->phba;
2715	int stat = 0;
2716
2717	spin_lock_irq(shost->host_lock);
2718
2719	if (vport->load_flag & FC_UNLOADING) {
2720		stat = 1;
2721		goto finished;
2722	}
2723	if (time >= 30 * HZ) {
2724		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2725				"0461 Scanning longer than 30 "
2726				"seconds.  Continuing initialization\n");
2727		stat = 1;
2728		goto finished;
2729	}
2730	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2731		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2732				"0465 Link down longer than 15 "
2733				"seconds.  Continuing initialization\n");
2734		stat = 1;
2735		goto finished;
2736	}
2737
2738	if (vport->port_state != LPFC_VPORT_READY)
2739		goto finished;
2740	if (vport->num_disc_nodes || vport->fc_prli_sent)
2741		goto finished;
2742	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2743		goto finished;
2744	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2745		goto finished;
2746
2747	stat = 1;
2748
2749finished:
2750	spin_unlock_irq(shost->host_lock);
2751	return stat;
2752}
2753
2754/**
2755 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2756 * @shost: pointer to SCSI host data structure.
2757 *
2758 * This routine initializes a given SCSI host attributes on a FC port. The
2759 * SCSI host can be either on top of a physical port or a virtual port.
2760 **/
2761void lpfc_host_attrib_init(struct Scsi_Host *shost)
2762{
2763	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2764	struct lpfc_hba   *phba = vport->phba;
2765	/*
2766	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
2767	 */
2768
2769	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2770	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2771	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2772
2773	memset(fc_host_supported_fc4s(shost), 0,
2774	       sizeof(fc_host_supported_fc4s(shost)));
2775	fc_host_supported_fc4s(shost)[2] = 1;
2776	fc_host_supported_fc4s(shost)[7] = 1;
2777
2778	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2779				 sizeof fc_host_symbolic_name(shost));
2780
2781	fc_host_supported_speeds(shost) = 0;
2782	if (phba->lmt & LMT_10Gb)
2783		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2784	if (phba->lmt & LMT_8Gb)
2785		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2786	if (phba->lmt & LMT_4Gb)
2787		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2788	if (phba->lmt & LMT_2Gb)
2789		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2790	if (phba->lmt & LMT_1Gb)
2791		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2792
2793	fc_host_maxframe_size(shost) =
2794		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2795		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2796
2797	/* This value is also unchanging */
2798	memset(fc_host_active_fc4s(shost), 0,
2799	       sizeof(fc_host_active_fc4s(shost)));
2800	fc_host_active_fc4s(shost)[2] = 1;
2801	fc_host_active_fc4s(shost)[7] = 1;
2802
2803	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2804	spin_lock_irq(shost->host_lock);
2805	vport->load_flag &= ~FC_LOADING;
2806	spin_unlock_irq(shost->host_lock);
2807}
2808
2809/**
2810 * lpfc_stop_port_s3 - Stop SLI3 device port
2811 * @phba: pointer to lpfc hba data structure.
2812 *
2813 * This routine is invoked to stop an SLI3 device port, it stops the device
2814 * from generating interrupts and stops the device driver's timers for the
2815 * device.
2816 **/
2817static void
2818lpfc_stop_port_s3(struct lpfc_hba *phba)
2819{
2820	/* Clear all interrupt enable conditions */
2821	writel(0, phba->HCregaddr);
2822	readl(phba->HCregaddr); /* flush */
2823	/* Clear all pending interrupts */
2824	writel(0xffffffff, phba->HAregaddr);
2825	readl(phba->HAregaddr); /* flush */
2826
2827	/* Reset some HBA SLI setup states */
2828	lpfc_stop_hba_timers(phba);
2829	phba->pport->work_port_events = 0;
2830}
2831
2832/**
2833 * lpfc_stop_port_s4 - Stop SLI4 device port
2834 * @phba: pointer to lpfc hba data structure.
2835 *
2836 * This routine is invoked to stop an SLI4 device port, it stops the device
2837 * from generating interrupts and stops the device driver's timers for the
2838 * device.
2839 **/
2840static void
2841lpfc_stop_port_s4(struct lpfc_hba *phba)
2842{
2843	/* Reset some HBA SLI4 setup states */
2844	lpfc_stop_hba_timers(phba);
2845	phba->pport->work_port_events = 0;
2846	phba->sli4_hba.intr_enable = 0;
2847}
2848
2849/**
2850 * lpfc_stop_port - Wrapper function for stopping hba port
2851 * @phba: Pointer to HBA context object.
2852 *
2853 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2854 * the API jump table function pointer from the lpfc_hba struct.
2855 **/
2856void
2857lpfc_stop_port(struct lpfc_hba *phba)
2858{
2859	phba->lpfc_stop_port(phba);
2860}
2861
2862/**
2863 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port.
2864 * @phba: pointer to lpfc hba data structure.
2865 *
2866 * This routine is invoked to remove the driver default fcf record from
2867 * the port.  This routine currently acts on FCF Index 0.
2868 *
2869 **/
2870void
2871lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2872{
2873	int rc = 0;
2874	LPFC_MBOXQ_t *mboxq;
2875	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2876	uint32_t mbox_tmo, req_len;
2877	uint32_t shdr_status, shdr_add_status;
2878
2879	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2880	if (!mboxq) {
2881		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2882			"2020 Failed to allocate mbox for ADD_FCF cmd\n");
2883		return;
2884	}
2885
2886	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2887		  sizeof(struct lpfc_sli4_cfg_mhdr);
2888	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2889			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2890			      req_len, LPFC_SLI4_MBX_EMBED);
2891	/*
2892	 * In phase 1, there is a single FCF index, 0.  In phase2, the driver
2893	 * supports multiple FCF indices.
2894	 */
2895	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2896	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2897	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2898	       phba->fcf.current_rec.fcf_indx);
2899
2900	if (!phba->sli4_hba.intr_enable)
2901		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2902	else {
2903		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2904		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2905	}
2906	/* The IOCTL status is embedded in the mailbox subheader. */
2907	shdr_status = bf_get(lpfc_mbox_hdr_status,
2908			     &del_fcf_record->header.cfg_shdr.response);
2909	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2910				 &del_fcf_record->header.cfg_shdr.response);
2911	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2912		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2913				"2516 DEL FCF of default FCF Index failed "
2914				"mbx status x%x, status x%x add_status x%x\n",
2915				rc, shdr_status, shdr_add_status);
2916	}
2917	if (rc != MBX_TIMEOUT)
2918		mempool_free(mboxq, phba->mbox_mem_pool);
2919}
2920
2921/**
2922 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2923 * @phba: Pointer to hba for which this call is being executed.
2924 *
2925 * This routine starts the timer waiting for the FCF rediscovery to complete.
2926 **/
2927void
2928lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2929{
2930	unsigned long fcf_redisc_wait_tmo =
2931		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2932	/* Start fcf rediscovery wait period timer */
2933	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2934	spin_lock_irq(&phba->hbalock);
2935	/* Allow action to new fcf asynchronous event */
2936	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2937	/* Mark the FCF rediscovery pending state */
2938	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2939	spin_unlock_irq(&phba->hbalock);
2940}
2941
2942/**
2943 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2944 * @ptr: Map to lpfc_hba data structure pointer.
2945 *
2946 * This routine is invoked when waiting for FCF table rediscover has been
2947 * timed out. If new FCF record(s) has (have) been discovered during the
2948 * wait period, a new FCF event shall be added to the FCOE async event
2949 * list, and then worker thread shall be waked up for processing from the
2950 * worker thread context.
2951 **/
2952void
2953lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2954{
2955	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2956
2957	/* Don't send FCF rediscovery event if timer cancelled */
2958	spin_lock_irq(&phba->hbalock);
2959	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2960		spin_unlock_irq(&phba->hbalock);
2961		return;
2962	}
2963	/* Clear FCF rediscovery timer pending flag */
2964	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2965	/* FCF rediscovery event to worker thread */
2966	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2967	spin_unlock_irq(&phba->hbalock);
2968	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2969			"2776 FCF rediscover wait timer expired, post "
2970			"a worker thread event for FCF table scan\n");
2971	/* wake up worker thread */
2972	lpfc_worker_wake_up(phba);
2973}
2974
2975/**
2976 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2977 * @phba: pointer to lpfc hba data structure.
2978 *
2979 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2980 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2981 * was successful and the firmware supports FCoE. Any other return indicates
2982 * a error. It is assumed that this function will be called before interrupts
2983 * are enabled.
2984 **/
2985static int
2986lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2987{
2988	int rc = 0;
2989	LPFC_MBOXQ_t *mboxq;
2990	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2991	uint32_t length;
2992	uint32_t shdr_status, shdr_add_status;
2993
2994	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2995	if (!mboxq) {
2996		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2997				"2621 Failed to allocate mbox for "
2998				"query firmware config cmd\n");
2999		return -ENOMEM;
3000	}
3001	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
3002	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
3003		  sizeof(struct lpfc_sli4_cfg_mhdr));
3004	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
3005			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
3006			 length, LPFC_SLI4_MBX_EMBED);
3007	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3008	/* The IOCTL status is embedded in the mailbox subheader. */
3009	shdr_status = bf_get(lpfc_mbox_hdr_status,
3010			     &query_fw_cfg->header.cfg_shdr.response);
3011	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
3012				 &query_fw_cfg->header.cfg_shdr.response);
3013	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
3014		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3015				"2622 Query Firmware Config failed "
3016				"mbx status x%x, status x%x add_status x%x\n",
3017				rc, shdr_status, shdr_add_status);
3018		return -EINVAL;
3019	}
3020	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
3021		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3022				"2623 FCoE Function not supported by firmware. "
3023				"Function mode = %08x\n",
3024				query_fw_cfg->function_mode);
3025		return -EINVAL;
3026	}
3027	if (rc != MBX_TIMEOUT)
3028		mempool_free(mboxq, phba->mbox_mem_pool);
3029	return 0;
3030}
3031
3032/**
3033 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3034 * @phba: pointer to lpfc hba data structure.
3035 * @acqe_link: pointer to the async link completion queue entry.
3036 *
3037 * This routine is to parse the SLI4 link-attention link fault code and
3038 * translate it into the base driver's read link attention mailbox command
3039 * status.
3040 *
3041 * Return: Link-attention status in terms of base driver's coding.
3042 **/
3043static uint16_t
3044lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3045			   struct lpfc_acqe_link *acqe_link)
3046{
3047	uint16_t latt_fault;
3048
3049	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3050	case LPFC_ASYNC_LINK_FAULT_NONE:
3051	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3052	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3053		latt_fault = 0;
3054		break;
3055	default:
3056		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3057				"0398 Invalid link fault code: x%x\n",
3058				bf_get(lpfc_acqe_link_fault, acqe_link));
3059		latt_fault = MBXERR_ERROR;
3060		break;
3061	}
3062	return latt_fault;
3063}
3064
3065/**
3066 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3067 * @phba: pointer to lpfc hba data structure.
3068 * @acqe_link: pointer to the async link completion queue entry.
3069 *
3070 * This routine is to parse the SLI4 link attention type and translate it
3071 * into the base driver's link attention type coding.
3072 *
3073 * Return: Link attention type in terms of base driver's coding.
3074 **/
3075static uint8_t
3076lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3077			  struct lpfc_acqe_link *acqe_link)
3078{
3079	uint8_t att_type;
3080
3081	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3082	case LPFC_ASYNC_LINK_STATUS_DOWN:
3083	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3084		att_type = AT_LINK_DOWN;
3085		break;
3086	case LPFC_ASYNC_LINK_STATUS_UP:
3087		/* Ignore physical link up events - wait for logical link up */
3088		att_type = AT_RESERVED;
3089		break;
3090	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3091		att_type = AT_LINK_UP;
3092		break;
3093	default:
3094		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3095				"0399 Invalid link attention type: x%x\n",
3096				bf_get(lpfc_acqe_link_status, acqe_link));
3097		att_type = AT_RESERVED;
3098		break;
3099	}
3100	return att_type;
3101}
3102
3103/**
3104 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3105 * @phba: pointer to lpfc hba data structure.
3106 * @acqe_link: pointer to the async link completion queue entry.
3107 *
3108 * This routine is to parse the SLI4 link-attention link speed and translate
3109 * it into the base driver's link-attention link speed coding.
3110 *
3111 * Return: Link-attention link speed in terms of base driver's coding.
3112 **/
3113static uint8_t
3114lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3115				struct lpfc_acqe_link *acqe_link)
3116{
3117	uint8_t link_speed;
3118
3119	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3120	case LPFC_ASYNC_LINK_SPEED_ZERO:
3121		link_speed = LA_UNKNW_LINK;
3122		break;
3123	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3124		link_speed = LA_UNKNW_LINK;
3125		break;
3126	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3127		link_speed = LA_UNKNW_LINK;
3128		break;
3129	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3130		link_speed = LA_1GHZ_LINK;
3131		break;
3132	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3133		link_speed = LA_10GHZ_LINK;
3134		break;
3135	default:
3136		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3137				"0483 Invalid link-attention link speed: x%x\n",
3138				bf_get(lpfc_acqe_link_speed, acqe_link));
3139		link_speed = LA_UNKNW_LINK;
3140		break;
3141	}
3142	return link_speed;
3143}
3144
3145/**
3146 * lpfc_sli4_async_link_evt - Process the asynchronous link event
3147 * @phba: pointer to lpfc hba data structure.
3148 * @acqe_link: pointer to the async link completion queue entry.
3149 *
3150 * This routine is to handle the SLI4 asynchronous link event.
3151 **/
3152static void
3153lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3154			 struct lpfc_acqe_link *acqe_link)
3155{
3156	struct lpfc_dmabuf *mp;
3157	LPFC_MBOXQ_t *pmb;
3158	MAILBOX_t *mb;
3159	READ_LA_VAR *la;
3160	uint8_t att_type;
3161
3162	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3163	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3164		return;
3165	phba->fcoe_eventtag = acqe_link->event_tag;
3166	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3167	if (!pmb) {
3168		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3169				"0395 The mboxq allocation failed\n");
3170		return;
3171	}
3172	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3173	if (!mp) {
3174		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3175				"0396 The lpfc_dmabuf allocation failed\n");
3176		goto out_free_pmb;
3177	}
3178	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3179	if (!mp->virt) {
3180		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3181				"0397 The mbuf allocation failed\n");
3182		goto out_free_dmabuf;
3183	}
3184
3185	/* Cleanup any outstanding ELS commands */
3186	lpfc_els_flush_all_cmd(phba);
3187
3188	/* Block ELS IOCBs until we have done process link event */
3189	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3190
3191	/* Update link event statistics */
3192	phba->sli.slistat.link_event++;
3193
3194	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3195	lpfc_read_la(phba, pmb, mp);
3196	pmb->vport = phba->pport;
3197
3198	/* Parse and translate status field */
3199	mb = &pmb->u.mb;
3200	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3201
3202	/* Parse and translate link attention fields */
3203	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3204	la->eventTag = acqe_link->event_tag;
3205	la->attType = att_type;
3206	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3207
3208	/* Fake the the following irrelvant fields */
3209	la->topology = TOPOLOGY_PT_PT;
3210	la->granted_AL_PA = 0;
3211	la->il = 0;
3212	la->pb = 0;
3213	la->fa = 0;
3214	la->mm = 0;
3215
3216	/* Keep the link status for extra SLI4 state machine reference */
3217	phba->sli4_hba.link_state.speed =
3218				bf_get(lpfc_acqe_link_speed, acqe_link);
3219	phba->sli4_hba.link_state.duplex =
3220				bf_get(lpfc_acqe_link_duplex, acqe_link);
3221	phba->sli4_hba.link_state.status =
3222				bf_get(lpfc_acqe_link_status, acqe_link);
3223	phba->sli4_hba.link_state.physical =
3224				bf_get(lpfc_acqe_link_physical, acqe_link);
3225	phba->sli4_hba.link_state.fault =
3226				bf_get(lpfc_acqe_link_fault, acqe_link);
3227	phba->sli4_hba.link_state.logical_speed =
3228				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3229
3230	/* Invoke the lpfc_handle_latt mailbox command callback function */
3231	lpfc_mbx_cmpl_read_la(phba, pmb);
3232
3233	return;
3234
3235out_free_dmabuf:
3236	kfree(mp);
3237out_free_pmb:
3238	mempool_free(pmb, phba->mbox_mem_pool);
3239}
3240
3241/**
3242 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3243 * @vport: pointer to vport data structure.
3244 *
3245 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3246 * response to a CVL event.
3247 *
3248 * Return the pointer to the ndlp with the vport if successful, otherwise
3249 * return NULL.
3250 **/
3251static struct lpfc_nodelist *
3252lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3253{
3254	struct lpfc_nodelist *ndlp;
3255	struct Scsi_Host *shost;
3256	struct lpfc_hba *phba;
3257
3258	if (!vport)
3259		return NULL;
3260	phba = vport->phba;
3261	if (!phba)
3262		return NULL;
3263	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3264	if (!ndlp) {
3265		/* Cannot find existing Fabric ndlp, so allocate a new one */
3266		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3267		if (!ndlp)
3268			return 0;
3269		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3270		/* Set the node type */
3271		ndlp->nlp_type |= NLP_FABRIC;
3272		/* Put ndlp onto node list */
3273		lpfc_enqueue_node(vport, ndlp);
3274	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3275		/* re-setup ndlp without removing from node list */
3276		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3277		if (!ndlp)
3278			return 0;
3279	}
3280	if (phba->pport->port_state <= LPFC_FLOGI)
3281		return NULL;
3282	/* If virtual link is not yet instantiated ignore CVL */
3283	if (vport->port_state <= LPFC_FDISC)
3284		return NULL;
3285	shost = lpfc_shost_from_vport(vport);
3286	if (!shost)
3287		return NULL;
3288	lpfc_linkdown_port(vport);
3289	lpfc_cleanup_pending_mbox(vport);
3290	spin_lock_irq(shost->host_lock);
3291	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3292	spin_unlock_irq(shost->host_lock);
3293
3294	return ndlp;
3295}
3296
3297/**
3298 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3299 * @vport: pointer to lpfc hba data structure.
3300 *
3301 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3302 * response to a FCF dead event.
3303 **/
3304static void
3305lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3306{
3307	struct lpfc_vport **vports;
3308	int i;
3309
3310	vports = lpfc_create_vport_work_array(phba);
3311	if (vports)
3312		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3313			lpfc_sli4_perform_vport_cvl(vports[i]);
3314	lpfc_destroy_vport_work_array(phba, vports);
3315}
3316
3317/**
3318 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3319 * @phba: pointer to lpfc hba data structure.
3320 * @acqe_link: pointer to the async fcoe completion queue entry.
3321 *
3322 * This routine is to handle the SLI4 asynchronous fcoe event.
3323 **/
3324static void
3325lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3326			 struct lpfc_acqe_fcoe *acqe_fcoe)
3327{
3328	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3329	int rc;
3330	struct lpfc_vport *vport;
3331	struct lpfc_nodelist *ndlp;
3332	struct Scsi_Host  *shost;
3333	int active_vlink_present;
3334	struct lpfc_vport **vports;
3335	int i;
3336
3337	phba->fc_eventTag = acqe_fcoe->event_tag;
3338	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3339	switch (event_type) {
3340	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3341	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3342		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3343			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3344					LOG_DISCOVERY,
3345					"2546 New FCF found event: "
3346					"evt_tag:x%x, fcf_index:x%x\n",
3347					acqe_fcoe->event_tag,
3348					acqe_fcoe->index);
3349		else
3350			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3351					LOG_DISCOVERY,
3352					"2788 FCF parameter modified event: "
3353					"evt_tag:x%x, fcf_index:x%x\n",
3354					acqe_fcoe->event_tag,
3355					acqe_fcoe->index);
3356		spin_lock_irq(&phba->hbalock);
3357		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3358		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3359			/*
3360			 * If the current FCF is in discovered state or
3361			 * FCF discovery is in progress, do nothing.
3362			 */
3363			spin_unlock_irq(&phba->hbalock);
3364			break;
3365		}
3366
3367		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3368			/*
3369			 * If fast FCF failover rescan event is pending,
3370			 * do nothing.
3371			 */
3372			spin_unlock_irq(&phba->hbalock);
3373			break;
3374		}
3375		spin_unlock_irq(&phba->hbalock);
3376
3377		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3378		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3379			/*
3380			 * During period of FCF discovery, read the FCF
3381			 * table record indexed by the event to update
3382			 * FCF round robin failover eligible FCF bmask.
3383			 */
3384			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3385					LOG_DISCOVERY,
3386					"2779 Read new FCF record with "
3387					"fcf_index:x%x for updating FCF "
3388					"round robin failover bmask\n",
3389					acqe_fcoe->index);
3390			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3391		}
3392
3393		/* Otherwise, scan the entire FCF table and re-discover SAN */
3394		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3395				"2770 Start FCF table scan due to new FCF "
3396				"event: evt_tag:x%x, fcf_index:x%x\n",
3397				acqe_fcoe->event_tag, acqe_fcoe->index);
3398		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3399						     LPFC_FCOE_FCF_GET_FIRST);
3400		if (rc)
3401			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3402					"2547 Issue FCF scan read FCF mailbox "
3403					"command failed 0x%x\n", rc);
3404		break;
3405
3406	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3407		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3408			"2548 FCF Table full count 0x%x tag 0x%x\n",
3409			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3410			acqe_fcoe->event_tag);
3411		break;
3412
3413	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3414		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3415			"2549 FCF disconnected from network index 0x%x"
3416			" tag 0x%x\n", acqe_fcoe->index,
3417			acqe_fcoe->event_tag);
3418		/* If the event is not for currently used fcf do nothing */
3419		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3420			break;
3421		/* We request port to rediscover the entire FCF table for
3422		 * a fast recovery from case that the current FCF record
3423		 * is no longer valid if we are not in the middle of FCF
3424		 * failover process already.
3425		 */
3426		spin_lock_irq(&phba->hbalock);
3427		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3428			spin_unlock_irq(&phba->hbalock);
3429			/* Update FLOGI FCF failover eligible FCF bmask */
3430			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3431			break;
3432		}
3433		/* Mark the fast failover process in progress */
3434		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3435		spin_unlock_irq(&phba->hbalock);
3436		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3437				"2771 Start FCF fast failover process due to "
3438				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3439				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3440		rc = lpfc_sli4_redisc_fcf_table(phba);
3441		if (rc) {
3442			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3443					LOG_DISCOVERY,
3444					"2772 Issue FCF rediscover mabilbox "
3445					"command failed, fail through to FCF "
3446					"dead event\n");
3447			spin_lock_irq(&phba->hbalock);
3448			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3449			spin_unlock_irq(&phba->hbalock);
3450			/*
3451			 * Last resort will fail over by treating this
3452			 * as a link down to FCF registration.
3453			 */
3454			lpfc_sli4_fcf_dead_failthrough(phba);
3455		} else
3456			/* Handling fast FCF failover to a DEAD FCF event
3457			 * is considered equalivant to receiving CVL to all
3458			 * vports.
3459			 */
3460			lpfc_sli4_perform_all_vport_cvl(phba);
3461		break;
3462	case LPFC_FCOE_EVENT_TYPE_CVL:
3463		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3464			"2718 Clear Virtual Link Received for VPI 0x%x"
3465			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3466		vport = lpfc_find_vport_by_vpid(phba,
3467				acqe_fcoe->index - phba->vpi_base);
3468		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3469		if (!ndlp)
3470			break;
3471		active_vlink_present = 0;
3472
3473		vports = lpfc_create_vport_work_array(phba);
3474		if (vports) {
3475			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3476					i++) {
3477				if ((!(vports[i]->fc_flag &
3478					FC_VPORT_CVL_RCVD)) &&
3479					(vports[i]->port_state > LPFC_FDISC)) {
3480					active_vlink_present = 1;
3481					break;
3482				}
3483			}
3484			lpfc_destroy_vport_work_array(phba, vports);
3485		}
3486
3487		if (active_vlink_present) {
3488			/*
3489			 * If there are other active VLinks present,
3490			 * re-instantiate the Vlink using FDISC.
3491			 */
3492			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3493			shost = lpfc_shost_from_vport(vport);
3494			spin_lock_irq(shost->host_lock);
3495			ndlp->nlp_flag |= NLP_DELAY_TMO;
3496			spin_unlock_irq(shost->host_lock);
3497			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3498			vport->port_state = LPFC_FDISC;
3499		} else {
3500			/*
3501			 * Otherwise, we request port to rediscover
3502			 * the entire FCF table for a fast recovery
3503			 * from possible case that the current FCF
3504			 * is no longer valid if we are not already
3505			 * in the FCF failover process.
3506			 */
3507			spin_lock_irq(&phba->hbalock);
3508			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3509				spin_unlock_irq(&phba->hbalock);
3510				break;
3511			}
3512			/* Mark the fast failover process in progress */
3513			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3514			spin_unlock_irq(&phba->hbalock);
3515			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3516					LOG_DISCOVERY,
3517					"2773 Start FCF fast failover due "
3518					"to CVL event: evt_tag:x%x\n",
3519					acqe_fcoe->event_tag);
3520			rc = lpfc_sli4_redisc_fcf_table(phba);
3521			if (rc) {
3522				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3523						LOG_DISCOVERY,
3524						"2774 Issue FCF rediscover "
3525						"mabilbox command failed, "
3526						"through to CVL event\n");
3527				spin_lock_irq(&phba->hbalock);
3528				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3529				spin_unlock_irq(&phba->hbalock);
3530				/*
3531				 * Last resort will be re-try on the
3532				 * the current registered FCF entry.
3533				 */
3534				lpfc_retry_pport_discovery(phba);
3535			}
3536		}
3537		break;
3538	default:
3539		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3540			"0288 Unknown FCoE event type 0x%x event tag "
3541			"0x%x\n", event_type, acqe_fcoe->event_tag);
3542		break;
3543	}
3544}
3545
3546/**
3547 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3548 * @phba: pointer to lpfc hba data structure.
3549 * @acqe_link: pointer to the async dcbx completion queue entry.
3550 *
3551 * This routine is to handle the SLI4 asynchronous dcbx event.
3552 **/
3553static void
3554lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3555			 struct lpfc_acqe_dcbx *acqe_dcbx)
3556{
3557	phba->fc_eventTag = acqe_dcbx->event_tag;
3558	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3559			"0290 The SLI4 DCBX asynchronous event is not "
3560			"handled yet\n");
3561}
3562
3563/**
3564 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3565 * @phba: pointer to lpfc hba data structure.
3566 * @acqe_link: pointer to the async grp5 completion queue entry.
3567 *
3568 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3569 * is an asynchronous notified of a logical link speed change.  The Port
3570 * reports the logical link speed in units of 10Mbps.
3571 **/
3572static void
3573lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3574			 struct lpfc_acqe_grp5 *acqe_grp5)
3575{
3576	uint16_t prev_ll_spd;
3577
3578	phba->fc_eventTag = acqe_grp5->event_tag;
3579	phba->fcoe_eventtag = acqe_grp5->event_tag;
3580	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3581	phba->sli4_hba.link_state.logical_speed =
3582		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3583	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3584			"2789 GRP5 Async Event: Updating logical link speed "
3585			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3586			(phba->sli4_hba.link_state.logical_speed*10));
3587}
3588
3589/**
3590 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3591 * @phba: pointer to lpfc hba data structure.
3592 *
3593 * This routine is invoked by the worker thread to process all the pending
3594 * SLI4 asynchronous events.
3595 **/
3596void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3597{
3598	struct lpfc_cq_event *cq_event;
3599
3600	/* First, declare the async event has been handled */
3601	spin_lock_irq(&phba->hbalock);
3602	phba->hba_flag &= ~ASYNC_EVENT;
3603	spin_unlock_irq(&phba->hbalock);
3604	/* Now, handle all the async events */
3605	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3606		/* Get the first event from the head of the event queue */
3607		spin_lock_irq(&phba->hbalock);
3608		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3609				 cq_event, struct lpfc_cq_event, list);
3610		spin_unlock_irq(&phba->hbalock);
3611		/* Process the asynchronous event */
3612		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3613		case LPFC_TRAILER_CODE_LINK:
3614			lpfc_sli4_async_link_evt(phba,
3615						 &cq_event->cqe.acqe_link);
3616			break;
3617		case LPFC_TRAILER_CODE_FCOE:
3618			lpfc_sli4_async_fcoe_evt(phba,
3619						 &cq_event->cqe.acqe_fcoe);
3620			break;
3621		case LPFC_TRAILER_CODE_DCBX:
3622			lpfc_sli4_async_dcbx_evt(phba,
3623						 &cq_event->cqe.acqe_dcbx);
3624			break;
3625		case LPFC_TRAILER_CODE_GRP5:
3626			lpfc_sli4_async_grp5_evt(phba,
3627						 &cq_event->cqe.acqe_grp5);
3628			break;
3629		default:
3630			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3631					"1804 Invalid asynchrous event code: "
3632					"x%x\n", bf_get(lpfc_trailer_code,
3633					&cq_event->cqe.mcqe_cmpl));
3634			break;
3635		}
3636		/* Free the completion event processed to the free pool */
3637		lpfc_sli4_cq_event_release(phba, cq_event);
3638	}
3639}
3640
3641/**
3642 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3643 * @phba: pointer to lpfc hba data structure.
3644 *
3645 * This routine is invoked by the worker thread to process FCF table
3646 * rediscovery pending completion event.
3647 **/
3648void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3649{
3650	int rc;
3651
3652	spin_lock_irq(&phba->hbalock);
3653	/* Clear FCF rediscovery timeout event */
3654	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3655	/* Clear driver fast failover FCF record flag */
3656	phba->fcf.failover_rec.flag = 0;
3657	/* Set state for FCF fast failover */
3658	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3659	spin_unlock_irq(&phba->hbalock);
3660
3661	/* Scan FCF table from the first entry to re-discover SAN */
3662	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3663			"2777 Start FCF table scan after FCF "
3664			"rediscovery quiescent period over\n");
3665	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3666	if (rc)
3667		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3668				"2747 Issue FCF scan read FCF mailbox "
3669				"command failed 0x%x\n", rc);
3670}
3671
3672/**
3673 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3674 * @phba: pointer to lpfc hba data structure.
3675 * @dev_grp: The HBA PCI-Device group number.
3676 *
3677 * This routine is invoked to set up the per HBA PCI-Device group function
3678 * API jump table entries.
3679 *
3680 * Return: 0 if success, otherwise -ENODEV
3681 **/
3682int
3683lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3684{
3685	int rc;
3686
3687	/* Set up lpfc PCI-device group */
3688	phba->pci_dev_grp = dev_grp;
3689
3690	/* The LPFC_PCI_DEV_OC uses SLI4 */
3691	if (dev_grp == LPFC_PCI_DEV_OC)
3692		phba->sli_rev = LPFC_SLI_REV4;
3693
3694	/* Set up device INIT API function jump table */
3695	rc = lpfc_init_api_table_setup(phba, dev_grp);
3696	if (rc)
3697		return -ENODEV;
3698	/* Set up SCSI API function jump table */
3699	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3700	if (rc)
3701		return -ENODEV;
3702	/* Set up SLI API function jump table */
3703	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3704	if (rc)
3705		return -ENODEV;
3706	/* Set up MBOX API function jump table */
3707	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3708	if (rc)
3709		return -ENODEV;
3710
3711	return 0;
3712}
3713
3714/**
3715 * lpfc_log_intr_mode - Log the active interrupt mode
3716 * @phba: pointer to lpfc hba data structure.
3717 * @intr_mode: active interrupt mode adopted.
3718 *
3719 * This routine it invoked to log the currently used active interrupt mode
3720 * to the device.
3721 **/
3722static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3723{
3724	switch (intr_mode) {
3725	case 0:
3726		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3727				"0470 Enable INTx interrupt mode.\n");
3728		break;
3729	case 1:
3730		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3731				"0481 Enabled MSI interrupt mode.\n");
3732		break;
3733	case 2:
3734		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3735				"0480 Enabled MSI-X interrupt mode.\n");
3736		break;
3737	default:
3738		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3739				"0482 Illegal interrupt mode.\n");
3740		break;
3741	}
3742	return;
3743}
3744
3745/**
3746 * lpfc_enable_pci_dev - Enable a generic PCI device.
3747 * @phba: pointer to lpfc hba data structure.
3748 *
3749 * This routine is invoked to enable the PCI device that is common to all
3750 * PCI devices.
3751 *
3752 * Return codes
3753 * 	0 - successful
3754 * 	other values - error
3755 **/
3756static int
3757lpfc_enable_pci_dev(struct lpfc_hba *phba)
3758{
3759	struct pci_dev *pdev;
3760	int bars;
3761
3762	/* Obtain PCI device reference */
3763	if (!phba->pcidev)
3764		goto out_error;
3765	else
3766		pdev = phba->pcidev;
3767	/* Select PCI BARs */
3768	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3769	/* Enable PCI device */
3770	if (pci_enable_device_mem(pdev))
3771		goto out_error;
3772	/* Request PCI resource for the device */
3773	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3774		goto out_disable_device;
3775	/* Set up device as PCI master and save state for EEH */
3776	pci_set_master(pdev);
3777	pci_try_set_mwi(pdev);
3778	pci_save_state(pdev);
3779
3780	return 0;
3781
3782out_disable_device:
3783	pci_disable_device(pdev);
3784out_error:
3785	return -ENODEV;
3786}
3787
3788/**
3789 * lpfc_disable_pci_dev - Disable a generic PCI device.
3790 * @phba: pointer to lpfc hba data structure.
3791 *
3792 * This routine is invoked to disable the PCI device that is common to all
3793 * PCI devices.
3794 **/
3795static void
3796lpfc_disable_pci_dev(struct lpfc_hba *phba)
3797{
3798	struct pci_dev *pdev;
3799	int bars;
3800
3801	/* Obtain PCI device reference */
3802	if (!phba->pcidev)
3803		return;
3804	else
3805		pdev = phba->pcidev;
3806	/* Select PCI BARs */
3807	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3808	/* Release PCI resource and disable PCI device */
3809	pci_release_selected_regions(pdev, bars);
3810	pci_disable_device(pdev);
3811	/* Null out PCI private reference to driver */
3812	pci_set_drvdata(pdev, NULL);
3813
3814	return;
3815}
3816
3817/**
3818 * lpfc_reset_hba - Reset a hba
3819 * @phba: pointer to lpfc hba data structure.
3820 *
3821 * This routine is invoked to reset a hba device. It brings the HBA
3822 * offline, performs a board restart, and then brings the board back
3823 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3824 * on outstanding mailbox commands.
3825 **/
3826void
3827lpfc_reset_hba(struct lpfc_hba *phba)
3828{
3829	/* If resets are disabled then set error state and return. */
3830	if (!phba->cfg_enable_hba_reset) {
3831		phba->link_state = LPFC_HBA_ERROR;
3832		return;
3833	}
3834	lpfc_offline_prep(phba);
3835	lpfc_offline(phba);
3836	lpfc_sli_brdrestart(phba);
3837	lpfc_online(phba);
3838	lpfc_unblock_mgmt_io(phba);
3839}
3840
3841/**
3842 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3843 * @phba: pointer to lpfc hba data structure.
3844 *
3845 * This routine is invoked to set up the driver internal resources specific to
3846 * support the SLI-3 HBA device it attached to.
3847 *
3848 * Return codes
3849 * 	0 - successful
3850 * 	other values - error
3851 **/
3852static int
3853lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3854{
3855	struct lpfc_sli *psli;
3856
3857	/*
3858	 * Initialize timers used by driver
3859	 */
3860
3861	/* Heartbeat timer */
3862	init_timer(&phba->hb_tmofunc);
3863	phba->hb_tmofunc.function = lpfc_hb_timeout;
3864	phba->hb_tmofunc.data = (unsigned long)phba;
3865
3866	psli = &phba->sli;
3867	/* MBOX heartbeat timer */
3868	init_timer(&psli->mbox_tmo);
3869	psli->mbox_tmo.function = lpfc_mbox_timeout;
3870	psli->mbox_tmo.data = (unsigned long) phba;
3871	/* FCP polling mode timer */
3872	init_timer(&phba->fcp_poll_timer);
3873	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3874	phba->fcp_poll_timer.data = (unsigned long) phba;
3875	/* Fabric block timer */
3876	init_timer(&phba->fabric_block_timer);
3877	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3878	phba->fabric_block_timer.data = (unsigned long) phba;
3879	/* EA polling mode timer */
3880	init_timer(&phba->eratt_poll);
3881	phba->eratt_poll.function = lpfc_poll_eratt;
3882	phba->eratt_poll.data = (unsigned long) phba;
3883
3884	/* Host attention work mask setup */
3885	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3886	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3887
3888	/* Get all the module params for configuring this host */
3889	lpfc_get_cfgparam(phba);
3890	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
3891		phba->menlo_flag |= HBA_MENLO_SUPPORT;
3892		/* check for menlo minimum sg count */
3893		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
3894			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
3895	}
3896
3897	/*
3898	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3899	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3900	 * 2 segments are added since the IOCB needs a command and response bde.
3901	 */
3902	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3903		sizeof(struct fcp_rsp) +
3904			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3905
3906	if (phba->cfg_enable_bg) {
3907		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3908		phba->cfg_sg_dma_buf_size +=
3909			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3910	}
3911
3912	/* Also reinitialize the host templates with new values. */
3913	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3914	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3915
3916	phba->max_vpi = LPFC_MAX_VPI;
3917	/* This will be set to correct value after config_port mbox */
3918	phba->max_vports = 0;
3919
3920	/*
3921	 * Initialize the SLI Layer to run with lpfc HBAs.
3922	 */
3923	lpfc_sli_setup(phba);
3924	lpfc_sli_queue_setup(phba);
3925
3926	/* Allocate device driver memory */
3927	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3928		return -ENOMEM;
3929
3930	return 0;
3931}
3932
3933/**
3934 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3935 * @phba: pointer to lpfc hba data structure.
3936 *
3937 * This routine is invoked to unset the driver internal resources set up
3938 * specific for supporting the SLI-3 HBA device it attached to.
3939 **/
3940static void
3941lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3942{
3943	/* Free device driver memory allocated */
3944	lpfc_mem_free_all(phba);
3945
3946	return;
3947}
3948
3949/**
3950 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3951 * @phba: pointer to lpfc hba data structure.
3952 *
3953 * This routine is invoked to set up the driver internal resources specific to
3954 * support the SLI-4 HBA device it attached to.
3955 *
3956 * Return codes
3957 * 	0 - successful
3958 * 	other values - error
3959 **/
3960static int
3961lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3962{
3963	struct lpfc_sli *psli;
3964	LPFC_MBOXQ_t *mboxq;
3965	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3966	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3967	struct lpfc_mqe *mqe;
3968	int longs;
3969
3970	/* Before proceed, wait for POST done and device ready */
3971	rc = lpfc_sli4_post_status_check(phba);
3972	if (rc)
3973		return -ENODEV;
3974
3975	/*
3976	 * Initialize timers used by driver
3977	 */
3978
3979	/* Heartbeat timer */
3980	init_timer(&phba->hb_tmofunc);
3981	phba->hb_tmofunc.function = lpfc_hb_timeout;
3982	phba->hb_tmofunc.data = (unsigned long)phba;
3983
3984	psli = &phba->sli;
3985	/* MBOX heartbeat timer */
3986	init_timer(&psli->mbox_tmo);
3987	psli->mbox_tmo.function = lpfc_mbox_timeout;
3988	psli->mbox_tmo.data = (unsigned long) phba;
3989	/* Fabric block timer */
3990	init_timer(&phba->fabric_block_timer);
3991	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3992	phba->fabric_block_timer.data = (unsigned long) phba;
3993	/* EA polling mode timer */
3994	init_timer(&phba->eratt_poll);
3995	phba->eratt_poll.function = lpfc_poll_eratt;
3996	phba->eratt_poll.data = (unsigned long) phba;
3997	/* FCF rediscover timer */
3998	init_timer(&phba->fcf.redisc_wait);
3999	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4000	phba->fcf.redisc_wait.data = (unsigned long)phba;
4001
4002	/*
4003	 * We need to do a READ_CONFIG mailbox command here before
4004	 * calling lpfc_get_cfgparam. For VFs this will report the
4005	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4006	 * All of the resources allocated
4007	 * for this Port are tied to these values.
4008	 */
4009	/* Get all the module params for configuring this host */
4010	lpfc_get_cfgparam(phba);
4011	phba->max_vpi = LPFC_MAX_VPI;
4012	/* This will be set to correct value after the read_config mbox */
4013	phba->max_vports = 0;
4014
4015	/* Program the default value of vlan_id and fc_map */
4016	phba->valid_vlan = 0;
4017	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4018	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4019	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4020
4021	/*
4022	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4023	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4024	 * 2 segments are added since the IOCB needs a command and response bde.
4025	 * To insure that the scsi sgl does not cross a 4k page boundary only
4026	 * sgl sizes of must be a power of 2.
4027	 */
4028	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4029		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4030	/* Feature Level 1 hardware is limited to 2 pages */
4031	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
4032	     LPFC_SLI_INTF_FEATURELEVEL1_1))
4033		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4034	else
4035		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4036	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4037	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4038	     dma_buf_size = dma_buf_size << 1)
4039		;
4040	if (dma_buf_size == max_buf_size)
4041		phba->cfg_sg_seg_cnt = (dma_buf_size -
4042			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4043			(2 * sizeof(struct sli4_sge))) /
4044				sizeof(struct sli4_sge);
4045	phba->cfg_sg_dma_buf_size = dma_buf_size;
4046
4047	/* Initialize buffer queue management fields */
4048	hbq_count = lpfc_sli_hbq_count();
4049	for (i = 0; i < hbq_count; ++i)
4050		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4051	INIT_LIST_HEAD(&phba->rb_pend_list);
4052	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4053	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4054
4055	/*
4056	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4057	 */
4058	/* Initialize the Abort scsi buffer list used by driver */
4059	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4060	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4061	/* This abort list used by worker thread */
4062	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4063
4064	/*
4065	 * Initialize dirver internal slow-path work queues
4066	 */
4067
4068	/* Driver internel slow-path CQ Event pool */
4069	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4070	/* Response IOCB work queue list */
4071	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4072	/* Asynchronous event CQ Event work queue list */
4073	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4074	/* Fast-path XRI aborted CQ Event work queue list */
4075	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4076	/* Slow-path XRI aborted CQ Event work queue list */
4077	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4078	/* Receive queue CQ Event work queue list */
4079	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4080
4081	/* Initialize the driver internal SLI layer lists. */
4082	lpfc_sli_setup(phba);
4083	lpfc_sli_queue_setup(phba);
4084
4085	/* Allocate device driver memory */
4086	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4087	if (rc)
4088		return -ENOMEM;
4089
4090	/* Create the bootstrap mailbox command */
4091	rc = lpfc_create_bootstrap_mbox(phba);
4092	if (unlikely(rc))
4093		goto out_free_mem;
4094
4095	/* Set up the host's endian order with the device. */
4096	rc = lpfc_setup_endian_order(phba);
4097	if (unlikely(rc))
4098		goto out_free_bsmbx;
4099
4100	rc = lpfc_sli4_fw_cfg_check(phba);
4101	if (unlikely(rc))
4102		goto out_free_bsmbx;
4103
4104	/* Set up the hba's configuration parameters. */
4105	rc = lpfc_sli4_read_config(phba);
4106	if (unlikely(rc))
4107		goto out_free_bsmbx;
4108
4109	/* Perform a function reset */
4110	rc = lpfc_pci_function_reset(phba);
4111	if (unlikely(rc))
4112		goto out_free_bsmbx;
4113
4114	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4115						       GFP_KERNEL);
4116	if (!mboxq) {
4117		rc = -ENOMEM;
4118		goto out_free_bsmbx;
4119	}
4120
4121	/* Get the Supported Pages. It is always available. */
4122	lpfc_supported_pages(mboxq);
4123	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4124	if (unlikely(rc)) {
4125		rc = -EIO;
4126		mempool_free(mboxq, phba->mbox_mem_pool);
4127		goto out_free_bsmbx;
4128	}
4129
4130	mqe = &mboxq->u.mqe;
4131	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4132	       LPFC_MAX_SUPPORTED_PAGES);
4133	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4134		switch (pn_page[i]) {
4135		case LPFC_SLI4_PARAMETERS:
4136			phba->sli4_hba.pc_sli4_params.supported = 1;
4137			break;
4138		default:
4139			break;
4140		}
4141	}
4142
4143	/* Read the port's SLI4 Parameters capabilities if supported. */
4144	if (phba->sli4_hba.pc_sli4_params.supported)
4145		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4146	mempool_free(mboxq, phba->mbox_mem_pool);
4147	if (rc) {
4148		rc = -EIO;
4149		goto out_free_bsmbx;
4150	}
4151	/* Create all the SLI4 queues */
4152	rc = lpfc_sli4_queue_create(phba);
4153	if (rc)
4154		goto out_free_bsmbx;
4155
4156	/* Create driver internal CQE event pool */
4157	rc = lpfc_sli4_cq_event_pool_create(phba);
4158	if (rc)
4159		goto out_destroy_queue;
4160
4161	/* Initialize and populate the iocb list per host */
4162	rc = lpfc_init_sgl_list(phba);
4163	if (rc) {
4164		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4165				"1400 Failed to initialize sgl list.\n");
4166		goto out_destroy_cq_event_pool;
4167	}
4168	rc = lpfc_init_active_sgl_array(phba);
4169	if (rc) {
4170		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4171				"1430 Failed to initialize sgl list.\n");
4172		goto out_free_sgl_list;
4173	}
4174
4175	rc = lpfc_sli4_init_rpi_hdrs(phba);
4176	if (rc) {
4177		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4178				"1432 Failed to initialize rpi headers.\n");
4179		goto out_free_active_sgl;
4180	}
4181
4182	/* Allocate eligible FCF bmask memory for FCF round robin failover */
4183	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4184	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4185					 GFP_KERNEL);
4186	if (!phba->fcf.fcf_rr_bmask) {
4187		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4188				"2759 Failed allocate memory for FCF round "
4189				"robin failover bmask\n");
4190		goto out_remove_rpi_hdrs;
4191	}
4192
4193	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4194				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4195	if (!phba->sli4_hba.fcp_eq_hdl) {
4196		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4197				"2572 Failed allocate memory for fast-path "
4198				"per-EQ handle array\n");
4199		goto out_free_fcf_rr_bmask;
4200	}
4201
4202	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4203				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4204	if (!phba->sli4_hba.msix_entries) {
4205		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4206				"2573 Failed allocate memory for msi-x "
4207				"interrupt vector entries\n");
4208		goto out_free_fcp_eq_hdl;
4209	}
4210
4211	return rc;
4212
4213out_free_fcp_eq_hdl:
4214	kfree(phba->sli4_hba.fcp_eq_hdl);
4215out_free_fcf_rr_bmask:
4216	kfree(phba->fcf.fcf_rr_bmask);
4217out_remove_rpi_hdrs:
4218	lpfc_sli4_remove_rpi_hdrs(phba);
4219out_free_active_sgl:
4220	lpfc_free_active_sgl(phba);
4221out_free_sgl_list:
4222	lpfc_free_sgl_list(phba);
4223out_destroy_cq_event_pool:
4224	lpfc_sli4_cq_event_pool_destroy(phba);
4225out_destroy_queue:
4226	lpfc_sli4_queue_destroy(phba);
4227out_free_bsmbx:
4228	lpfc_destroy_bootstrap_mbox(phba);
4229out_free_mem:
4230	lpfc_mem_free(phba);
4231	return rc;
4232}
4233
4234/**
4235 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4236 * @phba: pointer to lpfc hba data structure.
4237 *
4238 * This routine is invoked to unset the driver internal resources set up
4239 * specific for supporting the SLI-4 HBA device it attached to.
4240 **/
4241static void
4242lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4243{
4244	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4245
4246	/* unregister default FCFI from the HBA */
4247	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4248
4249	/* Free the default FCR table */
4250	lpfc_sli_remove_dflt_fcf(phba);
4251
4252	/* Free memory allocated for msi-x interrupt vector entries */
4253	kfree(phba->sli4_hba.msix_entries);
4254
4255	/* Free memory allocated for fast-path work queue handles */
4256	kfree(phba->sli4_hba.fcp_eq_hdl);
4257
4258	/* Free the allocated rpi headers. */
4259	lpfc_sli4_remove_rpi_hdrs(phba);
4260	lpfc_sli4_remove_rpis(phba);
4261
4262	/* Free eligible FCF index bmask */
4263	kfree(phba->fcf.fcf_rr_bmask);
4264
4265	/* Free the ELS sgl list */
4266	lpfc_free_active_sgl(phba);
4267	lpfc_free_sgl_list(phba);
4268
4269	/* Free the SCSI sgl management array */
4270	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4271
4272	/* Free the SLI4 queues */
4273	lpfc_sli4_queue_destroy(phba);
4274
4275	/* Free the completion queue EQ event pool */
4276	lpfc_sli4_cq_event_release_all(phba);
4277	lpfc_sli4_cq_event_pool_destroy(phba);
4278
4279	/* Reset SLI4 HBA FCoE function */
4280	lpfc_pci_function_reset(phba);
4281
4282	/* Free the bsmbx region. */
4283	lpfc_destroy_bootstrap_mbox(phba);
4284
4285	/* Free the SLI Layer memory with SLI4 HBAs */
4286	lpfc_mem_free_all(phba);
4287
4288	/* Free the current connect table */
4289	list_for_each_entry_safe(conn_entry, next_conn_entry,
4290		&phba->fcf_conn_rec_list, list) {
4291		list_del_init(&conn_entry->list);
4292		kfree(conn_entry);
4293	}
4294
4295	return;
4296}
4297
4298/**
4299 * lpfc_init_api_table_setup - Set up init api fucntion jump table
4300 * @phba: The hba struct for which this call is being executed.
4301 * @dev_grp: The HBA PCI-Device group number.
4302 *
4303 * This routine sets up the device INIT interface API function jump table
4304 * in @phba struct.
4305 *
4306 * Returns: 0 - success, -ENODEV - failure.
4307 **/
4308int
4309lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4310{
4311	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4312	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4313	switch (dev_grp) {
4314	case LPFC_PCI_DEV_LP:
4315		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4316		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4317		phba->lpfc_stop_port = lpfc_stop_port_s3;
4318		break;
4319	case LPFC_PCI_DEV_OC:
4320		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4321		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4322		phba->lpfc_stop_port = lpfc_stop_port_s4;
4323		break;
4324	default:
4325		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4326				"1431 Invalid HBA PCI-device group: 0x%x\n",
4327				dev_grp);
4328		return -ENODEV;
4329		break;
4330	}
4331	return 0;
4332}
4333
4334/**
4335 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
4336 * @phba: pointer to lpfc hba data structure.
4337 *
4338 * This routine is invoked to set up the driver internal resources before the
4339 * device specific resource setup to support the HBA device it attached to.
4340 *
4341 * Return codes
4342 *	0 - successful
4343 *	other values - error
4344 **/
4345static int
4346lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4347{
4348	/*
4349	 * Driver resources common to all SLI revisions
4350	 */
4351	atomic_set(&phba->fast_event_count, 0);
4352	spin_lock_init(&phba->hbalock);
4353
4354	/* Initialize ndlp management spinlock */
4355	spin_lock_init(&phba->ndlp_lock);
4356
4357	INIT_LIST_HEAD(&phba->port_list);
4358	INIT_LIST_HEAD(&phba->work_list);
4359	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4360
4361	/* Initialize the wait queue head for the kernel thread */
4362	init_waitqueue_head(&phba->work_waitq);
4363
4364	/* Initialize the scsi buffer list used by driver for scsi IO */
4365	spin_lock_init(&phba->scsi_buf_list_lock);
4366	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4367
4368	/* Initialize the fabric iocb list */
4369	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4370
4371	/* Initialize list to save ELS buffers */
4372	INIT_LIST_HEAD(&phba->elsbuf);
4373
4374	/* Initialize FCF connection rec list */
4375	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4376
4377	return 0;
4378}
4379
4380/**
4381 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4382 * @phba: pointer to lpfc hba data structure.
4383 *
4384 * This routine is invoked to set up the driver internal resources after the
4385 * device specific resource setup to support the HBA device it attached to.
4386 *
4387 * Return codes
4388 * 	0 - successful
4389 * 	other values - error
4390 **/
4391static int
4392lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4393{
4394	int error;
4395
4396	/* Startup the kernel thread for this host adapter. */
4397	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4398					  "lpfc_worker_%d", phba->brd_no);
4399	if (IS_ERR(phba->worker_thread)) {
4400		error = PTR_ERR(phba->worker_thread);
4401		return error;
4402	}
4403
4404	return 0;
4405}
4406
4407/**
4408 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4409 * @phba: pointer to lpfc hba data structure.
4410 *
4411 * This routine is invoked to unset the driver internal resources set up after
4412 * the device specific resource setup for supporting the HBA device it
4413 * attached to.
4414 **/
4415static void
4416lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4417{
4418	/* Stop kernel worker thread */
4419	kthread_stop(phba->worker_thread);
4420}
4421
4422/**
4423 * lpfc_free_iocb_list - Free iocb list.
4424 * @phba: pointer to lpfc hba data structure.
4425 *
4426 * This routine is invoked to free the driver's IOCB list and memory.
4427 **/
4428static void
4429lpfc_free_iocb_list(struct lpfc_hba *phba)
4430{
4431	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4432
4433	spin_lock_irq(&phba->hbalock);
4434	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4435				 &phba->lpfc_iocb_list, list) {
4436		list_del(&iocbq_entry->list);
4437		kfree(iocbq_entry);
4438		phba->total_iocbq_bufs--;
4439	}
4440	spin_unlock_irq(&phba->hbalock);
4441
4442	return;
4443}
4444
4445/**
4446 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4447 * @phba: pointer to lpfc hba data structure.
4448 *
4449 * This routine is invoked to allocate and initizlize the driver's IOCB
4450 * list and set up the IOCB tag array accordingly.
4451 *
4452 * Return codes
4453 *	0 - successful
4454 *	other values - error
4455 **/
4456static int
4457lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4458{
4459	struct lpfc_iocbq *iocbq_entry = NULL;
4460	uint16_t iotag;
4461	int i;
4462
4463	/* Initialize and populate the iocb list per host.  */
4464	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4465	for (i = 0; i < iocb_count; i++) {
4466		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4467		if (iocbq_entry == NULL) {
4468			printk(KERN_ERR "%s: only allocated %d iocbs of "
4469				"expected %d count. Unloading driver.\n",
4470				__func__, i, LPFC_IOCB_LIST_CNT);
4471			goto out_free_iocbq;
4472		}
4473
4474		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4475		if (iotag == 0) {
4476			kfree(iocbq_entry);
4477			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4478				"Unloading driver.\n", __func__);
4479			goto out_free_iocbq;
4480		}
4481		iocbq_entry->sli4_xritag = NO_XRI;
4482
4483		spin_lock_irq(&phba->hbalock);
4484		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4485		phba->total_iocbq_bufs++;
4486		spin_unlock_irq(&phba->hbalock);
4487	}
4488
4489	return 0;
4490
4491out_free_iocbq:
4492	lpfc_free_iocb_list(phba);
4493
4494	return -ENOMEM;
4495}
4496
4497/**
4498 * lpfc_free_sgl_list - Free sgl list.
4499 * @phba: pointer to lpfc hba data structure.
4500 *
4501 * This routine is invoked to free the driver's sgl list and memory.
4502 **/
4503static void
4504lpfc_free_sgl_list(struct lpfc_hba *phba)
4505{
4506	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4507	LIST_HEAD(sglq_list);
4508	int rc = 0;
4509
4510	spin_lock_irq(&phba->hbalock);
4511	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4512	spin_unlock_irq(&phba->hbalock);
4513
4514	list_for_each_entry_safe(sglq_entry, sglq_next,
4515				 &sglq_list, list) {
4516		list_del(&sglq_entry->list);
4517		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4518		kfree(sglq_entry);
4519		phba->sli4_hba.total_sglq_bufs--;
4520	}
4521	rc = lpfc_sli4_remove_all_sgl_pages(phba);
4522	if (rc) {
4523		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4524			"2005 Unable to deregister pages from HBA: %x\n", rc);
4525	}
4526	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4527}
4528
4529/**
4530 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4531 * @phba: pointer to lpfc hba data structure.
4532 *
4533 * This routine is invoked to allocate the driver's active sgl memory.
4534 * This array will hold the sglq_entry's for active IOs.
4535 **/
4536static int
4537lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4538{
4539	int size;
4540	size = sizeof(struct lpfc_sglq *);
4541	size *= phba->sli4_hba.max_cfg_param.max_xri;
4542
4543	phba->sli4_hba.lpfc_sglq_active_list =
4544		kzalloc(size, GFP_KERNEL);
4545	if (!phba->sli4_hba.lpfc_sglq_active_list)
4546		return -ENOMEM;
4547	return 0;
4548}
4549
4550/**
4551 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4552 * @phba: pointer to lpfc hba data structure.
4553 *
4554 * This routine is invoked to walk through the array of active sglq entries
4555 * and free all of the resources.
4556 * This is just a place holder for now.
4557 **/
4558static void
4559lpfc_free_active_sgl(struct lpfc_hba *phba)
4560{
4561	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4562}
4563
4564/**
4565 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4566 * @phba: pointer to lpfc hba data structure.
4567 *
4568 * This routine is invoked to allocate and initizlize the driver's sgl
4569 * list and set up the sgl xritag tag array accordingly.
4570 *
4571 * Return codes
4572 *	0 - successful
4573 *	other values - error
4574 **/
4575static int
4576lpfc_init_sgl_list(struct lpfc_hba *phba)
4577{
4578	struct lpfc_sglq *sglq_entry = NULL;
4579	int i;
4580	int els_xri_cnt;
4581
4582	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4583	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4584				"2400 lpfc_init_sgl_list els %d.\n",
4585				els_xri_cnt);
4586	/* Initialize and populate the sglq list per host/VF. */
4587	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4588	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4589
4590	/* Sanity check on XRI management */
4591	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4592		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4593				"2562 No room left for SCSI XRI allocation: "
4594				"max_xri=%d, els_xri=%d\n",
4595				phba->sli4_hba.max_cfg_param.max_xri,
4596				els_xri_cnt);
4597		return -ENOMEM;
4598	}
4599
4600	/* Allocate memory for the ELS XRI management array */
4601	phba->sli4_hba.lpfc_els_sgl_array =
4602			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4603			GFP_KERNEL);
4604
4605	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4606		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4607				"2401 Failed to allocate memory for ELS "
4608				"XRI management array of size %d.\n",
4609				els_xri_cnt);
4610		return -ENOMEM;
4611	}
4612
4613	/* Keep the SCSI XRI into the XRI management array */
4614	phba->sli4_hba.scsi_xri_max =
4615			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4616	phba->sli4_hba.scsi_xri_cnt = 0;
4617
4618	phba->sli4_hba.lpfc_scsi_psb_array =
4619			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4620			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4621
4622	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4623		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4624				"2563 Failed to allocate memory for SCSI "
4625				"XRI management array of size %d.\n",
4626				phba->sli4_hba.scsi_xri_max);
4627		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4628		return -ENOMEM;
4629	}
4630
4631	for (i = 0; i < els_xri_cnt; i++) {
4632		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4633		if (sglq_entry == NULL) {
4634			printk(KERN_ERR "%s: only allocated %d sgls of "
4635				"expected %d count. Unloading driver.\n",
4636				__func__, i, els_xri_cnt);
4637			goto out_free_mem;
4638		}
4639
4640		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4641		if (sglq_entry->sli4_xritag == NO_XRI) {
4642			kfree(sglq_entry);
4643			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4644				"Unloading driver.\n", __func__);
4645			goto out_free_mem;
4646		}
4647		sglq_entry->buff_type = GEN_BUFF_TYPE;
4648		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4649		if (sglq_entry->virt == NULL) {
4650			kfree(sglq_entry);
4651			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4652				"Unloading driver.\n", __func__);
4653			goto out_free_mem;
4654		}
4655		sglq_entry->sgl = sglq_entry->virt;
4656		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4657
4658		/* The list order is used by later block SGL registraton */
4659		spin_lock_irq(&phba->hbalock);
4660		sglq_entry->state = SGL_FREED;
4661		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4662		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4663		phba->sli4_hba.total_sglq_bufs++;
4664		spin_unlock_irq(&phba->hbalock);
4665	}
4666	return 0;
4667
4668out_free_mem:
4669	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4670	lpfc_free_sgl_list(phba);
4671	return -ENOMEM;
4672}
4673
4674/**
4675 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4676 * @phba: pointer to lpfc hba data structure.
4677 *
4678 * This routine is invoked to post rpi header templates to the
4679 * HBA consistent with the SLI-4 interface spec.  This routine
4680 * posts a PAGE_SIZE memory region to the port to hold up to
4681 * PAGE_SIZE modulo 64 rpi context headers.
4682 * No locks are held here because this is an initialization routine
4683 * called only from probe or lpfc_online when interrupts are not
4684 * enabled and the driver is reinitializing the device.
4685 *
4686 * Return codes
4687 * 	0 - successful
4688 * 	ENOMEM - No availble memory
4689 *      EIO - The mailbox failed to complete successfully.
4690 **/
4691int
4692lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4693{
4694	int rc = 0;
4695	int longs;
4696	uint16_t rpi_count;
4697	struct lpfc_rpi_hdr *rpi_hdr;
4698
4699	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4700
4701	/*
4702	 * Provision an rpi bitmask range for discovery. The total count
4703	 * is the difference between max and base + 1.
4704	 */
4705	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4706		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4707
4708	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4709	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4710					   GFP_KERNEL);
4711	if (!phba->sli4_hba.rpi_bmask)
4712		return -ENOMEM;
4713
4714	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4715	if (!rpi_hdr) {
4716		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4717				"0391 Error during rpi post operation\n");
4718		lpfc_sli4_remove_rpis(phba);
4719		rc = -ENODEV;
4720	}
4721
4722	return rc;
4723}
4724
4725/**
4726 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4727 * @phba: pointer to lpfc hba data structure.
4728 *
4729 * This routine is invoked to allocate a single 4KB memory region to
4730 * support rpis and stores them in the phba.  This single region
4731 * provides support for up to 64 rpis.  The region is used globally
4732 * by the device.
4733 *
4734 * Returns:
4735 *   A valid rpi hdr on success.
4736 *   A NULL pointer on any failure.
4737 **/
4738struct lpfc_rpi_hdr *
4739lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4740{
4741	uint16_t rpi_limit, curr_rpi_range;
4742	struct lpfc_dmabuf *dmabuf;
4743	struct lpfc_rpi_hdr *rpi_hdr;
4744
4745	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4746		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4747
4748	spin_lock_irq(&phba->hbalock);
4749	curr_rpi_range = phba->sli4_hba.next_rpi;
4750	spin_unlock_irq(&phba->hbalock);
4751
4752	/*
4753	 * The port has a limited number of rpis. The increment here
4754	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4755	 * and to allow the full max_rpi range per port.
4756	 */
4757	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4758		return NULL;
4759
4760	/*
4761	 * First allocate the protocol header region for the port.  The
4762	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4763	 */
4764	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4765	if (!dmabuf)
4766		return NULL;
4767
4768	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4769					  LPFC_HDR_TEMPLATE_SIZE,
4770					  &dmabuf->phys,
4771					  GFP_KERNEL);
4772	if (!dmabuf->virt) {
4773		rpi_hdr = NULL;
4774		goto err_free_dmabuf;
4775	}
4776
4777	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4778	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4779		rpi_hdr = NULL;
4780		goto err_free_coherent;
4781	}
4782
4783	/* Save the rpi header data for cleanup later. */
4784	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4785	if (!rpi_hdr)
4786		goto err_free_coherent;
4787
4788	rpi_hdr->dmabuf = dmabuf;
4789	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4790	rpi_hdr->page_count = 1;
4791	spin_lock_irq(&phba->hbalock);
4792	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4793	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4794
4795	/*
4796	 * The next_rpi stores the next module-64 rpi value to post
4797	 * in any subsequent rpi memory region postings.
4798	 */
4799	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4800	spin_unlock_irq(&phba->hbalock);
4801	return rpi_hdr;
4802
4803 err_free_coherent:
4804	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4805			  dmabuf->virt, dmabuf->phys);
4806 err_free_dmabuf:
4807	kfree(dmabuf);
4808	return NULL;
4809}
4810
4811/**
4812 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4813 * @phba: pointer to lpfc hba data structure.
4814 *
4815 * This routine is invoked to remove all memory resources allocated
4816 * to support rpis. This routine presumes the caller has released all
4817 * rpis consumed by fabric or port logins and is prepared to have
4818 * the header pages removed.
4819 **/
4820void
4821lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4822{
4823	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4824
4825	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4826				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4827		list_del(&rpi_hdr->list);
4828		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4829				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4830		kfree(rpi_hdr->dmabuf);
4831		kfree(rpi_hdr);
4832	}
4833
4834	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4835	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4836}
4837
4838/**
4839 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4840 * @pdev: pointer to pci device data structure.
4841 *
4842 * This routine is invoked to allocate the driver hba data structure for an
4843 * HBA device. If the allocation is successful, the phba reference to the
4844 * PCI device data structure is set.
4845 *
4846 * Return codes
4847 *      pointer to @phba - successful
4848 *      NULL - error
4849 **/
4850static struct lpfc_hba *
4851lpfc_hba_alloc(struct pci_dev *pdev)
4852{
4853	struct lpfc_hba *phba;
4854
4855	/* Allocate memory for HBA structure */
4856	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4857	if (!phba) {
4858		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4859		return NULL;
4860	}
4861
4862	/* Set reference to PCI device in HBA structure */
4863	phba->pcidev = pdev;
4864
4865	/* Assign an unused board number */
4866	phba->brd_no = lpfc_get_instance();
4867	if (phba->brd_no < 0) {
4868		kfree(phba);
4869		return NULL;
4870	}
4871
4872	spin_lock_init(&phba->ct_ev_lock);
4873	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4874
4875	return phba;
4876}
4877
4878/**
4879 * lpfc_hba_free - Free driver hba data structure with a device.
4880 * @phba: pointer to lpfc hba data structure.
4881 *
4882 * This routine is invoked to free the driver hba data structure with an
4883 * HBA device.
4884 **/
4885static void
4886lpfc_hba_free(struct lpfc_hba *phba)
4887{
4888	/* Release the driver assigned board number */
4889	idr_remove(&lpfc_hba_index, phba->brd_no);
4890
4891	kfree(phba);
4892	return;
4893}
4894
4895/**
4896 * lpfc_create_shost - Create hba physical port with associated scsi host.
4897 * @phba: pointer to lpfc hba data structure.
4898 *
4899 * This routine is invoked to create HBA physical port and associate a SCSI
4900 * host with it.
4901 *
4902 * Return codes
4903 *      0 - successful
4904 *      other values - error
4905 **/
4906static int
4907lpfc_create_shost(struct lpfc_hba *phba)
4908{
4909	struct lpfc_vport *vport;
4910	struct Scsi_Host  *shost;
4911
4912	/* Initialize HBA FC structure */
4913	phba->fc_edtov = FF_DEF_EDTOV;
4914	phba->fc_ratov = FF_DEF_RATOV;
4915	phba->fc_altov = FF_DEF_ALTOV;
4916	phba->fc_arbtov = FF_DEF_ARBTOV;
4917
4918	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4919	if (!vport)
4920		return -ENODEV;
4921
4922	shost = lpfc_shost_from_vport(vport);
4923	phba->pport = vport;
4924	lpfc_debugfs_initialize(vport);
4925	/* Put reference to SCSI host to driver's device private data */
4926	pci_set_drvdata(phba->pcidev, shost);
4927
4928	return 0;
4929}
4930
4931/**
4932 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4933 * @phba: pointer to lpfc hba data structure.
4934 *
4935 * This routine is invoked to destroy HBA physical port and the associated
4936 * SCSI host.
4937 **/
4938static void
4939lpfc_destroy_shost(struct lpfc_hba *phba)
4940{
4941	struct lpfc_vport *vport = phba->pport;
4942
4943	/* Destroy physical port that associated with the SCSI host */
4944	destroy_port(vport);
4945
4946	return;
4947}
4948
4949/**
4950 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4951 * @phba: pointer to lpfc hba data structure.
4952 * @shost: the shost to be used to detect Block guard settings.
4953 *
4954 * This routine sets up the local Block guard protocol settings for @shost.
4955 * This routine also allocates memory for debugging bg buffers.
4956 **/
4957static void
4958lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4959{
4960	int pagecnt = 10;
4961	if (lpfc_prot_mask && lpfc_prot_guard) {
4962		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4963				"1478 Registering BlockGuard with the "
4964				"SCSI layer\n");
4965		scsi_host_set_prot(shost, lpfc_prot_mask);
4966		scsi_host_set_guard(shost, lpfc_prot_guard);
4967	}
4968	if (!_dump_buf_data) {
4969		while (pagecnt) {
4970			spin_lock_init(&_dump_buf_lock);
4971			_dump_buf_data =
4972				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4973			if (_dump_buf_data) {
4974				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4975					"9043 BLKGRD: allocated %d pages for "
4976				       "_dump_buf_data at 0x%p\n",
4977				       (1 << pagecnt), _dump_buf_data);
4978				_dump_buf_data_order = pagecnt;
4979				memset(_dump_buf_data, 0,
4980				       ((1 << PAGE_SHIFT) << pagecnt));
4981				break;
4982			} else
4983				--pagecnt;
4984		}
4985		if (!_dump_buf_data_order)
4986			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4987				"9044 BLKGRD: ERROR unable to allocate "
4988			       "memory for hexdump\n");
4989	} else
4990		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4991			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4992		       "\n", _dump_buf_data);
4993	if (!_dump_buf_dif) {
4994		while (pagecnt) {
4995			_dump_buf_dif =
4996				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4997			if (_dump_buf_dif) {
4998				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4999					"9046 BLKGRD: allocated %d pages for "
5000				       "_dump_buf_dif at 0x%p\n",
5001				       (1 << pagecnt), _dump_buf_dif);
5002				_dump_buf_dif_order = pagecnt;
5003				memset(_dump_buf_dif, 0,
5004				       ((1 << PAGE_SHIFT) << pagecnt));
5005				break;
5006			} else
5007				--pagecnt;
5008		}
5009		if (!_dump_buf_dif_order)
5010			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5011			"9047 BLKGRD: ERROR unable to allocate "
5012			       "memory for hexdump\n");
5013	} else
5014		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5015			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5016		       _dump_buf_dif);
5017}
5018
5019/**
5020 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5021 * @phba: pointer to lpfc hba data structure.
5022 *
5023 * This routine is invoked to perform all the necessary post initialization
5024 * setup for the device.
5025 **/
5026static void
5027lpfc_post_init_setup(struct lpfc_hba *phba)
5028{
5029	struct Scsi_Host  *shost;
5030	struct lpfc_adapter_event_header adapter_event;
5031
5032	/* Get the default values for Model Name and Description */
5033	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5034
5035	/*
5036	 * hba setup may have changed the hba_queue_depth so we need to
5037	 * adjust the value of can_queue.
5038	 */
5039	shost = pci_get_drvdata(phba->pcidev);
5040	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5041	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5042		lpfc_setup_bg(phba, shost);
5043
5044	lpfc_host_attrib_init(shost);
5045
5046	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5047		spin_lock_irq(shost->host_lock);
5048		lpfc_poll_start_timer(phba);
5049		spin_unlock_irq(shost->host_lock);
5050	}
5051
5052	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5053			"0428 Perform SCSI scan\n");
5054	/* Send board arrival event to upper layer */
5055	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5056	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5057	fc_host_post_vendor_event(shost, fc_get_event_number(),
5058				  sizeof(adapter_event),
5059				  (char *) &adapter_event,
5060				  LPFC_NL_VENDOR_ID);
5061	return;
5062}
5063
5064/**
5065 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5066 * @phba: pointer to lpfc hba data structure.
5067 *
5068 * This routine is invoked to set up the PCI device memory space for device
5069 * with SLI-3 interface spec.
5070 *
5071 * Return codes
5072 * 	0 - successful
5073 * 	other values - error
5074 **/
5075static int
5076lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5077{
5078	struct pci_dev *pdev;
5079	unsigned long bar0map_len, bar2map_len;
5080	int i, hbq_count;
5081	void *ptr;
5082	int error = -ENODEV;
5083
5084	/* Obtain PCI device reference */
5085	if (!phba->pcidev)
5086		return error;
5087	else
5088		pdev = phba->pcidev;
5089
5090	/* Set the device DMA mask size */
5091	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5092	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5093		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5094		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5095			return error;
5096		}
5097	}
5098
5099	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5100	 * required by each mapping.
5101	 */
5102	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5103	bar0map_len = pci_resource_len(pdev, 0);
5104
5105	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5106	bar2map_len = pci_resource_len(pdev, 2);
5107
5108	/* Map HBA SLIM to a kernel virtual address. */
5109	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5110	if (!phba->slim_memmap_p) {
5111		dev_printk(KERN_ERR, &pdev->dev,
5112			   "ioremap failed for SLIM memory.\n");
5113		goto out;
5114	}
5115
5116	/* Map HBA Control Registers to a kernel virtual address. */
5117	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5118	if (!phba->ctrl_regs_memmap_p) {
5119		dev_printk(KERN_ERR, &pdev->dev,
5120			   "ioremap failed for HBA control registers.\n");
5121		goto out_iounmap_slim;
5122	}
5123
5124	/* Allocate memory for SLI-2 structures */
5125	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5126					       SLI2_SLIM_SIZE,
5127					       &phba->slim2p.phys,
5128					       GFP_KERNEL);
5129	if (!phba->slim2p.virt)
5130		goto out_iounmap;
5131
5132	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5133	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5134	phba->mbox_ext = (phba->slim2p.virt +
5135		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5136	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5137	phba->IOCBs = (phba->slim2p.virt +
5138		       offsetof(struct lpfc_sli2_slim, IOCBs));
5139
5140	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5141						 lpfc_sli_hbq_size(),
5142						 &phba->hbqslimp.phys,
5143						 GFP_KERNEL);
5144	if (!phba->hbqslimp.virt)
5145		goto out_free_slim;
5146
5147	hbq_count = lpfc_sli_hbq_count();
5148	ptr = phba->hbqslimp.virt;
5149	for (i = 0; i < hbq_count; ++i) {
5150		phba->hbqs[i].hbq_virt = ptr;
5151		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5152		ptr += (lpfc_hbq_defs[i]->entry_count *
5153			sizeof(struct lpfc_hbq_entry));
5154	}
5155	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5156	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5157
5158	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5159
5160	INIT_LIST_HEAD(&phba->rb_pend_list);
5161
5162	phba->MBslimaddr = phba->slim_memmap_p;
5163	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5164	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5165	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5166	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5167
5168	return 0;
5169
5170out_free_slim:
5171	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5172			  phba->slim2p.virt, phba->slim2p.phys);
5173out_iounmap:
5174	iounmap(phba->ctrl_regs_memmap_p);
5175out_iounmap_slim:
5176	iounmap(phba->slim_memmap_p);
5177out:
5178	return error;
5179}
5180
5181/**
5182 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5183 * @phba: pointer to lpfc hba data structure.
5184 *
5185 * This routine is invoked to unset the PCI device memory space for device
5186 * with SLI-3 interface spec.
5187 **/
5188static void
5189lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5190{
5191	struct pci_dev *pdev;
5192
5193	/* Obtain PCI device reference */
5194	if (!phba->pcidev)
5195		return;
5196	else
5197		pdev = phba->pcidev;
5198
5199	/* Free coherent DMA memory allocated */
5200	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5201			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5202	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5203			  phba->slim2p.virt, phba->slim2p.phys);
5204
5205	/* I/O memory unmap */
5206	iounmap(phba->ctrl_regs_memmap_p);
5207	iounmap(phba->slim_memmap_p);
5208
5209	return;
5210}
5211
5212/**
5213 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5214 * @phba: pointer to lpfc hba data structure.
5215 *
5216 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5217 * done and check status.
5218 *
5219 * Return 0 if successful, otherwise -ENODEV.
5220 **/
5221int
5222lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5223{
5224	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5225	int i, port_error = -ENODEV;
5226
5227	if (!phba->sli4_hba.STAregaddr)
5228		return -ENODEV;
5229
5230	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5231	for (i = 0; i < 3000; i++) {
5232		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5233		/* Encounter fatal POST error, break out */
5234		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5235			port_error = -ENODEV;
5236			break;
5237		}
5238		if (LPFC_POST_STAGE_ARMFW_READY ==
5239		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5240			port_error = 0;
5241			break;
5242		}
5243		msleep(10);
5244	}
5245
5246	if (port_error)
5247		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5248			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5249			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5250			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5251			bf_get(lpfc_hst_state_perr, &sta_reg),
5252			bf_get(lpfc_hst_state_sfi, &sta_reg),
5253			bf_get(lpfc_hst_state_nip, &sta_reg),
5254			bf_get(lpfc_hst_state_ipc, &sta_reg),
5255			bf_get(lpfc_hst_state_xrom, &sta_reg),
5256			bf_get(lpfc_hst_state_dl, &sta_reg),
5257			bf_get(lpfc_hst_state_port_status, &sta_reg));
5258
5259	/* Log device information */
5260	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5261	if (bf_get(lpfc_sli_intf_valid,
5262		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5263		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5264				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5265				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5266				bf_get(lpfc_sli_intf_sli_family,
5267				       &phba->sli4_hba.sli_intf),
5268				bf_get(lpfc_sli_intf_slirev,
5269				       &phba->sli4_hba.sli_intf),
5270				bf_get(lpfc_sli_intf_featurelevel1,
5271				       &phba->sli4_hba.sli_intf),
5272				bf_get(lpfc_sli_intf_featurelevel2,
5273				       &phba->sli4_hba.sli_intf));
5274	}
5275	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5276	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5277	/* With uncoverable error, log the error message and return error */
5278	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5279	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5280	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5281	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5282		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5283				"1422 HBA Unrecoverable error: "
5284				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5285				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5286				uerrlo_reg.word0, uerrhi_reg.word0,
5287				phba->sli4_hba.ue_mask_lo,
5288				phba->sli4_hba.ue_mask_hi);
5289		return -ENODEV;
5290	}
5291
5292	return port_error;
5293}
5294
5295/**
5296 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5297 * @phba: pointer to lpfc hba data structure.
5298 *
5299 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5300 * memory map.
5301 **/
5302static void
5303lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5304{
5305	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5306					LPFC_UERR_STATUS_LO;
5307	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5308					LPFC_UERR_STATUS_HI;
5309	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5310					LPFC_UE_MASK_LO;
5311	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5312					LPFC_UE_MASK_HI;
5313	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5314					LPFC_SLI_INTF;
5315}
5316
5317/**
5318 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5319 * @phba: pointer to lpfc hba data structure.
5320 *
5321 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5322 * memory map.
5323 **/
5324static void
5325lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5326{
5327
5328	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5329				    LPFC_HST_STATE;
5330	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5331				    LPFC_HST_ISR0;
5332	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5333				    LPFC_HST_IMR0;
5334	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5335				     LPFC_HST_ISCR0;
5336	return;
5337}
5338
5339/**
5340 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5341 * @phba: pointer to lpfc hba data structure.
5342 * @vf: virtual function number
5343 *
5344 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5345 * based on the given viftual function number, @vf.
5346 *
5347 * Return 0 if successful, otherwise -ENODEV.
5348 **/
5349static int
5350lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5351{
5352	if (vf > LPFC_VIR_FUNC_MAX)
5353		return -ENODEV;
5354
5355	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5356				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5357	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5358				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5359	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5360				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5361	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5362				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5363	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5364				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5365	return 0;
5366}
5367
5368/**
5369 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5370 * @phba: pointer to lpfc hba data structure.
5371 *
5372 * This routine is invoked to create the bootstrap mailbox
5373 * region consistent with the SLI-4 interface spec.  This
5374 * routine allocates all memory necessary to communicate
5375 * mailbox commands to the port and sets up all alignment
5376 * needs.  No locks are expected to be held when calling
5377 * this routine.
5378 *
5379 * Return codes
5380 * 	0 - successful
5381 * 	ENOMEM - could not allocated memory.
5382 **/
5383static int
5384lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5385{
5386	uint32_t bmbx_size;
5387	struct lpfc_dmabuf *dmabuf;
5388	struct dma_address *dma_address;
5389	uint32_t pa_addr;
5390	uint64_t phys_addr;
5391
5392	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5393	if (!dmabuf)
5394		return -ENOMEM;
5395
5396	/*
5397	 * The bootstrap mailbox region is comprised of 2 parts
5398	 * plus an alignment restriction of 16 bytes.
5399	 */
5400	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5401	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5402					  bmbx_size,
5403					  &dmabuf->phys,
5404					  GFP_KERNEL);
5405	if (!dmabuf->virt) {
5406		kfree(dmabuf);
5407		return -ENOMEM;
5408	}
5409	memset(dmabuf->virt, 0, bmbx_size);
5410
5411	/*
5412	 * Initialize the bootstrap mailbox pointers now so that the register
5413	 * operations are simple later.  The mailbox dma address is required
5414	 * to be 16-byte aligned.  Also align the virtual memory as each
5415	 * maibox is copied into the bmbx mailbox region before issuing the
5416	 * command to the port.
5417	 */
5418	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5419	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5420
5421	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5422					      LPFC_ALIGN_16_BYTE);
5423	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5424					      LPFC_ALIGN_16_BYTE);
5425
5426	/*
5427	 * Set the high and low physical addresses now.  The SLI4 alignment
5428	 * requirement is 16 bytes and the mailbox is posted to the port
5429	 * as two 30-bit addresses.  The other data is a bit marking whether
5430	 * the 30-bit address is the high or low address.
5431	 * Upcast bmbx aphys to 64bits so shift instruction compiles
5432	 * clean on 32 bit machines.
5433	 */
5434	dma_address = &phba->sli4_hba.bmbx.dma_address;
5435	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5436	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5437	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5438					   LPFC_BMBX_BIT1_ADDR_HI);
5439
5440	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5441	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5442					   LPFC_BMBX_BIT1_ADDR_LO);
5443	return 0;
5444}
5445
5446/**
5447 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5448 * @phba: pointer to lpfc hba data structure.
5449 *
5450 * This routine is invoked to teardown the bootstrap mailbox
5451 * region and release all host resources. This routine requires
5452 * the caller to ensure all mailbox commands recovered, no
5453 * additional mailbox comands are sent, and interrupts are disabled
5454 * before calling this routine.
5455 *
5456 **/
5457static void
5458lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5459{
5460	dma_free_coherent(&phba->pcidev->dev,
5461			  phba->sli4_hba.bmbx.bmbx_size,
5462			  phba->sli4_hba.bmbx.dmabuf->virt,
5463			  phba->sli4_hba.bmbx.dmabuf->phys);
5464
5465	kfree(phba->sli4_hba.bmbx.dmabuf);
5466	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5467}
5468
5469/**
5470 * lpfc_sli4_read_config - Get the config parameters.
5471 * @phba: pointer to lpfc hba data structure.
5472 *
5473 * This routine is invoked to read the configuration parameters from the HBA.
5474 * The configuration parameters are used to set the base and maximum values
5475 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
5476 * allocation for the port.
5477 *
5478 * Return codes
5479 * 	0 - successful
5480 * 	ENOMEM - No availble memory
5481 *      EIO - The mailbox failed to complete successfully.
5482 **/
5483static int
5484lpfc_sli4_read_config(struct lpfc_hba *phba)
5485{
5486	LPFC_MBOXQ_t *pmb;
5487	struct lpfc_mbx_read_config *rd_config;
5488	uint32_t rc = 0;
5489
5490	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5491	if (!pmb) {
5492		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5493				"2011 Unable to allocate memory for issuing "
5494				"SLI_CONFIG_SPECIAL mailbox command\n");
5495		return -ENOMEM;
5496	}
5497
5498	lpfc_read_config(phba, pmb);
5499
5500	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5501	if (rc != MBX_SUCCESS) {
5502		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5503			"2012 Mailbox failed , mbxCmd x%x "
5504			"READ_CONFIG, mbxStatus x%x\n",
5505			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5506			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5507		rc = -EIO;
5508	} else {
5509		rd_config = &pmb->u.mqe.un.rd_config;
5510		phba->sli4_hba.max_cfg_param.max_xri =
5511			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5512		phba->sli4_hba.max_cfg_param.xri_base =
5513			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5514		phba->sli4_hba.max_cfg_param.max_vpi =
5515			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5516		phba->sli4_hba.max_cfg_param.vpi_base =
5517			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5518		phba->sli4_hba.max_cfg_param.max_rpi =
5519			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5520		phba->sli4_hba.max_cfg_param.rpi_base =
5521			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5522		phba->sli4_hba.max_cfg_param.max_vfi =
5523			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5524		phba->sli4_hba.max_cfg_param.vfi_base =
5525			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5526		phba->sli4_hba.max_cfg_param.max_fcfi =
5527			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5528		phba->sli4_hba.max_cfg_param.fcfi_base =
5529			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5530		phba->sli4_hba.max_cfg_param.max_eq =
5531			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5532		phba->sli4_hba.max_cfg_param.max_rq =
5533			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5534		phba->sli4_hba.max_cfg_param.max_wq =
5535			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5536		phba->sli4_hba.max_cfg_param.max_cq =
5537			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5538		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5539		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5540		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5541		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5542		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5543		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5544				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5545		phba->max_vports = phba->max_vpi;
5546		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5547				"2003 cfg params XRI(B:%d M:%d), "
5548				"VPI(B:%d M:%d) "
5549				"VFI(B:%d M:%d) "
5550				"RPI(B:%d M:%d) "
5551				"FCFI(B:%d M:%d)\n",
5552				phba->sli4_hba.max_cfg_param.xri_base,
5553				phba->sli4_hba.max_cfg_param.max_xri,
5554				phba->sli4_hba.max_cfg_param.vpi_base,
5555				phba->sli4_hba.max_cfg_param.max_vpi,
5556				phba->sli4_hba.max_cfg_param.vfi_base,
5557				phba->sli4_hba.max_cfg_param.max_vfi,
5558				phba->sli4_hba.max_cfg_param.rpi_base,
5559				phba->sli4_hba.max_cfg_param.max_rpi,
5560				phba->sli4_hba.max_cfg_param.fcfi_base,
5561				phba->sli4_hba.max_cfg_param.max_fcfi);
5562	}
5563	mempool_free(pmb, phba->mbox_mem_pool);
5564
5565	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
5566	if (phba->cfg_hba_queue_depth >
5567		(phba->sli4_hba.max_cfg_param.max_xri -
5568			lpfc_sli4_get_els_iocb_cnt(phba)))
5569		phba->cfg_hba_queue_depth =
5570			phba->sli4_hba.max_cfg_param.max_xri -
5571				lpfc_sli4_get_els_iocb_cnt(phba);
5572	return rc;
5573}
5574
5575/**
5576 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
5577 * @phba: pointer to lpfc hba data structure.
5578 *
5579 * This routine is invoked to setup the host-side endian order to the
5580 * HBA consistent with the SLI-4 interface spec.
5581 *
5582 * Return codes
5583 * 	0 - successful
5584 * 	ENOMEM - No availble memory
5585 *      EIO - The mailbox failed to complete successfully.
5586 **/
5587static int
5588lpfc_setup_endian_order(struct lpfc_hba *phba)
5589{
5590	LPFC_MBOXQ_t *mboxq;
5591	uint32_t rc = 0;
5592	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5593				      HOST_ENDIAN_HIGH_WORD1};
5594
5595	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5596	if (!mboxq) {
5597		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5598				"0492 Unable to allocate memory for issuing "
5599				"SLI_CONFIG_SPECIAL mailbox command\n");
5600		return -ENOMEM;
5601	}
5602
5603	/*
5604	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5605	 * words to contain special data values and no other data.
5606	 */
5607	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5608	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5609	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5610	if (rc != MBX_SUCCESS) {
5611		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5612				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
5613				"status x%x\n",
5614				rc);
5615		rc = -EIO;
5616	}
5617
5618	mempool_free(mboxq, phba->mbox_mem_pool);
5619	return rc;
5620}
5621
5622/**
5623 * lpfc_sli4_queue_create - Create all the SLI4 queues
5624 * @phba: pointer to lpfc hba data structure.
5625 *
5626 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5627 * operation. For each SLI4 queue type, the parameters such as queue entry
5628 * count (queue depth) shall be taken from the module parameter. For now,
5629 * we just use some constant number as place holder.
5630 *
5631 * Return codes
5632 *      0 - successful
5633 *      ENOMEM - No availble memory
5634 *      EIO - The mailbox failed to complete successfully.
5635 **/
5636static int
5637lpfc_sli4_queue_create(struct lpfc_hba *phba)
5638{
5639	struct lpfc_queue *qdesc;
5640	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5641	int cfg_fcp_wq_count;
5642	int cfg_fcp_eq_count;
5643
5644	/*
5645	 * Sanity check for confiugred queue parameters against the run-time
5646	 * device parameters
5647	 */
5648
5649	/* Sanity check on FCP fast-path WQ parameters */
5650	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5651	if (cfg_fcp_wq_count >
5652	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5653		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5654				   LPFC_SP_WQN_DEF;
5655		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5656			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5657					"2581 Not enough WQs (%d) from "
5658					"the pci function for supporting "
5659					"FCP WQs (%d)\n",
5660					phba->sli4_hba.max_cfg_param.max_wq,
5661					phba->cfg_fcp_wq_count);
5662			goto out_error;
5663		}
5664		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5665				"2582 Not enough WQs (%d) from the pci "
5666				"function for supporting the requested "
5667				"FCP WQs (%d), the actual FCP WQs can "
5668				"be supported: %d\n",
5669				phba->sli4_hba.max_cfg_param.max_wq,
5670				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5671	}
5672	/* The actual number of FCP work queues adopted */
5673	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5674
5675	/* Sanity check on FCP fast-path EQ parameters */
5676	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5677	if (cfg_fcp_eq_count >
5678	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5679		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5680				   LPFC_SP_EQN_DEF;
5681		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5682			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5683					"2574 Not enough EQs (%d) from the "
5684					"pci function for supporting FCP "
5685					"EQs (%d)\n",
5686					phba->sli4_hba.max_cfg_param.max_eq,
5687					phba->cfg_fcp_eq_count);
5688			goto out_error;
5689		}
5690		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5691				"2575 Not enough EQs (%d) from the pci "
5692				"function for supporting the requested "
5693				"FCP EQs (%d), the actual FCP EQs can "
5694				"be supported: %d\n",
5695				phba->sli4_hba.max_cfg_param.max_eq,
5696				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5697	}
5698	/* It does not make sense to have more EQs than WQs */
5699	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5700		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5701				"2593 The FCP EQ count(%d) cannot be greater "
5702				"than the FCP WQ count(%d), limiting the "
5703				"FCP EQ count to %d\n", cfg_fcp_eq_count,
5704				phba->cfg_fcp_wq_count,
5705				phba->cfg_fcp_wq_count);
5706		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5707	}
5708	/* The actual number of FCP event queues adopted */
5709	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5710	/* The overall number of event queues used */
5711	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5712
5713	/*
5714	 * Create Event Queues (EQs)
5715	 */
5716
5717	/* Get EQ depth from module parameter, fake the default for now */
5718	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5719	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5720
5721	/* Create slow path event queue */
5722	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5723				      phba->sli4_hba.eq_ecount);
5724	if (!qdesc) {
5725		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5726				"0496 Failed allocate slow-path EQ\n");
5727		goto out_error;
5728	}
5729	phba->sli4_hba.sp_eq = qdesc;
5730
5731	/* Create fast-path FCP Event Queue(s) */
5732	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5733			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5734	if (!phba->sli4_hba.fp_eq) {
5735		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5736				"2576 Failed allocate memory for fast-path "
5737				"EQ record array\n");
5738		goto out_free_sp_eq;
5739	}
5740	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5741		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5742					      phba->sli4_hba.eq_ecount);
5743		if (!qdesc) {
5744			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5745					"0497 Failed allocate fast-path EQ\n");
5746			goto out_free_fp_eq;
5747		}
5748		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5749	}
5750
5751	/*
5752	 * Create Complete Queues (CQs)
5753	 */
5754
5755	/* Get CQ depth from module parameter, fake the default for now */
5756	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5757	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5758
5759	/* Create slow-path Mailbox Command Complete Queue */
5760	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5761				      phba->sli4_hba.cq_ecount);
5762	if (!qdesc) {
5763		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5764				"0500 Failed allocate slow-path mailbox CQ\n");
5765		goto out_free_fp_eq;
5766	}
5767	phba->sli4_hba.mbx_cq = qdesc;
5768
5769	/* Create slow-path ELS Complete Queue */
5770	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5771				      phba->sli4_hba.cq_ecount);
5772	if (!qdesc) {
5773		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5774				"0501 Failed allocate slow-path ELS CQ\n");
5775		goto out_free_mbx_cq;
5776	}
5777	phba->sli4_hba.els_cq = qdesc;
5778
5779
5780	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5781	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5782				phba->cfg_fcp_eq_count), GFP_KERNEL);
5783	if (!phba->sli4_hba.fcp_cq) {
5784		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5785				"2577 Failed allocate memory for fast-path "
5786				"CQ record array\n");
5787		goto out_free_els_cq;
5788	}
5789	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5790		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5791					      phba->sli4_hba.cq_ecount);
5792		if (!qdesc) {
5793			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5794					"0499 Failed allocate fast-path FCP "
5795					"CQ (%d)\n", fcp_cqidx);
5796			goto out_free_fcp_cq;
5797		}
5798		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5799	}
5800
5801	/* Create Mailbox Command Queue */
5802	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5803	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5804
5805	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5806				      phba->sli4_hba.mq_ecount);
5807	if (!qdesc) {
5808		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5809				"0505 Failed allocate slow-path MQ\n");
5810		goto out_free_fcp_cq;
5811	}
5812	phba->sli4_hba.mbx_wq = qdesc;
5813
5814	/*
5815	 * Create all the Work Queues (WQs)
5816	 */
5817	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5818	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5819
5820	/* Create slow-path ELS Work Queue */
5821	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5822				      phba->sli4_hba.wq_ecount);
5823	if (!qdesc) {
5824		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5825				"0504 Failed allocate slow-path ELS WQ\n");
5826		goto out_free_mbx_wq;
5827	}
5828	phba->sli4_hba.els_wq = qdesc;
5829
5830	/* Create fast-path FCP Work Queue(s) */
5831	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5832				phba->cfg_fcp_wq_count), GFP_KERNEL);
5833	if (!phba->sli4_hba.fcp_wq) {
5834		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5835				"2578 Failed allocate memory for fast-path "
5836				"WQ record array\n");
5837		goto out_free_els_wq;
5838	}
5839	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5840		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5841					      phba->sli4_hba.wq_ecount);
5842		if (!qdesc) {
5843			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5844					"0503 Failed allocate fast-path FCP "
5845					"WQ (%d)\n", fcp_wqidx);
5846			goto out_free_fcp_wq;
5847		}
5848		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5849	}
5850
5851	/*
5852	 * Create Receive Queue (RQ)
5853	 */
5854	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5855	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5856
5857	/* Create Receive Queue for header */
5858	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5859				      phba->sli4_hba.rq_ecount);
5860	if (!qdesc) {
5861		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5862				"0506 Failed allocate receive HRQ\n");
5863		goto out_free_fcp_wq;
5864	}
5865	phba->sli4_hba.hdr_rq = qdesc;
5866
5867	/* Create Receive Queue for data */
5868	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5869				      phba->sli4_hba.rq_ecount);
5870	if (!qdesc) {
5871		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5872				"0507 Failed allocate receive DRQ\n");
5873		goto out_free_hdr_rq;
5874	}
5875	phba->sli4_hba.dat_rq = qdesc;
5876
5877	return 0;
5878
5879out_free_hdr_rq:
5880	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5881	phba->sli4_hba.hdr_rq = NULL;
5882out_free_fcp_wq:
5883	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5884		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5885		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5886	}
5887	kfree(phba->sli4_hba.fcp_wq);
5888out_free_els_wq:
5889	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5890	phba->sli4_hba.els_wq = NULL;
5891out_free_mbx_wq:
5892	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5893	phba->sli4_hba.mbx_wq = NULL;
5894out_free_fcp_cq:
5895	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5896		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5897		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5898	}
5899	kfree(phba->sli4_hba.fcp_cq);
5900out_free_els_cq:
5901	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5902	phba->sli4_hba.els_cq = NULL;
5903out_free_mbx_cq:
5904	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5905	phba->sli4_hba.mbx_cq = NULL;
5906out_free_fp_eq:
5907	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5908		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5909		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5910	}
5911	kfree(phba->sli4_hba.fp_eq);
5912out_free_sp_eq:
5913	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5914	phba->sli4_hba.sp_eq = NULL;
5915out_error:
5916	return -ENOMEM;
5917}
5918
5919/**
5920 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5921 * @phba: pointer to lpfc hba data structure.
5922 *
5923 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5924 * operation.
5925 *
5926 * Return codes
5927 *      0 - successful
5928 *      ENOMEM - No availble memory
5929 *      EIO - The mailbox failed to complete successfully.
5930 **/
5931static void
5932lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5933{
5934	int fcp_qidx;
5935
5936	/* Release mailbox command work queue */
5937	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5938	phba->sli4_hba.mbx_wq = NULL;
5939
5940	/* Release ELS work queue */
5941	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5942	phba->sli4_hba.els_wq = NULL;
5943
5944	/* Release FCP work queue */
5945	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5946		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5947	kfree(phba->sli4_hba.fcp_wq);
5948	phba->sli4_hba.fcp_wq = NULL;
5949
5950	/* Release unsolicited receive queue */
5951	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5952	phba->sli4_hba.hdr_rq = NULL;
5953	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5954	phba->sli4_hba.dat_rq = NULL;
5955
5956	/* Release ELS complete queue */
5957	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5958	phba->sli4_hba.els_cq = NULL;
5959
5960	/* Release mailbox command complete queue */
5961	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5962	phba->sli4_hba.mbx_cq = NULL;
5963
5964	/* Release FCP response complete queue */
5965	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5966		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5967	kfree(phba->sli4_hba.fcp_cq);
5968	phba->sli4_hba.fcp_cq = NULL;
5969
5970	/* Release fast-path event queue */
5971	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5972		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5973	kfree(phba->sli4_hba.fp_eq);
5974	phba->sli4_hba.fp_eq = NULL;
5975
5976	/* Release slow-path event queue */
5977	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5978	phba->sli4_hba.sp_eq = NULL;
5979
5980	return;
5981}
5982
5983/**
5984 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5985 * @phba: pointer to lpfc hba data structure.
5986 *
5987 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5988 * operation.
5989 *
5990 * Return codes
5991 *      0 - successful
5992 *      ENOMEM - No availble memory
5993 *      EIO - The mailbox failed to complete successfully.
5994 **/
5995int
5996lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5997{
5998	int rc = -ENOMEM;
5999	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6000	int fcp_cq_index = 0;
6001
6002	/*
6003	 * Set up Event Queues (EQs)
6004	 */
6005
6006	/* Set up slow-path event queue */
6007	if (!phba->sli4_hba.sp_eq) {
6008		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6009				"0520 Slow-path EQ not allocated\n");
6010		goto out_error;
6011	}
6012	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6013			    LPFC_SP_DEF_IMAX);
6014	if (rc) {
6015		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6016				"0521 Failed setup of slow-path EQ: "
6017				"rc = 0x%x\n", rc);
6018		goto out_error;
6019	}
6020	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6021			"2583 Slow-path EQ setup: queue-id=%d\n",
6022			phba->sli4_hba.sp_eq->queue_id);
6023
6024	/* Set up fast-path event queue */
6025	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6026		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6027			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6028					"0522 Fast-path EQ (%d) not "
6029					"allocated\n", fcp_eqidx);
6030			goto out_destroy_fp_eq;
6031		}
6032		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6033				    phba->cfg_fcp_imax);
6034		if (rc) {
6035			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6036					"0523 Failed setup of fast-path EQ "
6037					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6038			goto out_destroy_fp_eq;
6039		}
6040		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6041				"2584 Fast-path EQ setup: "
6042				"queue[%d]-id=%d\n", fcp_eqidx,
6043				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6044	}
6045
6046	/*
6047	 * Set up Complete Queues (CQs)
6048	 */
6049
6050	/* Set up slow-path MBOX Complete Queue as the first CQ */
6051	if (!phba->sli4_hba.mbx_cq) {
6052		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6053				"0528 Mailbox CQ not allocated\n");
6054		goto out_destroy_fp_eq;
6055	}
6056	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6057			    LPFC_MCQ, LPFC_MBOX);
6058	if (rc) {
6059		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6060				"0529 Failed setup of slow-path mailbox CQ: "
6061				"rc = 0x%x\n", rc);
6062		goto out_destroy_fp_eq;
6063	}
6064	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6065			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6066			phba->sli4_hba.mbx_cq->queue_id,
6067			phba->sli4_hba.sp_eq->queue_id);
6068
6069	/* Set up slow-path ELS Complete Queue */
6070	if (!phba->sli4_hba.els_cq) {
6071		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6072				"0530 ELS CQ not allocated\n");
6073		goto out_destroy_mbx_cq;
6074	}
6075	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6076			    LPFC_WCQ, LPFC_ELS);
6077	if (rc) {
6078		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6079				"0531 Failed setup of slow-path ELS CQ: "
6080				"rc = 0x%x\n", rc);
6081		goto out_destroy_mbx_cq;
6082	}
6083	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6084			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6085			phba->sli4_hba.els_cq->queue_id,
6086			phba->sli4_hba.sp_eq->queue_id);
6087
6088	/* Set up fast-path FCP Response Complete Queue */
6089	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6090		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6091			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6092					"0526 Fast-path FCP CQ (%d) not "
6093					"allocated\n", fcp_cqidx);
6094			goto out_destroy_fcp_cq;
6095		}
6096		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6097				    phba->sli4_hba.fp_eq[fcp_cqidx],
6098				    LPFC_WCQ, LPFC_FCP);
6099		if (rc) {
6100			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6101					"0527 Failed setup of fast-path FCP "
6102					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6103			goto out_destroy_fcp_cq;
6104		}
6105		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6106				"2588 FCP CQ setup: cq[%d]-id=%d, "
6107				"parent eq[%d]-id=%d\n",
6108				fcp_cqidx,
6109				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6110				fcp_cqidx,
6111				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6112	}
6113
6114	/*
6115	 * Set up all the Work Queues (WQs)
6116	 */
6117
6118	/* Set up Mailbox Command Queue */
6119	if (!phba->sli4_hba.mbx_wq) {
6120		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6121				"0538 Slow-path MQ not allocated\n");
6122		goto out_destroy_fcp_cq;
6123	}
6124	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6125			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6126	if (rc) {
6127		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6128				"0539 Failed setup of slow-path MQ: "
6129				"rc = 0x%x\n", rc);
6130		goto out_destroy_fcp_cq;
6131	}
6132	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6133			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6134			phba->sli4_hba.mbx_wq->queue_id,
6135			phba->sli4_hba.mbx_cq->queue_id);
6136
6137	/* Set up slow-path ELS Work Queue */
6138	if (!phba->sli4_hba.els_wq) {
6139		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6140				"0536 Slow-path ELS WQ not allocated\n");
6141		goto out_destroy_mbx_wq;
6142	}
6143	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6144			    phba->sli4_hba.els_cq, LPFC_ELS);
6145	if (rc) {
6146		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6147				"0537 Failed setup of slow-path ELS WQ: "
6148				"rc = 0x%x\n", rc);
6149		goto out_destroy_mbx_wq;
6150	}
6151	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6152			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6153			phba->sli4_hba.els_wq->queue_id,
6154			phba->sli4_hba.els_cq->queue_id);
6155
6156	/* Set up fast-path FCP Work Queue */
6157	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6158		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6159			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6160					"0534 Fast-path FCP WQ (%d) not "
6161					"allocated\n", fcp_wqidx);
6162			goto out_destroy_fcp_wq;
6163		}
6164		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6165				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6166				    LPFC_FCP);
6167		if (rc) {
6168			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6169					"0535 Failed setup of fast-path FCP "
6170					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6171			goto out_destroy_fcp_wq;
6172		}
6173		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6174				"2591 FCP WQ setup: wq[%d]-id=%d, "
6175				"parent cq[%d]-id=%d\n",
6176				fcp_wqidx,
6177				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6178				fcp_cq_index,
6179				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6180		/* Round robin FCP Work Queue's Completion Queue assignment */
6181		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6182	}
6183
6184	/*
6185	 * Create Receive Queue (RQ)
6186	 */
6187	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6188		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6189				"0540 Receive Queue not allocated\n");
6190		goto out_destroy_fcp_wq;
6191	}
6192	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6193			    phba->sli4_hba.els_cq, LPFC_USOL);
6194	if (rc) {
6195		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6196				"0541 Failed setup of Receive Queue: "
6197				"rc = 0x%x\n", rc);
6198		goto out_destroy_fcp_wq;
6199	}
6200	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6201			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6202			"parent cq-id=%d\n",
6203			phba->sli4_hba.hdr_rq->queue_id,
6204			phba->sli4_hba.dat_rq->queue_id,
6205			phba->sli4_hba.els_cq->queue_id);
6206	return 0;
6207
6208out_destroy_fcp_wq:
6209	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6210		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6211	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6212out_destroy_mbx_wq:
6213	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6214out_destroy_fcp_cq:
6215	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6216		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6217	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6218out_destroy_mbx_cq:
6219	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6220out_destroy_fp_eq:
6221	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6222		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6223	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6224out_error:
6225	return rc;
6226}
6227
6228/**
6229 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6230 * @phba: pointer to lpfc hba data structure.
6231 *
6232 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6233 * operation.
6234 *
6235 * Return codes
6236 *      0 - successful
6237 *      ENOMEM - No availble memory
6238 *      EIO - The mailbox failed to complete successfully.
6239 **/
6240void
6241lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6242{
6243	int fcp_qidx;
6244
6245	/* Unset mailbox command work queue */
6246	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6247	/* Unset ELS work queue */
6248	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6249	/* Unset unsolicited receive queue */
6250	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6251	/* Unset FCP work queue */
6252	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6253		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6254	/* Unset mailbox command complete queue */
6255	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6256	/* Unset ELS complete queue */
6257	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6258	/* Unset FCP response complete queue */
6259	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6260		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6261	/* Unset fast-path event queue */
6262	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6263		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6264	/* Unset slow-path event queue */
6265	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6266}
6267
6268/**
6269 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6270 * @phba: pointer to lpfc hba data structure.
6271 *
6272 * This routine is invoked to allocate and set up a pool of completion queue
6273 * events. The body of the completion queue event is a completion queue entry
6274 * CQE. For now, this pool is used for the interrupt service routine to queue
6275 * the following HBA completion queue events for the worker thread to process:
6276 *   - Mailbox asynchronous events
6277 *   - Receive queue completion unsolicited events
6278 * Later, this can be used for all the slow-path events.
6279 *
6280 * Return codes
6281 *      0 - successful
6282 *      -ENOMEM - No availble memory
6283 **/
6284static int
6285lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6286{
6287	struct lpfc_cq_event *cq_event;
6288	int i;
6289
6290	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6291		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6292		if (!cq_event)
6293			goto out_pool_create_fail;
6294		list_add_tail(&cq_event->list,
6295			      &phba->sli4_hba.sp_cqe_event_pool);
6296	}
6297	return 0;
6298
6299out_pool_create_fail:
6300	lpfc_sli4_cq_event_pool_destroy(phba);
6301	return -ENOMEM;
6302}
6303
6304/**
6305 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6306 * @phba: pointer to lpfc hba data structure.
6307 *
6308 * This routine is invoked to free the pool of completion queue events at
6309 * driver unload time. Note that, it is the responsibility of the driver
6310 * cleanup routine to free all the outstanding completion-queue events
6311 * allocated from this pool back into the pool before invoking this routine
6312 * to destroy the pool.
6313 **/
6314static void
6315lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6316{
6317	struct lpfc_cq_event *cq_event, *next_cq_event;
6318
6319	list_for_each_entry_safe(cq_event, next_cq_event,
6320				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6321		list_del(&cq_event->list);
6322		kfree(cq_event);
6323	}
6324}
6325
6326/**
6327 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6328 * @phba: pointer to lpfc hba data structure.
6329 *
6330 * This routine is the lock free version of the API invoked to allocate a
6331 * completion-queue event from the free pool.
6332 *
6333 * Return: Pointer to the newly allocated completion-queue event if successful
6334 *         NULL otherwise.
6335 **/
6336struct lpfc_cq_event *
6337__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6338{
6339	struct lpfc_cq_event *cq_event = NULL;
6340
6341	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6342			 struct lpfc_cq_event, list);
6343	return cq_event;
6344}
6345
6346/**
6347 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6348 * @phba: pointer to lpfc hba data structure.
6349 *
6350 * This routine is the lock version of the API invoked to allocate a
6351 * completion-queue event from the free pool.
6352 *
6353 * Return: Pointer to the newly allocated completion-queue event if successful
6354 *         NULL otherwise.
6355 **/
6356struct lpfc_cq_event *
6357lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6358{
6359	struct lpfc_cq_event *cq_event;
6360	unsigned long iflags;
6361
6362	spin_lock_irqsave(&phba->hbalock, iflags);
6363	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6364	spin_unlock_irqrestore(&phba->hbalock, iflags);
6365	return cq_event;
6366}
6367
6368/**
6369 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6370 * @phba: pointer to lpfc hba data structure.
6371 * @cq_event: pointer to the completion queue event to be freed.
6372 *
6373 * This routine is the lock free version of the API invoked to release a
6374 * completion-queue event back into the free pool.
6375 **/
6376void
6377__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6378			     struct lpfc_cq_event *cq_event)
6379{
6380	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6381}
6382
6383/**
6384 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6385 * @phba: pointer to lpfc hba data structure.
6386 * @cq_event: pointer to the completion queue event to be freed.
6387 *
6388 * This routine is the lock version of the API invoked to release a
6389 * completion-queue event back into the free pool.
6390 **/
6391void
6392lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6393			   struct lpfc_cq_event *cq_event)
6394{
6395	unsigned long iflags;
6396	spin_lock_irqsave(&phba->hbalock, iflags);
6397	__lpfc_sli4_cq_event_release(phba, cq_event);
6398	spin_unlock_irqrestore(&phba->hbalock, iflags);
6399}
6400
6401/**
6402 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6403 * @phba: pointer to lpfc hba data structure.
6404 *
6405 * This routine is to free all the pending completion-queue events to the
6406 * back into the free pool for device reset.
6407 **/
6408static void
6409lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6410{
6411	LIST_HEAD(cqelist);
6412	struct lpfc_cq_event *cqe;
6413	unsigned long iflags;
6414
6415	/* Retrieve all the pending WCQEs from pending WCQE lists */
6416	spin_lock_irqsave(&phba->hbalock, iflags);
6417	/* Pending FCP XRI abort events */
6418	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6419			 &cqelist);
6420	/* Pending ELS XRI abort events */
6421	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6422			 &cqelist);
6423	/* Pending asynnc events */
6424	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6425			 &cqelist);
6426	spin_unlock_irqrestore(&phba->hbalock, iflags);
6427
6428	while (!list_empty(&cqelist)) {
6429		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6430		lpfc_sli4_cq_event_release(phba, cqe);
6431	}
6432}
6433
6434/**
6435 * lpfc_pci_function_reset - Reset pci function.
6436 * @phba: pointer to lpfc hba data structure.
6437 *
6438 * This routine is invoked to request a PCI function reset. It will destroys
6439 * all resources assigned to the PCI function which originates this request.
6440 *
6441 * Return codes
6442 *      0 - successful
6443 *      ENOMEM - No availble memory
6444 *      EIO - The mailbox failed to complete successfully.
6445 **/
6446int
6447lpfc_pci_function_reset(struct lpfc_hba *phba)
6448{
6449	LPFC_MBOXQ_t *mboxq;
6450	uint32_t rc = 0;
6451	uint32_t shdr_status, shdr_add_status;
6452	union lpfc_sli4_cfg_shdr *shdr;
6453
6454	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6455	if (!mboxq) {
6456		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6457				"0494 Unable to allocate memory for issuing "
6458				"SLI_FUNCTION_RESET mailbox command\n");
6459		return -ENOMEM;
6460	}
6461
6462	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6463	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6464			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6465			 LPFC_SLI4_MBX_EMBED);
6466	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6467	shdr = (union lpfc_sli4_cfg_shdr *)
6468		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6469	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6470	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6471	if (rc != MBX_TIMEOUT)
6472		mempool_free(mboxq, phba->mbox_mem_pool);
6473	if (shdr_status || shdr_add_status || rc) {
6474		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6475				"0495 SLI_FUNCTION_RESET mailbox failed with "
6476				"status x%x add_status x%x, mbx status x%x\n",
6477				shdr_status, shdr_add_status, rc);
6478		rc = -ENXIO;
6479	}
6480	return rc;
6481}
6482
6483/**
6484 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6485 * @phba: pointer to lpfc hba data structure.
6486 * @cnt: number of nop mailbox commands to send.
6487 *
6488 * This routine is invoked to send a number @cnt of NOP mailbox command and
6489 * wait for each command to complete.
6490 *
6491 * Return: the number of NOP mailbox command completed.
6492 **/
6493static int
6494lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6495{
6496	LPFC_MBOXQ_t *mboxq;
6497	int length, cmdsent;
6498	uint32_t mbox_tmo;
6499	uint32_t rc = 0;
6500	uint32_t shdr_status, shdr_add_status;
6501	union lpfc_sli4_cfg_shdr *shdr;
6502
6503	if (cnt == 0) {
6504		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6505				"2518 Requested to send 0 NOP mailbox cmd\n");
6506		return cnt;
6507	}
6508
6509	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6510	if (!mboxq) {
6511		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6512				"2519 Unable to allocate memory for issuing "
6513				"NOP mailbox command\n");
6514		return 0;
6515	}
6516
6517	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6518	length = (sizeof(struct lpfc_mbx_nop) -
6519		  sizeof(struct lpfc_sli4_cfg_mhdr));
6520	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6521			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6522
6523	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6524	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6525		if (!phba->sli4_hba.intr_enable)
6526			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6527		else
6528			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6529		if (rc == MBX_TIMEOUT)
6530			break;
6531		/* Check return status */
6532		shdr = (union lpfc_sli4_cfg_shdr *)
6533			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6534		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6535		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6536					 &shdr->response);
6537		if (shdr_status || shdr_add_status || rc) {
6538			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6539					"2520 NOP mailbox command failed "
6540					"status x%x add_status x%x mbx "
6541					"status x%x\n", shdr_status,
6542					shdr_add_status, rc);
6543			break;
6544		}
6545	}
6546
6547	if (rc != MBX_TIMEOUT)
6548		mempool_free(mboxq, phba->mbox_mem_pool);
6549
6550	return cmdsent;
6551}
6552
6553/**
6554 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
6555 * @phba: pointer to lpfc hba data structure.
6556 * @fcfi: fcf index.
6557 *
6558 * This routine is invoked to unregister a FCFI from device.
6559 **/
6560void
6561lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6562{
6563	LPFC_MBOXQ_t *mbox;
6564	uint32_t mbox_tmo;
6565	int rc;
6566	unsigned long flags;
6567
6568	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6569
6570	if (!mbox)
6571		return;
6572
6573	lpfc_unreg_fcfi(mbox, fcfi);
6574
6575	if (!phba->sli4_hba.intr_enable)
6576		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6577	else {
6578		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6579		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6580	}
6581	if (rc != MBX_TIMEOUT)
6582		mempool_free(mbox, phba->mbox_mem_pool);
6583	if (rc != MBX_SUCCESS)
6584		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6585				"2517 Unregister FCFI command failed "
6586				"status %d, mbxStatus x%x\n", rc,
6587				bf_get(lpfc_mqe_status, &mbox->u.mqe));
6588	else {
6589		spin_lock_irqsave(&phba->hbalock, flags);
6590		/* Mark the FCFI is no longer registered */
6591		phba->fcf.fcf_flag &=
6592			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6593		spin_unlock_irqrestore(&phba->hbalock, flags);
6594	}
6595}
6596
6597/**
6598 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6599 * @phba: pointer to lpfc hba data structure.
6600 *
6601 * This routine is invoked to set up the PCI device memory space for device
6602 * with SLI-4 interface spec.
6603 *
6604 * Return codes
6605 * 	0 - successful
6606 * 	other values - error
6607 **/
6608static int
6609lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6610{
6611	struct pci_dev *pdev;
6612	unsigned long bar0map_len, bar1map_len, bar2map_len;
6613	int error = -ENODEV;
6614
6615	/* Obtain PCI device reference */
6616	if (!phba->pcidev)
6617		return error;
6618	else
6619		pdev = phba->pcidev;
6620
6621	/* Set the device DMA mask size */
6622	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6623	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6624		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6625		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6626			return error;
6627		}
6628	}
6629
6630	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6631	 * number of bytes required by each mapping. They are actually
6632	 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
6633	 */
6634	if (pci_resource_start(pdev, 0)) {
6635		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6636		bar0map_len = pci_resource_len(pdev, 0);
6637	} else {
6638		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6639		bar0map_len = pci_resource_len(pdev, 1);
6640	}
6641	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6642	bar1map_len = pci_resource_len(pdev, 2);
6643
6644	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6645	bar2map_len = pci_resource_len(pdev, 4);
6646
6647	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6648	phba->sli4_hba.conf_regs_memmap_p =
6649				ioremap(phba->pci_bar0_map, bar0map_len);
6650	if (!phba->sli4_hba.conf_regs_memmap_p) {
6651		dev_printk(KERN_ERR, &pdev->dev,
6652			   "ioremap failed for SLI4 PCI config registers.\n");
6653		goto out;
6654	}
6655
6656	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6657	phba->sli4_hba.ctrl_regs_memmap_p =
6658				ioremap(phba->pci_bar1_map, bar1map_len);
6659	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6660		dev_printk(KERN_ERR, &pdev->dev,
6661			   "ioremap failed for SLI4 HBA control registers.\n");
6662		goto out_iounmap_conf;
6663	}
6664
6665	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6666	phba->sli4_hba.drbl_regs_memmap_p =
6667				ioremap(phba->pci_bar2_map, bar2map_len);
6668	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6669		dev_printk(KERN_ERR, &pdev->dev,
6670			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6671		goto out_iounmap_ctrl;
6672	}
6673
6674	/* Set up BAR0 PCI config space register memory map */
6675	lpfc_sli4_bar0_register_memmap(phba);
6676
6677	/* Set up BAR1 register memory map */
6678	lpfc_sli4_bar1_register_memmap(phba);
6679
6680	/* Set up BAR2 register memory map */
6681	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6682	if (error)
6683		goto out_iounmap_all;
6684
6685	return 0;
6686
6687out_iounmap_all:
6688	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6689out_iounmap_ctrl:
6690	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6691out_iounmap_conf:
6692	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6693out:
6694	return error;
6695}
6696
6697/**
6698 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6699 * @phba: pointer to lpfc hba data structure.
6700 *
6701 * This routine is invoked to unset the PCI device memory space for device
6702 * with SLI-4 interface spec.
6703 **/
6704static void
6705lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6706{
6707	struct pci_dev *pdev;
6708
6709	/* Obtain PCI device reference */
6710	if (!phba->pcidev)
6711		return;
6712	else
6713		pdev = phba->pcidev;
6714
6715	/* Free coherent DMA memory allocated */
6716
6717	/* Unmap I/O memory space */
6718	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6719	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6720	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6721
6722	return;
6723}
6724
6725/**
6726 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6727 * @phba: pointer to lpfc hba data structure.
6728 *
6729 * This routine is invoked to enable the MSI-X interrupt vectors to device
6730 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6731 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6732 * invoked, enables either all or nothing, depending on the current
6733 * availability of PCI vector resources. The device driver is responsible
6734 * for calling the individual request_irq() to register each MSI-X vector
6735 * with a interrupt handler, which is done in this function. Note that
6736 * later when device is unloading, the driver should always call free_irq()
6737 * on all MSI-X vectors it has done request_irq() on before calling
6738 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
6739 * will be left with MSI-X enabled and leaks its vectors.
6740 *
6741 * Return codes
6742 *   0 - successful
6743 *   other values - error
6744 **/
6745static int
6746lpfc_sli_enable_msix(struct lpfc_hba *phba)
6747{
6748	int rc, i;
6749	LPFC_MBOXQ_t *pmb;
6750
6751	/* Set up MSI-X multi-message vectors */
6752	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6753		phba->msix_entries[i].entry = i;
6754
6755	/* Configure MSI-X capability structure */
6756	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6757				ARRAY_SIZE(phba->msix_entries));
6758	if (rc) {
6759		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6760				"0420 PCI enable MSI-X failed (%d)\n", rc);
6761		goto msi_fail_out;
6762	}
6763	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6764		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6765				"0477 MSI-X entry[%d]: vector=x%x "
6766				"message=%d\n", i,
6767				phba->msix_entries[i].vector,
6768				phba->msix_entries[i].entry);
6769	/*
6770	 * Assign MSI-X vectors to interrupt handlers
6771	 */
6772
6773	/* vector-0 is associated to slow-path handler */
6774	rc = request_irq(phba->msix_entries[0].vector,
6775			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6776			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6777	if (rc) {
6778		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6779				"0421 MSI-X slow-path request_irq failed "
6780				"(%d)\n", rc);
6781		goto msi_fail_out;
6782	}
6783
6784	/* vector-1 is associated to fast-path handler */
6785	rc = request_irq(phba->msix_entries[1].vector,
6786			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6787			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6788
6789	if (rc) {
6790		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6791				"0429 MSI-X fast-path request_irq failed "
6792				"(%d)\n", rc);
6793		goto irq_fail_out;
6794	}
6795
6796	/*
6797	 * Configure HBA MSI-X attention conditions to messages
6798	 */
6799	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6800
6801	if (!pmb) {
6802		rc = -ENOMEM;
6803		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6804				"0474 Unable to allocate memory for issuing "
6805				"MBOX_CONFIG_MSI command\n");
6806		goto mem_fail_out;
6807	}
6808	rc = lpfc_config_msi(phba, pmb);
6809	if (rc)
6810		goto mbx_fail_out;
6811	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6812	if (rc != MBX_SUCCESS) {
6813		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6814				"0351 Config MSI mailbox command failed, "
6815				"mbxCmd x%x, mbxStatus x%x\n",
6816				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6817		goto mbx_fail_out;
6818	}
6819
6820	/* Free memory allocated for mailbox command */
6821	mempool_free(pmb, phba->mbox_mem_pool);
6822	return rc;
6823
6824mbx_fail_out:
6825	/* Free memory allocated for mailbox command */
6826	mempool_free(pmb, phba->mbox_mem_pool);
6827
6828mem_fail_out:
6829	/* free the irq already requested */
6830	free_irq(phba->msix_entries[1].vector, phba);
6831
6832irq_fail_out:
6833	/* free the irq already requested */
6834	free_irq(phba->msix_entries[0].vector, phba);
6835
6836msi_fail_out:
6837	/* Unconfigure MSI-X capability structure */
6838	pci_disable_msix(phba->pcidev);
6839	return rc;
6840}
6841
6842/**
6843 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6844 * @phba: pointer to lpfc hba data structure.
6845 *
6846 * This routine is invoked to release the MSI-X vectors and then disable the
6847 * MSI-X interrupt mode to device with SLI-3 interface spec.
6848 **/
6849static void
6850lpfc_sli_disable_msix(struct lpfc_hba *phba)
6851{
6852	int i;
6853
6854	/* Free up MSI-X multi-message vectors */
6855	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6856		free_irq(phba->msix_entries[i].vector, phba);
6857	/* Disable MSI-X */
6858	pci_disable_msix(phba->pcidev);
6859
6860	return;
6861}
6862
6863/**
6864 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6865 * @phba: pointer to lpfc hba data structure.
6866 *
6867 * This routine is invoked to enable the MSI interrupt mode to device with
6868 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6869 * enable the MSI vector. The device driver is responsible for calling the
6870 * request_irq() to register MSI vector with a interrupt the handler, which
6871 * is done in this function.
6872 *
6873 * Return codes
6874 * 	0 - successful
6875 * 	other values - error
6876 */
6877static int
6878lpfc_sli_enable_msi(struct lpfc_hba *phba)
6879{
6880	int rc;
6881
6882	rc = pci_enable_msi(phba->pcidev);
6883	if (!rc)
6884		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6885				"0462 PCI enable MSI mode success.\n");
6886	else {
6887		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6888				"0471 PCI enable MSI mode failed (%d)\n", rc);
6889		return rc;
6890	}
6891
6892	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6893			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6894	if (rc) {
6895		pci_disable_msi(phba->pcidev);
6896		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6897				"0478 MSI request_irq failed (%d)\n", rc);
6898	}
6899	return rc;
6900}
6901
6902/**
6903 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6904 * @phba: pointer to lpfc hba data structure.
6905 *
6906 * This routine is invoked to disable the MSI interrupt mode to device with
6907 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6908 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6909 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6910 * its vector.
6911 */
6912static void
6913lpfc_sli_disable_msi(struct lpfc_hba *phba)
6914{
6915	free_irq(phba->pcidev->irq, phba);
6916	pci_disable_msi(phba->pcidev);
6917	return;
6918}
6919
6920/**
6921 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6922 * @phba: pointer to lpfc hba data structure.
6923 *
6924 * This routine is invoked to enable device interrupt and associate driver's
6925 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6926 * spec. Depends on the interrupt mode configured to the driver, the driver
6927 * will try to fallback from the configured interrupt mode to an interrupt
6928 * mode which is supported by the platform, kernel, and device in the order
6929 * of:
6930 * MSI-X -> MSI -> IRQ.
6931 *
6932 * Return codes
6933 *   0 - successful
6934 *   other values - error
6935 **/
6936static uint32_t
6937lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6938{
6939	uint32_t intr_mode = LPFC_INTR_ERROR;
6940	int retval;
6941
6942	if (cfg_mode == 2) {
6943		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6944		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6945		if (!retval) {
6946			/* Now, try to enable MSI-X interrupt mode */
6947			retval = lpfc_sli_enable_msix(phba);
6948			if (!retval) {
6949				/* Indicate initialization to MSI-X mode */
6950				phba->intr_type = MSIX;
6951				intr_mode = 2;
6952			}
6953		}
6954	}
6955
6956	/* Fallback to MSI if MSI-X initialization failed */
6957	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6958		retval = lpfc_sli_enable_msi(phba);
6959		if (!retval) {
6960			/* Indicate initialization to MSI mode */
6961			phba->intr_type = MSI;
6962			intr_mode = 1;
6963		}
6964	}
6965
6966	/* Fallback to INTx if both MSI-X/MSI initalization failed */
6967	if (phba->intr_type == NONE) {
6968		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6969				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6970		if (!retval) {
6971			/* Indicate initialization to INTx mode */
6972			phba->intr_type = INTx;
6973			intr_mode = 0;
6974		}
6975	}
6976	return intr_mode;
6977}
6978
6979/**
6980 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6981 * @phba: pointer to lpfc hba data structure.
6982 *
6983 * This routine is invoked to disable device interrupt and disassociate the
6984 * driver's interrupt handler(s) from interrupt vector(s) to device with
6985 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6986 * release the interrupt vector(s) for the message signaled interrupt.
6987 **/
6988static void
6989lpfc_sli_disable_intr(struct lpfc_hba *phba)
6990{
6991	/* Disable the currently initialized interrupt mode */
6992	if (phba->intr_type == MSIX)
6993		lpfc_sli_disable_msix(phba);
6994	else if (phba->intr_type == MSI)
6995		lpfc_sli_disable_msi(phba);
6996	else if (phba->intr_type == INTx)
6997		free_irq(phba->pcidev->irq, phba);
6998
6999	/* Reset interrupt management states */
7000	phba->intr_type = NONE;
7001	phba->sli.slistat.sli_intr = 0;
7002
7003	return;
7004}
7005
7006/**
7007 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7008 * @phba: pointer to lpfc hba data structure.
7009 *
7010 * This routine is invoked to enable the MSI-X interrupt vectors to device
7011 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7012 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7013 * enables either all or nothing, depending on the current availability of
7014 * PCI vector resources. The device driver is responsible for calling the
7015 * individual request_irq() to register each MSI-X vector with a interrupt
7016 * handler, which is done in this function. Note that later when device is
7017 * unloading, the driver should always call free_irq() on all MSI-X vectors
7018 * it has done request_irq() on before calling pci_disable_msix(). Failure
7019 * to do so results in a BUG_ON() and a device will be left with MSI-X
7020 * enabled and leaks its vectors.
7021 *
7022 * Return codes
7023 * 0 - successful
7024 * other values - error
7025 **/
7026static int
7027lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7028{
7029	int rc, index;
7030
7031	/* Set up MSI-X multi-message vectors */
7032	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7033		phba->sli4_hba.msix_entries[index].entry = index;
7034
7035	/* Configure MSI-X capability structure */
7036	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7037			     phba->sli4_hba.cfg_eqn);
7038	if (rc) {
7039		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7040				"0484 PCI enable MSI-X failed (%d)\n", rc);
7041		goto msi_fail_out;
7042	}
7043	/* Log MSI-X vector assignment */
7044	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7045		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7046				"0489 MSI-X entry[%d]: vector=x%x "
7047				"message=%d\n", index,
7048				phba->sli4_hba.msix_entries[index].vector,
7049				phba->sli4_hba.msix_entries[index].entry);
7050	/*
7051	 * Assign MSI-X vectors to interrupt handlers
7052	 */
7053
7054	/* The first vector must associated to slow-path handler for MQ */
7055	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7056			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7057			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7058	if (rc) {
7059		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7060				"0485 MSI-X slow-path request_irq failed "
7061				"(%d)\n", rc);
7062		goto msi_fail_out;
7063	}
7064
7065	/* The rest of the vector(s) are associated to fast-path handler(s) */
7066	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
7067		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7068		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7069		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7070				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7071				 LPFC_FP_DRIVER_HANDLER_NAME,
7072				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7073		if (rc) {
7074			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7075					"0486 MSI-X fast-path (%d) "
7076					"request_irq failed (%d)\n", index, rc);
7077			goto cfg_fail_out;
7078		}
7079	}
7080
7081	return rc;
7082
7083cfg_fail_out:
7084	/* free the irq already requested */
7085	for (--index; index >= 1; index--)
7086		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7087			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7088
7089	/* free the irq already requested */
7090	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7091
7092msi_fail_out:
7093	/* Unconfigure MSI-X capability structure */
7094	pci_disable_msix(phba->pcidev);
7095	return rc;
7096}
7097
7098/**
7099 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7100 * @phba: pointer to lpfc hba data structure.
7101 *
7102 * This routine is invoked to release the MSI-X vectors and then disable the
7103 * MSI-X interrupt mode to device with SLI-4 interface spec.
7104 **/
7105static void
7106lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7107{
7108	int index;
7109
7110	/* Free up MSI-X multi-message vectors */
7111	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7112
7113	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
7114		free_irq(phba->sli4_hba.msix_entries[index].vector,
7115			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7116	/* Disable MSI-X */
7117	pci_disable_msix(phba->pcidev);
7118
7119	return;
7120}
7121
7122/**
7123 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7124 * @phba: pointer to lpfc hba data structure.
7125 *
7126 * This routine is invoked to enable the MSI interrupt mode to device with
7127 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7128 * to enable the MSI vector. The device driver is responsible for calling
7129 * the request_irq() to register MSI vector with a interrupt the handler,
7130 * which is done in this function.
7131 *
7132 * Return codes
7133 * 	0 - successful
7134 * 	other values - error
7135 **/
7136static int
7137lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7138{
7139	int rc, index;
7140
7141	rc = pci_enable_msi(phba->pcidev);
7142	if (!rc)
7143		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7144				"0487 PCI enable MSI mode success.\n");
7145	else {
7146		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7147				"0488 PCI enable MSI mode failed (%d)\n", rc);
7148		return rc;
7149	}
7150
7151	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7152			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7153	if (rc) {
7154		pci_disable_msi(phba->pcidev);
7155		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7156				"0490 MSI request_irq failed (%d)\n", rc);
7157	}
7158
7159	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7160		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7161		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7162	}
7163
7164	return rc;
7165}
7166
7167/**
7168 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7169 * @phba: pointer to lpfc hba data structure.
7170 *
7171 * This routine is invoked to disable the MSI interrupt mode to device with
7172 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
7173 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7174 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
7175 * its vector.
7176 **/
7177static void
7178lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7179{
7180	free_irq(phba->pcidev->irq, phba);
7181	pci_disable_msi(phba->pcidev);
7182	return;
7183}
7184
7185/**
7186 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7187 * @phba: pointer to lpfc hba data structure.
7188 *
7189 * This routine is invoked to enable device interrupt and associate driver's
7190 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7191 * interface spec. Depends on the interrupt mode configured to the driver,
7192 * the driver will try to fallback from the configured interrupt mode to an
7193 * interrupt mode which is supported by the platform, kernel, and device in
7194 * the order of:
7195 * MSI-X -> MSI -> IRQ.
7196 *
7197 * Return codes
7198 * 	0 - successful
7199 * 	other values - error
7200 **/
7201static uint32_t
7202lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7203{
7204	uint32_t intr_mode = LPFC_INTR_ERROR;
7205	int retval, index;
7206
7207	if (cfg_mode == 2) {
7208		/* Preparation before conf_msi mbox cmd */
7209		retval = 0;
7210		if (!retval) {
7211			/* Now, try to enable MSI-X interrupt mode */
7212			retval = lpfc_sli4_enable_msix(phba);
7213			if (!retval) {
7214				/* Indicate initialization to MSI-X mode */
7215				phba->intr_type = MSIX;
7216				intr_mode = 2;
7217			}
7218		}
7219	}
7220
7221	/* Fallback to MSI if MSI-X initialization failed */
7222	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7223		retval = lpfc_sli4_enable_msi(phba);
7224		if (!retval) {
7225			/* Indicate initialization to MSI mode */
7226			phba->intr_type = MSI;
7227			intr_mode = 1;
7228		}
7229	}
7230
7231	/* Fallback to INTx if both MSI-X/MSI initalization failed */
7232	if (phba->intr_type == NONE) {
7233		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7234				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7235		if (!retval) {
7236			/* Indicate initialization to INTx mode */
7237			phba->intr_type = INTx;
7238			intr_mode = 0;
7239			for (index = 0; index < phba->cfg_fcp_eq_count;
7240			     index++) {
7241				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7242				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7243			}
7244		}
7245	}
7246	return intr_mode;
7247}
7248
7249/**
7250 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7251 * @phba: pointer to lpfc hba data structure.
7252 *
7253 * This routine is invoked to disable device interrupt and disassociate
7254 * the driver's interrupt handler(s) from interrupt vector(s) to device
7255 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7256 * will release the interrupt vector(s) for the message signaled interrupt.
7257 **/
7258static void
7259lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7260{
7261	/* Disable the currently initialized interrupt mode */
7262	if (phba->intr_type == MSIX)
7263		lpfc_sli4_disable_msix(phba);
7264	else if (phba->intr_type == MSI)
7265		lpfc_sli4_disable_msi(phba);
7266	else if (phba->intr_type == INTx)
7267		free_irq(phba->pcidev->irq, phba);
7268
7269	/* Reset interrupt management states */
7270	phba->intr_type = NONE;
7271	phba->sli.slistat.sli_intr = 0;
7272
7273	return;
7274}
7275
7276/**
7277 * lpfc_unset_hba - Unset SLI3 hba device initialization
7278 * @phba: pointer to lpfc hba data structure.
7279 *
7280 * This routine is invoked to unset the HBA device initialization steps to
7281 * a device with SLI-3 interface spec.
7282 **/
7283static void
7284lpfc_unset_hba(struct lpfc_hba *phba)
7285{
7286	struct lpfc_vport *vport = phba->pport;
7287	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7288
7289	spin_lock_irq(shost->host_lock);
7290	vport->load_flag |= FC_UNLOADING;
7291	spin_unlock_irq(shost->host_lock);
7292
7293	lpfc_stop_hba_timers(phba);
7294
7295	phba->pport->work_port_events = 0;
7296
7297	lpfc_sli_hba_down(phba);
7298
7299	lpfc_sli_brdrestart(phba);
7300
7301	lpfc_sli_disable_intr(phba);
7302
7303	return;
7304}
7305
7306/**
7307 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7308 * @phba: pointer to lpfc hba data structure.
7309 *
7310 * This routine is invoked to unset the HBA device initialization steps to
7311 * a device with SLI-4 interface spec.
7312 **/
7313static void
7314lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7315{
7316	struct lpfc_vport *vport = phba->pport;
7317	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7318
7319	spin_lock_irq(shost->host_lock);
7320	vport->load_flag |= FC_UNLOADING;
7321	spin_unlock_irq(shost->host_lock);
7322
7323	phba->pport->work_port_events = 0;
7324
7325	lpfc_sli4_hba_down(phba);
7326
7327	lpfc_sli4_disable_intr(phba);
7328
7329	return;
7330}
7331
7332/**
7333 * lpfc_sli4_hba_unset - Unset the fcoe hba
7334 * @phba: Pointer to HBA context object.
7335 *
7336 * This function is called in the SLI4 code path to reset the HBA's FCoE
7337 * function. The caller is not required to hold any lock. This routine
7338 * issues PCI function reset mailbox command to reset the FCoE function.
7339 * At the end of the function, it calls lpfc_hba_down_post function to
7340 * free any pending commands.
7341 **/
7342static void
7343lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7344{
7345	int wait_cnt = 0;
7346	LPFC_MBOXQ_t *mboxq;
7347
7348	lpfc_stop_hba_timers(phba);
7349	phba->sli4_hba.intr_enable = 0;
7350
7351	/*
7352	 * Gracefully wait out the potential current outstanding asynchronous
7353	 * mailbox command.
7354	 */
7355
7356	/* First, block any pending async mailbox command from posted */
7357	spin_lock_irq(&phba->hbalock);
7358	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7359	spin_unlock_irq(&phba->hbalock);
7360	/* Now, trying to wait it out if we can */
7361	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7362		msleep(10);
7363		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7364			break;
7365	}
7366	/* Forcefully release the outstanding mailbox command if timed out */
7367	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7368		spin_lock_irq(&phba->hbalock);
7369		mboxq = phba->sli.mbox_active;
7370		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7371		__lpfc_mbox_cmpl_put(phba, mboxq);
7372		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7373		phba->sli.mbox_active = NULL;
7374		spin_unlock_irq(&phba->hbalock);
7375	}
7376
7377	/* Tear down the queues in the HBA */
7378	lpfc_sli4_queue_unset(phba);
7379
7380	/* Disable PCI subsystem interrupt */
7381	lpfc_sli4_disable_intr(phba);
7382
7383	/* Stop kthread signal shall trigger work_done one more time */
7384	kthread_stop(phba->worker_thread);
7385
7386	/* Stop the SLI4 device port */
7387	phba->pport->work_port_events = 0;
7388}
7389
7390 /**
7391 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7392 * @phba: Pointer to HBA context object.
7393 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7394 *
7395 * This function is called in the SLI4 code path to read the port's
7396 * sli4 capabilities.
7397 *
7398 * This function may be be called from any context that can block-wait
7399 * for the completion.  The expectation is that this routine is called
7400 * typically from probe_one or from the online routine.
7401 **/
7402int
7403lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7404{
7405	int rc;
7406	struct lpfc_mqe *mqe;
7407	struct lpfc_pc_sli4_params *sli4_params;
7408	uint32_t mbox_tmo;
7409
7410	rc = 0;
7411	mqe = &mboxq->u.mqe;
7412
7413	/* Read the port's SLI4 Parameters port capabilities */
7414	lpfc_sli4_params(mboxq);
7415	if (!phba->sli4_hba.intr_enable)
7416		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7417	else {
7418		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7419		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7420	}
7421
7422	if (unlikely(rc))
7423		return 1;
7424
7425	sli4_params = &phba->sli4_hba.pc_sli4_params;
7426	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7427	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7428	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7429	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7430					     &mqe->un.sli4_params);
7431	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7432					     &mqe->un.sli4_params);
7433	sli4_params->proto_types = mqe->un.sli4_params.word3;
7434	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7435	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7436	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7437	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7438	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7439	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7440	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7441	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7442	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7443	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7444	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7445	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7446	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7447	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7448	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7449	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7450	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7451	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7452	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7453	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7454	return rc;
7455}
7456
7457/**
7458 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7459 * @pdev: pointer to PCI device
7460 * @pid: pointer to PCI device identifier
7461 *
7462 * This routine is to be called to attach a device with SLI-3 interface spec
7463 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7464 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7465 * information of the device and driver to see if the driver state that it can
7466 * support this kind of device. If the match is successful, the driver core
7467 * invokes this routine. If this routine determines it can claim the HBA, it
7468 * does all the initialization that it needs to do to handle the HBA properly.
7469 *
7470 * Return code
7471 * 	0 - driver can claim the device
7472 * 	negative value - driver can not claim the device
7473 **/
7474static int __devinit
7475lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7476{
7477	struct lpfc_hba   *phba;
7478	struct lpfc_vport *vport = NULL;
7479	struct Scsi_Host  *shost = NULL;
7480	int error;
7481	uint32_t cfg_mode, intr_mode;
7482
7483	/* Allocate memory for HBA structure */
7484	phba = lpfc_hba_alloc(pdev);
7485	if (!phba)
7486		return -ENOMEM;
7487
7488	/* Perform generic PCI device enabling operation */
7489	error = lpfc_enable_pci_dev(phba);
7490	if (error) {
7491		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7492				"1401 Failed to enable pci device.\n");
7493		goto out_free_phba;
7494	}
7495
7496	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
7497	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7498	if (error)
7499		goto out_disable_pci_dev;
7500
7501	/* Set up SLI-3 specific device PCI memory space */
7502	error = lpfc_sli_pci_mem_setup(phba);
7503	if (error) {
7504		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7505				"1402 Failed to set up pci memory space.\n");
7506		goto out_disable_pci_dev;
7507	}
7508
7509	/* Set up phase-1 common device driver resources */
7510	error = lpfc_setup_driver_resource_phase1(phba);
7511	if (error) {
7512		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7513				"1403 Failed to set up driver resource.\n");
7514		goto out_unset_pci_mem_s3;
7515	}
7516
7517	/* Set up SLI-3 specific device driver resources */
7518	error = lpfc_sli_driver_resource_setup(phba);
7519	if (error) {
7520		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7521				"1404 Failed to set up driver resource.\n");
7522		goto out_unset_pci_mem_s3;
7523	}
7524
7525	/* Initialize and populate the iocb list per host */
7526	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7527	if (error) {
7528		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7529				"1405 Failed to initialize iocb list.\n");
7530		goto out_unset_driver_resource_s3;
7531	}
7532
7533	/* Set up common device driver resources */
7534	error = lpfc_setup_driver_resource_phase2(phba);
7535	if (error) {
7536		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7537				"1406 Failed to set up driver resource.\n");
7538		goto out_free_iocb_list;
7539	}
7540
7541	/* Create SCSI host to the physical port */
7542	error = lpfc_create_shost(phba);
7543	if (error) {
7544		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7545				"1407 Failed to create scsi host.\n");
7546		goto out_unset_driver_resource;
7547	}
7548
7549	/* Configure sysfs attributes */
7550	vport = phba->pport;
7551	error = lpfc_alloc_sysfs_attr(vport);
7552	if (error) {
7553		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7554				"1476 Failed to allocate sysfs attr\n");
7555		goto out_destroy_shost;
7556	}
7557
7558	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7559	/* Now, trying to enable interrupt and bring up the device */
7560	cfg_mode = phba->cfg_use_msi;
7561	while (true) {
7562		/* Put device to a known state before enabling interrupt */
7563		lpfc_stop_port(phba);
7564		/* Configure and enable interrupt */
7565		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7566		if (intr_mode == LPFC_INTR_ERROR) {
7567			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7568					"0431 Failed to enable interrupt.\n");
7569			error = -ENODEV;
7570			goto out_free_sysfs_attr;
7571		}
7572		/* SLI-3 HBA setup */
7573		if (lpfc_sli_hba_setup(phba)) {
7574			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7575					"1477 Failed to set up hba\n");
7576			error = -ENODEV;
7577			goto out_remove_device;
7578		}
7579
7580		/* Wait 50ms for the interrupts of previous mailbox commands */
7581		msleep(50);
7582		/* Check active interrupts on message signaled interrupts */
7583		if (intr_mode == 0 ||
7584		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7585			/* Log the current active interrupt mode */
7586			phba->intr_mode = intr_mode;
7587			lpfc_log_intr_mode(phba, intr_mode);
7588			break;
7589		} else {
7590			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7591					"0447 Configure interrupt mode (%d) "
7592					"failed active interrupt test.\n",
7593					intr_mode);
7594			/* Disable the current interrupt mode */
7595			lpfc_sli_disable_intr(phba);
7596			/* Try next level of interrupt mode */
7597			cfg_mode = --intr_mode;
7598		}
7599	}
7600
7601	/* Perform post initialization setup */
7602	lpfc_post_init_setup(phba);
7603
7604	/* Check if there are static vports to be created. */
7605	lpfc_create_static_vport(phba);
7606
7607	return 0;
7608
7609out_remove_device:
7610	lpfc_unset_hba(phba);
7611out_free_sysfs_attr:
7612	lpfc_free_sysfs_attr(vport);
7613out_destroy_shost:
7614	lpfc_destroy_shost(phba);
7615out_unset_driver_resource:
7616	lpfc_unset_driver_resource_phase2(phba);
7617out_free_iocb_list:
7618	lpfc_free_iocb_list(phba);
7619out_unset_driver_resource_s3:
7620	lpfc_sli_driver_resource_unset(phba);
7621out_unset_pci_mem_s3:
7622	lpfc_sli_pci_mem_unset(phba);
7623out_disable_pci_dev:
7624	lpfc_disable_pci_dev(phba);
7625	if (shost)
7626		scsi_host_put(shost);
7627out_free_phba:
7628	lpfc_hba_free(phba);
7629	return error;
7630}
7631
7632/**
7633 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7634 * @pdev: pointer to PCI device
7635 *
7636 * This routine is to be called to disattach a device with SLI-3 interface
7637 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7638 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7639 * device to be removed from the PCI subsystem properly.
7640 **/
7641static void __devexit
7642lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7643{
7644	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
7645	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7646	struct lpfc_vport **vports;
7647	struct lpfc_hba   *phba = vport->phba;
7648	int i;
7649	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7650
7651	spin_lock_irq(&phba->hbalock);
7652	vport->load_flag |= FC_UNLOADING;
7653	spin_unlock_irq(&phba->hbalock);
7654
7655	lpfc_free_sysfs_attr(vport);
7656
7657	/* Release all the vports against this physical port */
7658	vports = lpfc_create_vport_work_array(phba);
7659	if (vports != NULL)
7660		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7661			fc_vport_terminate(vports[i]->fc_vport);
7662	lpfc_destroy_vport_work_array(phba, vports);
7663
7664	/* Remove FC host and then SCSI host with the physical port */
7665	fc_remove_host(shost);
7666	scsi_remove_host(shost);
7667	lpfc_cleanup(vport);
7668
7669	/*
7670	 * Bring down the SLI Layer. This step disable all interrupts,
7671	 * clears the rings, discards all mailbox commands, and resets
7672	 * the HBA.
7673	 */
7674
7675	/* HBA interrupt will be diabled after this call */
7676	lpfc_sli_hba_down(phba);
7677	/* Stop kthread signal shall trigger work_done one more time */
7678	kthread_stop(phba->worker_thread);
7679	/* Final cleanup of txcmplq and reset the HBA */
7680	lpfc_sli_brdrestart(phba);
7681
7682	lpfc_stop_hba_timers(phba);
7683	spin_lock_irq(&phba->hbalock);
7684	list_del_init(&vport->listentry);
7685	spin_unlock_irq(&phba->hbalock);
7686
7687	lpfc_debugfs_terminate(vport);
7688
7689	/* Disable interrupt */
7690	lpfc_sli_disable_intr(phba);
7691
7692	pci_set_drvdata(pdev, NULL);
7693	scsi_host_put(shost);
7694
7695	/*
7696	 * Call scsi_free before mem_free since scsi bufs are released to their
7697	 * corresponding pools here.
7698	 */
7699	lpfc_scsi_free(phba);
7700	lpfc_mem_free_all(phba);
7701
7702	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7703			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7704
7705	/* Free resources associated with SLI2 interface */
7706	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7707			  phba->slim2p.virt, phba->slim2p.phys);
7708
7709	/* unmap adapter SLIM and Control Registers */
7710	iounmap(phba->ctrl_regs_memmap_p);
7711	iounmap(phba->slim_memmap_p);
7712
7713	lpfc_hba_free(phba);
7714
7715	pci_release_selected_regions(pdev, bars);
7716	pci_disable_device(pdev);
7717}
7718
7719/**
7720 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7721 * @pdev: pointer to PCI device
7722 * @msg: power management message
7723 *
7724 * This routine is to be called from the kernel's PCI subsystem to support
7725 * system Power Management (PM) to device with SLI-3 interface spec. When
7726 * PM invokes this method, it quiesces the device by stopping the driver's
7727 * worker thread for the device, turning off device's interrupt and DMA,
7728 * and bring the device offline. Note that as the driver implements the
7729 * minimum PM requirements to a power-aware driver's PM support for the
7730 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7731 * to the suspend() method call will be treated as SUSPEND and the driver will
7732 * fully reinitialize its device during resume() method call, the driver will
7733 * set device to PCI_D3hot state in PCI config space instead of setting it
7734 * according to the @msg provided by the PM.
7735 *
7736 * Return code
7737 * 	0 - driver suspended the device
7738 * 	Error otherwise
7739 **/
7740static int
7741lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7742{
7743	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7744	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7745
7746	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7747			"0473 PCI device Power Management suspend.\n");
7748
7749	/* Bring down the device */
7750	lpfc_offline_prep(phba);
7751	lpfc_offline(phba);
7752	kthread_stop(phba->worker_thread);
7753
7754	/* Disable interrupt from device */
7755	lpfc_sli_disable_intr(phba);
7756
7757	/* Save device state to PCI config space */
7758	pci_save_state(pdev);
7759	pci_set_power_state(pdev, PCI_D3hot);
7760
7761	return 0;
7762}
7763
7764/**
7765 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7766 * @pdev: pointer to PCI device
7767 *
7768 * This routine is to be called from the kernel's PCI subsystem to support
7769 * system Power Management (PM) to device with SLI-3 interface spec. When PM
7770 * invokes this method, it restores the device's PCI config space state and
7771 * fully reinitializes the device and brings it online. Note that as the
7772 * driver implements the minimum PM requirements to a power-aware driver's
7773 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
7774 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
7775 * driver will fully reinitialize its device during resume() method call,
7776 * the device will be set to PCI_D0 directly in PCI config space before
7777 * restoring the state.
7778 *
7779 * Return code
7780 * 	0 - driver suspended the device
7781 * 	Error otherwise
7782 **/
7783static int
7784lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7785{
7786	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7787	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7788	uint32_t intr_mode;
7789	int error;
7790
7791	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7792			"0452 PCI device Power Management resume.\n");
7793
7794	/* Restore device state from PCI config space */
7795	pci_set_power_state(pdev, PCI_D0);
7796	pci_restore_state(pdev);
7797
7798	/*
7799	 * As the new kernel behavior of pci_restore_state() API call clears
7800	 * device saved_state flag, need to save the restored state again.
7801	 */
7802	pci_save_state(pdev);
7803
7804	if (pdev->is_busmaster)
7805		pci_set_master(pdev);
7806
7807	/* Startup the kernel thread for this host adapter. */
7808	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7809					"lpfc_worker_%d", phba->brd_no);
7810	if (IS_ERR(phba->worker_thread)) {
7811		error = PTR_ERR(phba->worker_thread);
7812		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813				"0434 PM resume failed to start worker "
7814				"thread: error=x%x.\n", error);
7815		return error;
7816	}
7817
7818	/* Configure and enable interrupt */
7819	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7820	if (intr_mode == LPFC_INTR_ERROR) {
7821		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7822				"0430 PM resume Failed to enable interrupt\n");
7823		return -EIO;
7824	} else
7825		phba->intr_mode = intr_mode;
7826
7827	/* Restart HBA and bring it online */
7828	lpfc_sli_brdrestart(phba);
7829	lpfc_online(phba);
7830
7831	/* Log the current active interrupt mode */
7832	lpfc_log_intr_mode(phba, phba->intr_mode);
7833
7834	return 0;
7835}
7836
7837/**
7838 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7839 * @phba: pointer to lpfc hba data structure.
7840 *
7841 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7842 * aborts all the outstanding SCSI I/Os to the pci device.
7843 **/
7844static void
7845lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7846{
7847	struct lpfc_sli *psli = &phba->sli;
7848	struct lpfc_sli_ring  *pring;
7849
7850	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7851			"2723 PCI channel I/O abort preparing for recovery\n");
7852
7853	/*
7854	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
7855	 * and let the SCSI mid-layer to retry them to recover.
7856	 */
7857	pring = &psli->ring[psli->fcp_ring];
7858	lpfc_sli_abort_iocb_ring(phba, pring);
7859}
7860
7861/**
7862 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7863 * @phba: pointer to lpfc hba data structure.
7864 *
7865 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7866 * disables the device interrupt and pci device, and aborts the internal FCP
7867 * pending I/Os.
7868 **/
7869static void
7870lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7871{
7872	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7873			"2710 PCI channel disable preparing for reset\n");
7874
7875	/* Block all SCSI devices' I/Os on the host */
7876	lpfc_scsi_dev_block(phba);
7877
7878	/* stop all timers */
7879	lpfc_stop_hba_timers(phba);
7880
7881	/* Disable interrupt and pci device */
7882	lpfc_sli_disable_intr(phba);
7883	pci_disable_device(phba->pcidev);
7884	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
7885	lpfc_sli_flush_fcp_rings(phba);
7886}
7887
7888/**
7889 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7890 * @phba: pointer to lpfc hba data structure.
7891 *
7892 * This routine is called to prepare the SLI3 device for PCI slot permanently
7893 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7894 * pending I/Os.
7895 **/
7896static void
7897lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7898{
7899	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7900			"2711 PCI channel permanent disable for failure\n");
7901	/* Block all SCSI devices' I/Os on the host */
7902	lpfc_scsi_dev_block(phba);
7903
7904	/* stop all timers */
7905	lpfc_stop_hba_timers(phba);
7906
7907	/* Clean up all driver's outstanding SCSI I/Os */
7908	lpfc_sli_flush_fcp_rings(phba);
7909}
7910
7911/**
7912 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7913 * @pdev: pointer to PCI device.
7914 * @state: the current PCI connection state.
7915 *
7916 * This routine is called from the PCI subsystem for I/O error handling to
7917 * device with SLI-3 interface spec. This function is called by the PCI
7918 * subsystem after a PCI bus error affecting this device has been detected.
7919 * When this function is invoked, it will need to stop all the I/Os and
7920 * interrupt(s) to the device. Once that is done, it will return
7921 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7922 * as desired.
7923 *
7924 * Return codes
7925 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7926 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7927 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7928 **/
7929static pci_ers_result_t
7930lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7931{
7932	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7933	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7934
7935	switch (state) {
7936	case pci_channel_io_normal:
7937		/* Non-fatal error, prepare for recovery */
7938		lpfc_sli_prep_dev_for_recover(phba);
7939		return PCI_ERS_RESULT_CAN_RECOVER;
7940	case pci_channel_io_frozen:
7941		/* Fatal error, prepare for slot reset */
7942		lpfc_sli_prep_dev_for_reset(phba);
7943		return PCI_ERS_RESULT_NEED_RESET;
7944	case pci_channel_io_perm_failure:
7945		/* Permanent failure, prepare for device down */
7946		lpfc_prep_dev_for_perm_failure(phba);
7947		return PCI_ERS_RESULT_DISCONNECT;
7948	default:
7949		/* Unknown state, prepare and request slot reset */
7950		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7951				"0472 Unknown PCI error state: x%x\n", state);
7952		lpfc_sli_prep_dev_for_reset(phba);
7953		return PCI_ERS_RESULT_NEED_RESET;
7954	}
7955}
7956
7957/**
7958 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7959 * @pdev: pointer to PCI device.
7960 *
7961 * This routine is called from the PCI subsystem for error handling to
7962 * device with SLI-3 interface spec. This is called after PCI bus has been
7963 * reset to restart the PCI card from scratch, as if from a cold-boot.
7964 * During the PCI subsystem error recovery, after driver returns
7965 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7966 * recovery and then call this routine before calling the .resume method
7967 * to recover the device. This function will initialize the HBA device,
7968 * enable the interrupt, but it will just put the HBA to offline state
7969 * without passing any I/O traffic.
7970 *
7971 * Return codes
7972 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7973 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7974 */
7975static pci_ers_result_t
7976lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7977{
7978	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7979	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7980	struct lpfc_sli *psli = &phba->sli;
7981	uint32_t intr_mode;
7982
7983	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7984	if (pci_enable_device_mem(pdev)) {
7985		printk(KERN_ERR "lpfc: Cannot re-enable "
7986			"PCI device after reset.\n");
7987		return PCI_ERS_RESULT_DISCONNECT;
7988	}
7989
7990	pci_restore_state(pdev);
7991
7992	/*
7993	 * As the new kernel behavior of pci_restore_state() API call clears
7994	 * device saved_state flag, need to save the restored state again.
7995	 */
7996	pci_save_state(pdev);
7997
7998	if (pdev->is_busmaster)
7999		pci_set_master(pdev);
8000
8001	spin_lock_irq(&phba->hbalock);
8002	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8003	spin_unlock_irq(&phba->hbalock);
8004
8005	/* Configure and enable interrupt */
8006	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8007	if (intr_mode == LPFC_INTR_ERROR) {
8008		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8009				"0427 Cannot re-enable interrupt after "
8010				"slot reset.\n");
8011		return PCI_ERS_RESULT_DISCONNECT;
8012	} else
8013		phba->intr_mode = intr_mode;
8014
8015	/* Take device offline; this will perform cleanup */
8016	lpfc_offline(phba);
8017	lpfc_sli_brdrestart(phba);
8018
8019	/* Log the current active interrupt mode */
8020	lpfc_log_intr_mode(phba, phba->intr_mode);
8021
8022	return PCI_ERS_RESULT_RECOVERED;
8023}
8024
8025/**
8026 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8027 * @pdev: pointer to PCI device
8028 *
8029 * This routine is called from the PCI subsystem for error handling to device
8030 * with SLI-3 interface spec. It is called when kernel error recovery tells
8031 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8032 * error recovery. After this call, traffic can start to flow from this device
8033 * again.
8034 */
8035static void
8036lpfc_io_resume_s3(struct pci_dev *pdev)
8037{
8038	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8039	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8040
8041	/* Bring device online, it will be no-op for non-fatal error resume */
8042	lpfc_online(phba);
8043
8044	/* Clean up Advanced Error Reporting (AER) if needed */
8045	if (phba->hba_flag & HBA_AER_ENABLED)
8046		pci_cleanup_aer_uncorrect_error_status(pdev);
8047}
8048
8049/**
8050 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8051 * @phba: pointer to lpfc hba data structure.
8052 *
8053 * returns the number of ELS/CT IOCBs to reserve
8054 **/
8055int
8056lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8057{
8058	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8059
8060	if (phba->sli_rev == LPFC_SLI_REV4) {
8061		if (max_xri <= 100)
8062			return 10;
8063		else if (max_xri <= 256)
8064			return 25;
8065		else if (max_xri <= 512)
8066			return 50;
8067		else if (max_xri <= 1024)
8068			return 100;
8069		else
8070			return 150;
8071	} else
8072		return 0;
8073}
8074
8075/**
8076 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8077 * @pdev: pointer to PCI device
8078 * @pid: pointer to PCI device identifier
8079 *
8080 * This routine is called from the kernel's PCI subsystem to device with
8081 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8082 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8083 * information of the device and driver to see if the driver state that it
8084 * can support this kind of device. If the match is successful, the driver
8085 * core invokes this routine. If this routine determines it can claim the HBA,
8086 * it does all the initialization that it needs to do to handle the HBA
8087 * properly.
8088 *
8089 * Return code
8090 * 	0 - driver can claim the device
8091 * 	negative value - driver can not claim the device
8092 **/
8093static int __devinit
8094lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8095{
8096	struct lpfc_hba   *phba;
8097	struct lpfc_vport *vport = NULL;
8098	struct Scsi_Host  *shost = NULL;
8099	int error;
8100	uint32_t cfg_mode, intr_mode;
8101	int mcnt;
8102
8103	/* Allocate memory for HBA structure */
8104	phba = lpfc_hba_alloc(pdev);
8105	if (!phba)
8106		return -ENOMEM;
8107
8108	/* Perform generic PCI device enabling operation */
8109	error = lpfc_enable_pci_dev(phba);
8110	if (error) {
8111		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8112				"1409 Failed to enable pci device.\n");
8113		goto out_free_phba;
8114	}
8115
8116	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
8117	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8118	if (error)
8119		goto out_disable_pci_dev;
8120
8121	/* Set up SLI-4 specific device PCI memory space */
8122	error = lpfc_sli4_pci_mem_setup(phba);
8123	if (error) {
8124		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8125				"1410 Failed to set up pci memory space.\n");
8126		goto out_disable_pci_dev;
8127	}
8128
8129	/* Set up phase-1 common device driver resources */
8130	error = lpfc_setup_driver_resource_phase1(phba);
8131	if (error) {
8132		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8133				"1411 Failed to set up driver resource.\n");
8134		goto out_unset_pci_mem_s4;
8135	}
8136
8137	/* Set up SLI-4 Specific device driver resources */
8138	error = lpfc_sli4_driver_resource_setup(phba);
8139	if (error) {
8140		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8141				"1412 Failed to set up driver resource.\n");
8142		goto out_unset_pci_mem_s4;
8143	}
8144
8145	/* Initialize and populate the iocb list per host */
8146	error = lpfc_init_iocb_list(phba,
8147			phba->sli4_hba.max_cfg_param.max_xri);
8148	if (error) {
8149		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8150				"1413 Failed to initialize iocb list.\n");
8151		goto out_unset_driver_resource_s4;
8152	}
8153
8154	/* Set up common device driver resources */
8155	error = lpfc_setup_driver_resource_phase2(phba);
8156	if (error) {
8157		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8158				"1414 Failed to set up driver resource.\n");
8159		goto out_free_iocb_list;
8160	}
8161
8162	/* Create SCSI host to the physical port */
8163	error = lpfc_create_shost(phba);
8164	if (error) {
8165		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8166				"1415 Failed to create scsi host.\n");
8167		goto out_unset_driver_resource;
8168	}
8169
8170	/* Configure sysfs attributes */
8171	vport = phba->pport;
8172	error = lpfc_alloc_sysfs_attr(vport);
8173	if (error) {
8174		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8175				"1416 Failed to allocate sysfs attr\n");
8176		goto out_destroy_shost;
8177	}
8178
8179	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8180	/* Now, trying to enable interrupt and bring up the device */
8181	cfg_mode = phba->cfg_use_msi;
8182	while (true) {
8183		/* Put device to a known state before enabling interrupt */
8184		lpfc_stop_port(phba);
8185		/* Configure and enable interrupt */
8186		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8187		if (intr_mode == LPFC_INTR_ERROR) {
8188			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8189					"0426 Failed to enable interrupt.\n");
8190			error = -ENODEV;
8191			goto out_free_sysfs_attr;
8192		}
8193		/* Default to single FCP EQ for non-MSI-X */
8194		if (phba->intr_type != MSIX)
8195			phba->cfg_fcp_eq_count = 1;
8196		/* Set up SLI-4 HBA */
8197		if (lpfc_sli4_hba_setup(phba)) {
8198			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8199					"1421 Failed to set up hba\n");
8200			error = -ENODEV;
8201			goto out_disable_intr;
8202		}
8203
8204		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
8205		if (intr_mode != 0)
8206			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8207							    LPFC_ACT_INTR_CNT);
8208
8209		/* Check active interrupts received only for MSI/MSI-X */
8210		if (intr_mode == 0 ||
8211		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8212			/* Log the current active interrupt mode */
8213			phba->intr_mode = intr_mode;
8214			lpfc_log_intr_mode(phba, intr_mode);
8215			break;
8216		}
8217		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8218				"0451 Configure interrupt mode (%d) "
8219				"failed active interrupt test.\n",
8220				intr_mode);
8221		/* Unset the preivous SLI-4 HBA setup */
8222		lpfc_sli4_unset_hba(phba);
8223		/* Try next level of interrupt mode */
8224		cfg_mode = --intr_mode;
8225	}
8226
8227	/* Perform post initialization setup */
8228	lpfc_post_init_setup(phba);
8229
8230	/* Check if there are static vports to be created. */
8231	lpfc_create_static_vport(phba);
8232
8233	return 0;
8234
8235out_disable_intr:
8236	lpfc_sli4_disable_intr(phba);
8237out_free_sysfs_attr:
8238	lpfc_free_sysfs_attr(vport);
8239out_destroy_shost:
8240	lpfc_destroy_shost(phba);
8241out_unset_driver_resource:
8242	lpfc_unset_driver_resource_phase2(phba);
8243out_free_iocb_list:
8244	lpfc_free_iocb_list(phba);
8245out_unset_driver_resource_s4:
8246	lpfc_sli4_driver_resource_unset(phba);
8247out_unset_pci_mem_s4:
8248	lpfc_sli4_pci_mem_unset(phba);
8249out_disable_pci_dev:
8250	lpfc_disable_pci_dev(phba);
8251	if (shost)
8252		scsi_host_put(shost);
8253out_free_phba:
8254	lpfc_hba_free(phba);
8255	return error;
8256}
8257
8258/**
8259 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8260 * @pdev: pointer to PCI device
8261 *
8262 * This routine is called from the kernel's PCI subsystem to device with
8263 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8264 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8265 * device to be removed from the PCI subsystem properly.
8266 **/
8267static void __devexit
8268lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8269{
8270	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8271	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8272	struct lpfc_vport **vports;
8273	struct lpfc_hba *phba = vport->phba;
8274	int i;
8275
8276	/* Mark the device unloading flag */
8277	spin_lock_irq(&phba->hbalock);
8278	vport->load_flag |= FC_UNLOADING;
8279	spin_unlock_irq(&phba->hbalock);
8280
8281	/* Free the HBA sysfs attributes */
8282	lpfc_free_sysfs_attr(vport);
8283
8284	/* Release all the vports against this physical port */
8285	vports = lpfc_create_vport_work_array(phba);
8286	if (vports != NULL)
8287		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8288			fc_vport_terminate(vports[i]->fc_vport);
8289	lpfc_destroy_vport_work_array(phba, vports);
8290
8291	/* Remove FC host and then SCSI host with the physical port */
8292	fc_remove_host(shost);
8293	scsi_remove_host(shost);
8294
8295	/* Perform cleanup on the physical port */
8296	lpfc_cleanup(vport);
8297
8298	/*
8299	 * Bring down the SLI Layer. This step disables all interrupts,
8300	 * clears the rings, discards all mailbox commands, and resets
8301	 * the HBA FCoE function.
8302	 */
8303	lpfc_debugfs_terminate(vport);
8304	lpfc_sli4_hba_unset(phba);
8305
8306	spin_lock_irq(&phba->hbalock);
8307	list_del_init(&vport->listentry);
8308	spin_unlock_irq(&phba->hbalock);
8309
8310	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
8311	 * buffers are released to their corresponding pools here.
8312	 */
8313	lpfc_scsi_free(phba);
8314	lpfc_sli4_driver_resource_unset(phba);
8315
8316	/* Unmap adapter Control and Doorbell registers */
8317	lpfc_sli4_pci_mem_unset(phba);
8318
8319	/* Release PCI resources and disable device's PCI function */
8320	scsi_host_put(shost);
8321	lpfc_disable_pci_dev(phba);
8322
8323	/* Finally, free the driver's device data structure */
8324	lpfc_hba_free(phba);
8325
8326	return;
8327}
8328
8329/**
8330 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8331 * @pdev: pointer to PCI device
8332 * @msg: power management message
8333 *
8334 * This routine is called from the kernel's PCI subsystem to support system
8335 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8336 * this method, it quiesces the device by stopping the driver's worker
8337 * thread for the device, turning off device's interrupt and DMA, and bring
8338 * the device offline. Note that as the driver implements the minimum PM
8339 * requirements to a power-aware driver's PM support for suspend/resume -- all
8340 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
8341 * method call will be treated as SUSPEND and the driver will fully
8342 * reinitialize its device during resume() method call, the driver will set
8343 * device to PCI_D3hot state in PCI config space instead of setting it
8344 * according to the @msg provided by the PM.
8345 *
8346 * Return code
8347 * 	0 - driver suspended the device
8348 * 	Error otherwise
8349 **/
8350static int
8351lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8352{
8353	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8354	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8355
8356	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8357			"0298 PCI device Power Management suspend.\n");
8358
8359	/* Bring down the device */
8360	lpfc_offline_prep(phba);
8361	lpfc_offline(phba);
8362	kthread_stop(phba->worker_thread);
8363
8364	/* Disable interrupt from device */
8365	lpfc_sli4_disable_intr(phba);
8366
8367	/* Save device state to PCI config space */
8368	pci_save_state(pdev);
8369	pci_set_power_state(pdev, PCI_D3hot);
8370
8371	return 0;
8372}
8373
8374/**
8375 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8376 * @pdev: pointer to PCI device
8377 *
8378 * This routine is called from the kernel's PCI subsystem to support system
8379 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
8380 * this method, it restores the device's PCI config space state and fully
8381 * reinitializes the device and brings it online. Note that as the driver
8382 * implements the minimum PM requirements to a power-aware driver's PM for
8383 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8384 * to the suspend() method call will be treated as SUSPEND and the driver
8385 * will fully reinitialize its device during resume() method call, the device
8386 * will be set to PCI_D0 directly in PCI config space before restoring the
8387 * state.
8388 *
8389 * Return code
8390 * 	0 - driver suspended the device
8391 * 	Error otherwise
8392 **/
8393static int
8394lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8395{
8396	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8397	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8398	uint32_t intr_mode;
8399	int error;
8400
8401	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8402			"0292 PCI device Power Management resume.\n");
8403
8404	/* Restore device state from PCI config space */
8405	pci_set_power_state(pdev, PCI_D0);
8406	pci_restore_state(pdev);
8407
8408	/*
8409	 * As the new kernel behavior of pci_restore_state() API call clears
8410	 * device saved_state flag, need to save the restored state again.
8411	 */
8412	pci_save_state(pdev);
8413
8414	if (pdev->is_busmaster)
8415		pci_set_master(pdev);
8416
8417	 /* Startup the kernel thread for this host adapter. */
8418	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8419					"lpfc_worker_%d", phba->brd_no);
8420	if (IS_ERR(phba->worker_thread)) {
8421		error = PTR_ERR(phba->worker_thread);
8422		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8423				"0293 PM resume failed to start worker "
8424				"thread: error=x%x.\n", error);
8425		return error;
8426	}
8427
8428	/* Configure and enable interrupt */
8429	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8430	if (intr_mode == LPFC_INTR_ERROR) {
8431		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8432				"0294 PM resume Failed to enable interrupt\n");
8433		return -EIO;
8434	} else
8435		phba->intr_mode = intr_mode;
8436
8437	/* Restart HBA and bring it online */
8438	lpfc_sli_brdrestart(phba);
8439	lpfc_online(phba);
8440
8441	/* Log the current active interrupt mode */
8442	lpfc_log_intr_mode(phba, phba->intr_mode);
8443
8444	return 0;
8445}
8446
8447/**
8448 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8449 * @pdev: pointer to PCI device.
8450 * @state: the current PCI connection state.
8451 *
8452 * This routine is called from the PCI subsystem for error handling to device
8453 * with SLI-4 interface spec. This function is called by the PCI subsystem
8454 * after a PCI bus error affecting this device has been detected. When this
8455 * function is invoked, it will need to stop all the I/Os and interrupt(s)
8456 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
8457 * for the PCI subsystem to perform proper recovery as desired.
8458 *
8459 * Return codes
8460 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8461 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8462 **/
8463static pci_ers_result_t
8464lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8465{
8466	return PCI_ERS_RESULT_NEED_RESET;
8467}
8468
8469/**
8470 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
8471 * @pdev: pointer to PCI device.
8472 *
8473 * This routine is called from the PCI subsystem for error handling to device
8474 * with SLI-4 interface spec. It is called after PCI bus has been reset to
8475 * restart the PCI card from scratch, as if from a cold-boot. During the
8476 * PCI subsystem error recovery, after the driver returns
8477 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8478 * recovery and then call this routine before calling the .resume method to
8479 * recover the device. This function will initialize the HBA device, enable
8480 * the interrupt, but it will just put the HBA to offline state without
8481 * passing any I/O traffic.
8482 *
8483 * Return codes
8484 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8485 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8486 */
8487static pci_ers_result_t
8488lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8489{
8490	return PCI_ERS_RESULT_RECOVERED;
8491}
8492
8493/**
8494 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
8495 * @pdev: pointer to PCI device
8496 *
8497 * This routine is called from the PCI subsystem for error handling to device
8498 * with SLI-4 interface spec. It is called when kernel error recovery tells
8499 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8500 * error recovery. After this call, traffic can start to flow from this device
8501 * again.
8502 **/
8503static void
8504lpfc_io_resume_s4(struct pci_dev *pdev)
8505{
8506	return;
8507}
8508
8509/**
8510 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
8511 * @pdev: pointer to PCI device
8512 * @pid: pointer to PCI device identifier
8513 *
8514 * This routine is to be registered to the kernel's PCI subsystem. When an
8515 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
8516 * at PCI device-specific information of the device and driver to see if the
8517 * driver state that it can support this kind of device. If the match is
8518 * successful, the driver core invokes this routine. This routine dispatches
8519 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
8520 * do all the initialization that it needs to do to handle the HBA device
8521 * properly.
8522 *
8523 * Return code
8524 * 	0 - driver can claim the device
8525 * 	negative value - driver can not claim the device
8526 **/
8527static int __devinit
8528lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
8529{
8530	int rc;
8531	struct lpfc_sli_intf intf;
8532
8533	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
8534		return -ENODEV;
8535
8536	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
8537	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
8538		rc = lpfc_pci_probe_one_s4(pdev, pid);
8539	else
8540		rc = lpfc_pci_probe_one_s3(pdev, pid);
8541
8542	return rc;
8543}
8544
8545/**
8546 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
8547 * @pdev: pointer to PCI device
8548 *
8549 * This routine is to be registered to the kernel's PCI subsystem. When an
8550 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
8551 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
8552 * remove routine, which will perform all the necessary cleanup for the
8553 * device to be removed from the PCI subsystem properly.
8554 **/
8555static void __devexit
8556lpfc_pci_remove_one(struct pci_dev *pdev)
8557{
8558	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8559	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8560
8561	switch (phba->pci_dev_grp) {
8562	case LPFC_PCI_DEV_LP:
8563		lpfc_pci_remove_one_s3(pdev);
8564		break;
8565	case LPFC_PCI_DEV_OC:
8566		lpfc_pci_remove_one_s4(pdev);
8567		break;
8568	default:
8569		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8570				"1424 Invalid PCI device group: 0x%x\n",
8571				phba->pci_dev_grp);
8572		break;
8573	}
8574	return;
8575}
8576
8577/**
8578 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
8579 * @pdev: pointer to PCI device
8580 * @msg: power management message
8581 *
8582 * This routine is to be registered to the kernel's PCI subsystem to support
8583 * system Power Management (PM). When PM invokes this method, it dispatches
8584 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
8585 * suspend the device.
8586 *
8587 * Return code
8588 * 	0 - driver suspended the device
8589 * 	Error otherwise
8590 **/
8591static int
8592lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
8593{
8594	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8595	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8596	int rc = -ENODEV;
8597
8598	switch (phba->pci_dev_grp) {
8599	case LPFC_PCI_DEV_LP:
8600		rc = lpfc_pci_suspend_one_s3(pdev, msg);
8601		break;
8602	case LPFC_PCI_DEV_OC:
8603		rc = lpfc_pci_suspend_one_s4(pdev, msg);
8604		break;
8605	default:
8606		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8607				"1425 Invalid PCI device group: 0x%x\n",
8608				phba->pci_dev_grp);
8609		break;
8610	}
8611	return rc;
8612}
8613
8614/**
8615 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
8616 * @pdev: pointer to PCI device
8617 *
8618 * This routine is to be registered to the kernel's PCI subsystem to support
8619 * system Power Management (PM). When PM invokes this method, it dispatches
8620 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
8621 * resume the device.
8622 *
8623 * Return code
8624 * 	0 - driver suspended the device
8625 * 	Error otherwise
8626 **/
8627static int
8628lpfc_pci_resume_one(struct pci_dev *pdev)
8629{
8630	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8631	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8632	int rc = -ENODEV;
8633
8634	switch (phba->pci_dev_grp) {
8635	case LPFC_PCI_DEV_LP:
8636		rc = lpfc_pci_resume_one_s3(pdev);
8637		break;
8638	case LPFC_PCI_DEV_OC:
8639		rc = lpfc_pci_resume_one_s4(pdev);
8640		break;
8641	default:
8642		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8643				"1426 Invalid PCI device group: 0x%x\n",
8644				phba->pci_dev_grp);
8645		break;
8646	}
8647	return rc;
8648}
8649
8650/**
8651 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
8652 * @pdev: pointer to PCI device.
8653 * @state: the current PCI connection state.
8654 *
8655 * This routine is registered to the PCI subsystem for error handling. This
8656 * function is called by the PCI subsystem after a PCI bus error affecting
8657 * this device has been detected. When this routine is invoked, it dispatches
8658 * the action to the proper SLI-3 or SLI-4 device error detected handling
8659 * routine, which will perform the proper error detected operation.
8660 *
8661 * Return codes
8662 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8663 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8664 **/
8665static pci_ers_result_t
8666lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8667{
8668	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8669	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8670	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8671
8672	switch (phba->pci_dev_grp) {
8673	case LPFC_PCI_DEV_LP:
8674		rc = lpfc_io_error_detected_s3(pdev, state);
8675		break;
8676	case LPFC_PCI_DEV_OC:
8677		rc = lpfc_io_error_detected_s4(pdev, state);
8678		break;
8679	default:
8680		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8681				"1427 Invalid PCI device group: 0x%x\n",
8682				phba->pci_dev_grp);
8683		break;
8684	}
8685	return rc;
8686}
8687
8688/**
8689 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
8690 * @pdev: pointer to PCI device.
8691 *
8692 * This routine is registered to the PCI subsystem for error handling. This
8693 * function is called after PCI bus has been reset to restart the PCI card
8694 * from scratch, as if from a cold-boot. When this routine is invoked, it
8695 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
8696 * routine, which will perform the proper device reset.
8697 *
8698 * Return codes
8699 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8700 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8701 **/
8702static pci_ers_result_t
8703lpfc_io_slot_reset(struct pci_dev *pdev)
8704{
8705	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8706	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8707	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8708
8709	switch (phba->pci_dev_grp) {
8710	case LPFC_PCI_DEV_LP:
8711		rc = lpfc_io_slot_reset_s3(pdev);
8712		break;
8713	case LPFC_PCI_DEV_OC:
8714		rc = lpfc_io_slot_reset_s4(pdev);
8715		break;
8716	default:
8717		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8718				"1428 Invalid PCI device group: 0x%x\n",
8719				phba->pci_dev_grp);
8720		break;
8721	}
8722	return rc;
8723}
8724
8725/**
8726 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
8727 * @pdev: pointer to PCI device
8728 *
8729 * This routine is registered to the PCI subsystem for error handling. It
8730 * is called when kernel error recovery tells the lpfc driver that it is
8731 * OK to resume normal PCI operation after PCI bus error recovery. When
8732 * this routine is invoked, it dispatches the action to the proper SLI-3
8733 * or SLI-4 device io_resume routine, which will resume the device operation.
8734 **/
8735static void
8736lpfc_io_resume(struct pci_dev *pdev)
8737{
8738	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8739	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8740
8741	switch (phba->pci_dev_grp) {
8742	case LPFC_PCI_DEV_LP:
8743		lpfc_io_resume_s3(pdev);
8744		break;
8745	case LPFC_PCI_DEV_OC:
8746		lpfc_io_resume_s4(pdev);
8747		break;
8748	default:
8749		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8750				"1429 Invalid PCI device group: 0x%x\n",
8751				phba->pci_dev_grp);
8752		break;
8753	}
8754	return;
8755}
8756
8757static struct pci_device_id lpfc_id_table[] = {
8758	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
8759		PCI_ANY_ID, PCI_ANY_ID, },
8760	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
8761		PCI_ANY_ID, PCI_ANY_ID, },
8762	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
8763		PCI_ANY_ID, PCI_ANY_ID, },
8764	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
8765		PCI_ANY_ID, PCI_ANY_ID, },
8766	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
8767		PCI_ANY_ID, PCI_ANY_ID, },
8768	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
8769		PCI_ANY_ID, PCI_ANY_ID, },
8770	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
8771		PCI_ANY_ID, PCI_ANY_ID, },
8772	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
8773		PCI_ANY_ID, PCI_ANY_ID, },
8774	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
8775		PCI_ANY_ID, PCI_ANY_ID, },
8776	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
8777		PCI_ANY_ID, PCI_ANY_ID, },
8778	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
8779		PCI_ANY_ID, PCI_ANY_ID, },
8780	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
8781		PCI_ANY_ID, PCI_ANY_ID, },
8782	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
8783		PCI_ANY_ID, PCI_ANY_ID, },
8784	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
8785		PCI_ANY_ID, PCI_ANY_ID, },
8786	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
8787		PCI_ANY_ID, PCI_ANY_ID, },
8788	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
8789		PCI_ANY_ID, PCI_ANY_ID, },
8790	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
8791		PCI_ANY_ID, PCI_ANY_ID, },
8792	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
8793		PCI_ANY_ID, PCI_ANY_ID, },
8794	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
8795		PCI_ANY_ID, PCI_ANY_ID, },
8796	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
8797		PCI_ANY_ID, PCI_ANY_ID, },
8798	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
8799		PCI_ANY_ID, PCI_ANY_ID, },
8800	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
8801		PCI_ANY_ID, PCI_ANY_ID, },
8802	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
8803		PCI_ANY_ID, PCI_ANY_ID, },
8804	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
8805		PCI_ANY_ID, PCI_ANY_ID, },
8806	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
8807		PCI_ANY_ID, PCI_ANY_ID, },
8808	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
8809		PCI_ANY_ID, PCI_ANY_ID, },
8810	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
8811		PCI_ANY_ID, PCI_ANY_ID, },
8812	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
8813		PCI_ANY_ID, PCI_ANY_ID, },
8814	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
8815		PCI_ANY_ID, PCI_ANY_ID, },
8816	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
8817		PCI_ANY_ID, PCI_ANY_ID, },
8818	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
8819		PCI_ANY_ID, PCI_ANY_ID, },
8820	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
8821		PCI_ANY_ID, PCI_ANY_ID, },
8822	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
8823		PCI_ANY_ID, PCI_ANY_ID, },
8824	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
8825		PCI_ANY_ID, PCI_ANY_ID, },
8826	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
8827		PCI_ANY_ID, PCI_ANY_ID, },
8828	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
8829		PCI_ANY_ID, PCI_ANY_ID, },
8830	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
8831		PCI_ANY_ID, PCI_ANY_ID, },
8832	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
8833		PCI_ANY_ID, PCI_ANY_ID, },
8834	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8835		PCI_ANY_ID, PCI_ANY_ID, },
8836	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8837		PCI_ANY_ID, PCI_ANY_ID, },
8838	{ 0 }
8839};
8840
8841MODULE_DEVICE_TABLE(pci, lpfc_id_table);
8842
8843static struct pci_error_handlers lpfc_err_handler = {
8844	.error_detected = lpfc_io_error_detected,
8845	.slot_reset = lpfc_io_slot_reset,
8846	.resume = lpfc_io_resume,
8847};
8848
8849static struct pci_driver lpfc_driver = {
8850	.name		= LPFC_DRIVER_NAME,
8851	.id_table	= lpfc_id_table,
8852	.probe		= lpfc_pci_probe_one,
8853	.remove		= __devexit_p(lpfc_pci_remove_one),
8854	.suspend        = lpfc_pci_suspend_one,
8855	.resume		= lpfc_pci_resume_one,
8856	.err_handler    = &lpfc_err_handler,
8857};
8858
8859/**
8860 * lpfc_init - lpfc module initialization routine
8861 *
8862 * This routine is to be invoked when the lpfc module is loaded into the
8863 * kernel. The special kernel macro module_init() is used to indicate the
8864 * role of this routine to the kernel as lpfc module entry point.
8865 *
8866 * Return codes
8867 *   0 - successful
8868 *   -ENOMEM - FC attach transport failed
8869 *   all others - failed
8870 */
8871static int __init
8872lpfc_init(void)
8873{
8874	int error = 0;
8875
8876	printk(LPFC_MODULE_DESC "\n");
8877	printk(LPFC_COPYRIGHT "\n");
8878
8879	if (lpfc_enable_npiv) {
8880		lpfc_transport_functions.vport_create = lpfc_vport_create;
8881		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
8882	}
8883	lpfc_transport_template =
8884				fc_attach_transport(&lpfc_transport_functions);
8885	if (lpfc_transport_template == NULL)
8886		return -ENOMEM;
8887	if (lpfc_enable_npiv) {
8888		lpfc_vport_transport_template =
8889			fc_attach_transport(&lpfc_vport_transport_functions);
8890		if (lpfc_vport_transport_template == NULL) {
8891			fc_release_transport(lpfc_transport_template);
8892			return -ENOMEM;
8893		}
8894	}
8895	error = pci_register_driver(&lpfc_driver);
8896	if (error) {
8897		fc_release_transport(lpfc_transport_template);
8898		if (lpfc_enable_npiv)
8899			fc_release_transport(lpfc_vport_transport_template);
8900	}
8901
8902	return error;
8903}
8904
8905/**
8906 * lpfc_exit - lpfc module removal routine
8907 *
8908 * This routine is invoked when the lpfc module is removed from the kernel.
8909 * The special kernel macro module_exit() is used to indicate the role of
8910 * this routine to the kernel as lpfc module exit point.
8911 */
8912static void __exit
8913lpfc_exit(void)
8914{
8915	pci_unregister_driver(&lpfc_driver);
8916	fc_release_transport(lpfc_transport_template);
8917	if (lpfc_enable_npiv)
8918		fc_release_transport(lpfc_vport_transport_template);
8919	if (_dump_buf_data) {
8920		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
8921				"_dump_buf_data at 0x%p\n",
8922				(1L << _dump_buf_data_order), _dump_buf_data);
8923		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8924	}
8925
8926	if (_dump_buf_dif) {
8927		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
8928				"_dump_buf_dif at 0x%p\n",
8929				(1L << _dump_buf_dif_order), _dump_buf_dif);
8930		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8931	}
8932}
8933
8934module_init(lpfc_init);
8935module_exit(lpfc_exit);
8936MODULE_LICENSE("GPL");
8937MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8938MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8939MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
8940