lpfc_init.c revision d7c479929b6804f4e9d5fb5f721aba31622f3d97
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

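	/*
	 * Light-pulse (LC) HBAs expect the GPL license key string in the
	 * reserved words of the READ_NVPARM mailbox command; the key is
	 * byte-swapped to big-endian words only once (guarded by init_key).
	 */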
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

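	/*
	 * The VPD region is larger than a single mailbox response, so it
	 * is pulled from the adapter in chunks: each DUMP_MEM iteration
	 * appends the returned data to lpfc_vpd_data at the current offset
	 * until the adapter reports no more data or DMP_VPD_SIZE is
	 * reached.
	 */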
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* Dump mem may return a zero word count when finished, or we
		 * got a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it sets the internal async event support flag to 1; otherwise, it sets the
 * flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the Option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
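		/*
		 * Each of the six low-order WWNN bytes yields two hex
		 * digits: nibbles 0-9 map to '0'-'9' and nibbles 10-15
		 * map to 'a'-'f', producing a 12-character serial number.
		 */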
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring until hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X
	 * mode is enabled.
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology,
		phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
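
/*
 * Illustrative sketch (not part of this file): when the module parameter
 * lpfc_suppress_link_up defers link bring-up, a later caller can raise the
 * link on demand, roughly:
 *
 *	if (phba->cfg_suppress_link_up != LPFC_INITIALIZE_LINK &&
 *	    lpfc_hba_init_link(phba, MBX_NOWAIT))
 *		dev_err(&phba->pcidev->dev, "delayed link-up failed\n");
 *
 * The non-suppress value LPFC_INITIALIZE_LINK is taken from the check in
 * lpfc_config_port_post() above; the exact call site is assumed.
 */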

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and
 * mark the heart-beat outstanding state. Once the mailbox command comes back
 * and no error conditions are detected, the heart-beat mailbox command timer
 * is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expires with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset the heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer is set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA shall be
 * taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

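	/*
	 * Any I/O completion within the last heart-beat interval shows the
	 * adapter is alive, so skip issuing a heart-beat mailbox command
	 * and simply rearm the timer.
	 */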
	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it has triggered erratt. That could cause
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os)
	 * on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

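/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event with the
 * LPFC_EVENT_PORTINTERR subcategory to the fc transport, so that management
 * applications listening for these events are notified of the board error.
 **/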
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, so just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
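
/*
 * Illustrative sketch (assumed, based on the wrappers above): during driver
 * resource setup the SLI-rev specific handlers are bound into the API jump
 * table, roughly:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4) {
 *		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 *	} else {
 *		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *	}
 */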

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;
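
	/* Nonzero rc values below encode which step failed (1 = mailbox
	 * alloc, 2 = dmabuf alloc, 3 = mbuf alloc, 4 = mailbox issue) and
	 * are reported in the error log on the exit path.
	 */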

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
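	/*
	 * Walk the PCI VPD tag stream: large resource tags 0x82 (identifier
	 * string) and 0x91 are skipped over by their two-byte length, tag
	 * 0x90 (read-only data) is scanned for the two-character keywords
	 * the driver cares about (SN, V1-V4), and tag 0x78 ends the data.
	 */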
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') &&
				    (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] =
							vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] =
							vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of 256
 * chars. It shall be returned with the model name, maximum speed, and the
 * host bus type. The @mdp passed into this function points to an array of 80
 * chars. When the function returns, the @mdp will be filled with the model
 * name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

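	/* Derive the adapter's maximum link speed (in Gb/s) from the
	 * link-mode technology (lmt) bits reported by READ_CONFIG.
	 */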
	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1824		break;
1825	case PCI_DEVICE_ID_FALCON:
1826		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1827				"EmulexSecure Fibre"};
1828		break;
1829	case PCI_DEVICE_ID_BALIUS:
1830		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1831				"Fibre Channel Adapter"};
1832		break;
1833	default:
1834		m = (typeof(m)){"Unknown", "", ""};
1835		break;
1836	}
1837
	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* OneConnect HBAs require special processing; they are all
	 * initiators and we put the port number on the end.
	 */
1843	if (descp && descp[0] == '\0') {
1844		if (oneConnect)
1845			snprintf(descp, 255,
1846				"Emulex OneConnect %s, %s Initiator, Port %s",
1847				m.name, m.function,
1848				phba->Port);
1849		else
1850			snprintf(descp, 255,
1851				"Emulex %s %d%s %s %s",
1852				m.name, max_speed, (GE) ? "GE" : "Gb",
1853				m.bus, m.function);
1854	}
1855}
1856
1857/**
1858 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1859 * @phba: pointer to lpfc hba data structure.
1860 * @pring: pointer to a IOCB ring.
1861 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1862 *
1863 * This routine posts a given number of IOCBs with the associated DMA buffer
1864 * descriptors specified by the cnt argument to the given IOCB ring.
1865 *
1866 * Return codes
1867 *   The number of IOCBs NOT able to be posted to the IOCB ring.
1868 **/
1869int
1870lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1871{
1872	IOCB_t *icmd;
1873	struct lpfc_iocbq *iocb;
1874	struct lpfc_dmabuf *mp1, *mp2;
1875
1876	cnt += pring->missbufcnt;
1877
1878	/* While there are buffers to post */
1879	while (cnt > 0) {
1880		/* Allocate buffer for  command iocb */
1881		iocb = lpfc_sli_get_iocbq(phba);
1882		if (iocb == NULL) {
1883			pring->missbufcnt = cnt;
1884			return cnt;
1885		}
1886		icmd = &iocb->iocb;
1887
1888		/* 2 buffers can be posted per command */
1889		/* Allocate buffer to post */
1890		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1893		if (!mp1 || !mp1->virt) {
1894			kfree(mp1);
1895			lpfc_sli_release_iocbq(phba, iocb);
1896			pring->missbufcnt = cnt;
1897			return cnt;
1898		}
1899
1900		INIT_LIST_HEAD(&mp1->list);
1901		/* Allocate buffer to post */
1902		if (cnt > 1) {
1903			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1904			if (mp2)
1905				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1906							    &mp2->phys);
1907			if (!mp2 || !mp2->virt) {
1908				kfree(mp2);
1909				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1910				kfree(mp1);
1911				lpfc_sli_release_iocbq(phba, iocb);
1912				pring->missbufcnt = cnt;
1913				return cnt;
1914			}
1915
1916			INIT_LIST_HEAD(&mp2->list);
1917		} else {
1918			mp2 = NULL;
1919		}
1920
1921		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1922		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1923		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1924		icmd->ulpBdeCount = 1;
1925		cnt--;
1926		if (mp2) {
1927			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1928			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1929			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1930			cnt--;
1931			icmd->ulpBdeCount = 2;
1932		}
1933
1934		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1935		icmd->ulpLe = 1;
1936
1937		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1938		    IOCB_ERROR) {
1939			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1940			kfree(mp1);
1941			cnt++;
1942			if (mp2) {
1943				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1944				kfree(mp2);
1945				cnt++;
1946			}
1947			lpfc_sli_release_iocbq(phba, iocb);
1948			pring->missbufcnt = cnt;
1949			return cnt;
1950		}
1951		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1952		if (mp2)
1953			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1954	}
1955	pring->missbufcnt = 0;
1956	return 0;
1957}
1958
1959/**
1960 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1961 * @phba: pointer to lpfc hba data structure.
1962 *
1963 * This routine posts initial receive IOCB buffers to the ELS ring. The
1964 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1965 * set to 64 IOCBs.
1966 *
1967 * Return codes
1968 *   0 - success (currently always success)
1969 **/
1970static int
1971lpfc_post_rcv_buf(struct lpfc_hba *phba)
1972{
1973	struct lpfc_sli *psli = &phba->sli;
1974
1975	/* Ring 0, ELS / CT buffers */
1976	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1977	/* Ring 2 - FCP no buffers needed */
1978
1979	return 0;
1980}
1981
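/* S(N, V): rotate the 32-bit value V left by N bits (SHA-1 ROTL) */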
1982#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1983
1984/**
1985 * lpfc_sha_init - Set up initial array of hash table entries
1986 * @HashResultPointer: pointer to an array as hash table.
1987 *
1988 * This routine sets up the initial values to the array of hash table entries
1989 * for the LC HBAs.
1990 **/
1991static void
1992lpfc_sha_init(uint32_t * HashResultPointer)
1993{
1994	HashResultPointer[0] = 0x67452301;
1995	HashResultPointer[1] = 0xEFCDAB89;
1996	HashResultPointer[2] = 0x98BADCFE;
1997	HashResultPointer[3] = 0x10325476;
1998	HashResultPointer[4] = 0xC3D2E1F0;
1999}
2000
2001/**
2002 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2003 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through @HashResultPointer as the result hash table.
2010 **/
2011static void
2012lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2013{
2014	int t;
2015	uint32_t TEMP;
2016	uint32_t A, B, C, D, E;
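	/* Expand the 16 seed words into the 80-word SHA-1 message schedule */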
2017	t = 16;
2018	do {
2019		HashWorkingPointer[t] =
2020		    S(1,
2021		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2022								     8] ^
2023		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2024	} while (++t <= 79);
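	/* Run the 80 rounds of the SHA-1 compression function */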
2025	t = 0;
2026	A = HashResultPointer[0];
2027	B = HashResultPointer[1];
2028	C = HashResultPointer[2];
2029	D = HashResultPointer[3];
2030	E = HashResultPointer[4];
2031
2032	do {
2033		if (t < 20) {
2034			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2035		} else if (t < 40) {
2036			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2037		} else if (t < 60) {
2038			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2039		} else {
2040			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2041		}
2042		TEMP += S(5, A) + E + HashWorkingPointer[t];
2043		E = D;
2044		D = C;
2045		C = S(30, B);
2046		B = A;
2047		A = TEMP;
2048	} while (++t <= 79);
2049
2050	HashResultPointer[0] += A;
2051	HashResultPointer[1] += B;
2052	HashResultPointer[2] += C;
2053	HashResultPointer[3] += D;
2054	HashResultPointer[4] += E;
2055
2056}
2057
2058/**
2059 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2060 * @RandomChallenge: pointer to the entry of host challenge random number array.
2061 * @HashWorking: pointer to the entry of the working hash array.
2062 *
 * This routine calculates the working hash array entry referred to by
 * @HashWorking from the challenge random number associated with the host,
 * referred to by @RandomChallenge. The result is put into the working hash
 * array entry and returned by reference through @HashWorking.
2067 **/
2068static void
2069lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2070{
2071	*HashWorking = (*RandomChallenge ^ *HashWorking);
2072}
2073
2074/**
2075 * lpfc_hba_init - Perform special handling for LC HBA initialization
2076 * @phba: pointer to lpfc hba data structure.
2077 * @hbainit: pointer to an array of unsigned 32-bit integers.
2078 *
 * This routine performs the special handling for LC HBA initialization: it
 * computes a SHA-1 based digest over the HBA's WWNN combined with the random
 * challenge data and returns the five-word result through @hbainit.
2080 **/
2081void
2082lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2083{
2084	int t;
2085	uint32_t *HashWorking;
2086	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2087
2088	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2089	if (!HashWorking)
2090		return;
2091
2092	HashWorking[0] = HashWorking[78] = *pwwnn++;
2093	HashWorking[1] = HashWorking[79] = *pwwnn;
2094
2095	for (t = 0; t < 7; t++)
2096		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2097
2098	lpfc_sha_init(hbainit);
2099	lpfc_sha_iterate(hbainit, HashWorking);
2100	kfree(HashWorking);
2101}
2102
2103/**
2104 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2105 * @vport: pointer to a virtual N_Port data structure.
2106 *
2107 * This routine performs the necessary cleanups before deleting the @vport.
2108 * It invokes the discovery state machine to perform necessary state
2109 * transitions and to release the ndlps associated with the @vport. Note,
2110 * the physical port is treated as @vport 0.
2111 **/
2112void
2113lpfc_cleanup(struct lpfc_vport *vport)
2114{
2115	struct lpfc_hba   *phba = vport->phba;
2116	struct lpfc_nodelist *ndlp, *next_ndlp;
2117	int i = 0;
2118
2119	if (phba->link_state > LPFC_LINK_DOWN)
2120		lpfc_port_link_failure(vport);
2121
2122	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2123		if (!NLP_CHK_NODE_ACT(ndlp)) {
2124			ndlp = lpfc_enable_node(vport, ndlp,
2125						NLP_STE_UNUSED_NODE);
2126			if (!ndlp)
2127				continue;
2128			spin_lock_irq(&phba->ndlp_lock);
2129			NLP_SET_FREE_REQ(ndlp);
2130			spin_unlock_irq(&phba->ndlp_lock);
2131			/* Trigger the release of the ndlp memory */
2132			lpfc_nlp_put(ndlp);
2133			continue;
2134		}
2135		spin_lock_irq(&phba->ndlp_lock);
2136		if (NLP_CHK_FREE_REQ(ndlp)) {
2137			/* The ndlp should not be in memory free mode already */
2138			spin_unlock_irq(&phba->ndlp_lock);
2139			continue;
2140		} else
2141			/* Indicate request for freeing ndlp memory */
2142			NLP_SET_FREE_REQ(ndlp);
2143		spin_unlock_irq(&phba->ndlp_lock);
2144
2145		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2146		    ndlp->nlp_DID == Fabric_DID) {
2147			/* Just free up ndlp with Fabric_DID for vports */
2148			lpfc_nlp_put(ndlp);
2149			continue;
2150		}
2151
2152		if (ndlp->nlp_type & NLP_FABRIC)
2153			lpfc_disc_state_machine(vport, ndlp, NULL,
2154					NLP_EVT_DEVICE_RECOVERY);
2155
2156		lpfc_disc_state_machine(vport, ndlp, NULL,
2157					     NLP_EVT_DEVICE_RM);
2158
2159	}
2160
2161	/* At this point, ALL ndlp's should be gone
2162	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
2164	 */
2165	while (!list_empty(&vport->fc_nodes)) {
2166		if (i++ > 3000) {
2167			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2168				"0233 Nodelist not empty\n");
2169			list_for_each_entry_safe(ndlp, next_ndlp,
2170						&vport->fc_nodes, nlp_listp) {
2171				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2172						LOG_NODE,
2173						"0282 did:x%x ndlp:x%p "
2174						"usgmap:x%x refcnt:%d\n",
2175						ndlp->nlp_DID, (void *)ndlp,
2176						ndlp->nlp_usg_map,
2177						atomic_read(
2178							&ndlp->kref.refcount));
2179			}
2180			break;
2181		}
2182
2183		/* Wait for any activity on ndlps to settle */
2184		msleep(10);
2185	}
2186}
2187
2188/**
2189 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2190 * @vport: pointer to a virtual N_Port data structure.
2191 *
2192 * This routine stops all the timers associated with a @vport. This function
2193 * is invoked before disabling or deleting a @vport. Note that the physical
2194 * port is treated as @vport 0.
2195 **/
2196void
2197lpfc_stop_vport_timers(struct lpfc_vport *vport)
2198{
2199	del_timer_sync(&vport->els_tmofunc);
2200	del_timer_sync(&vport->fc_fdmitmo);
2201	lpfc_can_disctmo(vport);
2202	return;
2203}
2204
2205/**
2206 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2207 * @phba: pointer to lpfc hba data structure.
2208 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the hbalock.
2211 **/
2212void
2213__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2214{
2215	/* Clear pending FCF rediscovery wait and failover in progress flags */
2216	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2217				FCF_DEAD_DISC |
2218				FCF_ACVL_DISC);
2219	/* Now, try to stop the timer */
2220	del_timer(&phba->fcf.redisc_wait);
2221}
2222
2223/**
2224 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2225 * @phba: pointer to lpfc hba data structure.
2226 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the hbalock
 * held before proceeding with disabling the timer and clearing the wait
 * timer pending flag.
2231 **/
2232void
2233lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2234{
2235	spin_lock_irq(&phba->hbalock);
2236	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2237		/* FCF rediscovery timer already fired or stopped */
2238		spin_unlock_irq(&phba->hbalock);
2239		return;
2240	}
2241	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2242	spin_unlock_irq(&phba->hbalock);
2243}
2244
2245/**
2246 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2247 * @phba: pointer to lpfc hba data structure.
2248 *
2249 * This routine stops all the timers associated with a HBA. This function is
2250 * invoked before either putting a HBA offline or unloading the driver.
2251 **/
2252void
2253lpfc_stop_hba_timers(struct lpfc_hba *phba)
2254{
2255	lpfc_stop_vport_timers(phba->pport);
2256	del_timer_sync(&phba->sli.mbox_tmo);
2257	del_timer_sync(&phba->fabric_block_timer);
2258	del_timer_sync(&phba->eratt_poll);
2259	del_timer_sync(&phba->hb_tmofunc);
2260	phba->hb_outstanding = 0;
2261
2262	switch (phba->pci_dev_grp) {
2263	case LPFC_PCI_DEV_LP:
2264		/* Stop any LightPulse device specific driver timers */
2265		del_timer_sync(&phba->fcp_poll_timer);
2266		break;
2267	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
2269		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2270		break;
2271	default:
2272		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2273				"0297 Invalid device group (x%x)\n",
2274				phba->pci_dev_grp);
2275		break;
2276	}
2277	return;
2278}
2279
2280/**
2281 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2282 * @phba: pointer to lpfc hba data structure.
2283 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the
2288 * driver prepares the HBA interface for online or offline.
2289 **/
2290static void
lpfc_block_mgmt_io(struct lpfc_hba *phba)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

2298	spin_lock_irqsave(&phba->hbalock, iflag);
2299	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2300	if (phba->sli.mbox_active)
2301		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2302	spin_unlock_irqrestore(&phba->hbalock, iflag);
2303	/* Determine how long we might wait for the active mailbox
2304	 * command to be gracefully completed by firmware.
2305	 */
2306	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2307			jiffies;
	/* Wait for the outstanding mailbox command to complete */
2309	while (phba->sli.mbox_active) {
2310		/* Check active mailbox complete status every 2ms */
2311		msleep(2);
2312		if (time_after(jiffies, timeout)) {
2313			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2314				"2813 Mgmt IO is Blocked %x "
2315				"- mbox cmd %x still active\n",
2316				phba->sli.sli_flag, actcmd);
2317			break;
2318		}
2319	}
2320}
2321
2322/**
2323 * lpfc_online - Initialize and bring a HBA online
2324 * @phba: pointer to lpfc hba data structure.
2325 *
2326 * This routine initializes the HBA and brings a HBA online. During this
2327 * process, the management interface is blocked to prevent user space access
2328 * to the HBA interfering with the driver initialization.
2329 *
2330 * Return codes
2331 *   0 - successful
2332 *   1 - failed
2333 **/
2334int
2335lpfc_online(struct lpfc_hba *phba)
2336{
2337	struct lpfc_vport *vport;
2338	struct lpfc_vport **vports;
2339	int i;
2340
2341	if (!phba)
2342		return 0;
2343	vport = phba->pport;
2344
2345	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2346		return 0;
2347
2348	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2349			"0458 Bring Adapter online\n");
2350
2351	lpfc_block_mgmt_io(phba);
2352
2353	if (!lpfc_sli_queue_setup(phba)) {
2354		lpfc_unblock_mgmt_io(phba);
2355		return 1;
2356	}
2357
2358	if (phba->sli_rev == LPFC_SLI_REV4) {
2359		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2360			lpfc_unblock_mgmt_io(phba);
2361			return 1;
2362		}
2363	} else {
2364		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2365			lpfc_unblock_mgmt_io(phba);
2366			return 1;
2367		}
2368	}
2369
2370	vports = lpfc_create_vport_work_array(phba);
2371	if (vports != NULL)
2372		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2373			struct Scsi_Host *shost;
2374			shost = lpfc_shost_from_vport(vports[i]);
2375			spin_lock_irq(shost->host_lock);
2376			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2377			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2378				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2379			if (phba->sli_rev == LPFC_SLI_REV4)
2380				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2381			spin_unlock_irq(shost->host_lock);
2382		}
	lpfc_destroy_vport_work_array(phba, vports);
2384
2385	lpfc_unblock_mgmt_io(phba);
2386	return 0;
2387}
2388
2389/**
2390 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2391 * @phba: pointer to lpfc hba data structure.
2392 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space access
 * to the HBA, whether from the sysfs interface or the libdfc interface, will
 * be allowed. The HBA is set to block the management interface
2397 * when the driver prepares the HBA interface for online or offline and then
2398 * set to unblock the management interface afterwards.
2399 **/
2400void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
2402{
2403	unsigned long iflag;
2404
2405	spin_lock_irqsave(&phba->hbalock, iflag);
2406	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2407	spin_unlock_irqrestore(&phba->hbalock, iflag);
2408}
2409
2410/**
2411 * lpfc_offline_prep - Prepare a HBA to be brought offline
2412 * @phba: pointer to lpfc hba data structure.
2413 *
2414 * This routine is invoked to prepare a HBA to be brought offline. It performs
2415 * unregistration login to all the nodes on all vports and flushes the mailbox
2416 * queue to make it ready to be brought offline.
2417 **/
2418void
lpfc_offline_prep(struct lpfc_hba *phba)
2420{
2421	struct lpfc_vport *vport = phba->pport;
2422	struct lpfc_nodelist  *ndlp, *next_ndlp;
2423	struct lpfc_vport **vports;
2424	struct Scsi_Host *shost;
2425	int i;
2426
2427	if (vport->fc_flag & FC_OFFLINE_MODE)
2428		return;
2429
2430	lpfc_block_mgmt_io(phba);
2431
2432	lpfc_linkdown(phba);
2433
2434	/* Issue an unreg_login to all nodes on all vports */
2435	vports = lpfc_create_vport_work_array(phba);
2436	if (vports != NULL) {
2437		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2438			if (vports[i]->load_flag & FC_UNLOADING)
2439				continue;
2440			shost = lpfc_shost_from_vport(vports[i]);
2441			spin_lock_irq(shost->host_lock);
2442			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2443			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2444			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2445			spin_unlock_irq(shost->host_lock);
2446
2448			list_for_each_entry_safe(ndlp, next_ndlp,
2449						 &vports[i]->fc_nodes,
2450						 nlp_listp) {
2451				if (!NLP_CHK_NODE_ACT(ndlp))
2452					continue;
2453				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2454					continue;
2455				if (ndlp->nlp_type & NLP_FABRIC) {
2456					lpfc_disc_state_machine(vports[i], ndlp,
2457						NULL, NLP_EVT_DEVICE_RECOVERY);
2458					lpfc_disc_state_machine(vports[i], ndlp,
2459						NULL, NLP_EVT_DEVICE_RM);
2460				}
2461				spin_lock_irq(shost->host_lock);
2462				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2463				spin_unlock_irq(shost->host_lock);
2464				lpfc_unreg_rpi(vports[i], ndlp);
2465			}
2466		}
2467	}
2468	lpfc_destroy_vport_work_array(phba, vports);
2469
2470	lpfc_sli_mbox_sys_shutdown(phba);
2471}
2472
2473/**
2474 * lpfc_offline - Bring a HBA offline
2475 * @phba: pointer to lpfc hba data structure.
2476 *
2477 * This routine actually brings a HBA offline. It stops all the timers
2478 * associated with the HBA, brings down the SLI layer, and eventually
2479 * marks the HBA as in offline state for the upper layer protocol.
2480 **/
2481void
2482lpfc_offline(struct lpfc_hba *phba)
2483{
2484	struct Scsi_Host  *shost;
2485	struct lpfc_vport **vports;
2486	int i;
2487
2488	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2489		return;
2490
2491	/* stop port and all timers associated with this hba */
2492	lpfc_stop_port(phba);
2493	vports = lpfc_create_vport_work_array(phba);
2494	if (vports != NULL)
2495		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2496			lpfc_stop_vport_timers(vports[i]);
2497	lpfc_destroy_vport_work_array(phba, vports);
2498	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2499			"0460 Bring Adapter offline\n");
2500	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2501	   now.  */
2502	lpfc_sli_hba_down(phba);
2503	spin_lock_irq(&phba->hbalock);
2504	phba->work_ha = 0;
2505	spin_unlock_irq(&phba->hbalock);
2506	vports = lpfc_create_vport_work_array(phba);
2507	if (vports != NULL)
2508		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2509			shost = lpfc_shost_from_vport(vports[i]);
2510			spin_lock_irq(shost->host_lock);
2511			vports[i]->work_port_events = 0;
2512			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2513			spin_unlock_irq(shost->host_lock);
2514		}
2515	lpfc_destroy_vport_work_array(phba, vports);
2516}
2517
2518/**
2519 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2520 * @phba: pointer to lpfc hba data structure.
2521 *
2522 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2524 * the internal resources before the device is removed from the system.
2525 *
2526 * Return codes
2527 *   0 - successful (for now, it always returns 0)
2528 **/
2529static int
2530lpfc_scsi_free(struct lpfc_hba *phba)
2531{
2532	struct lpfc_scsi_buf *sb, *sb_next;
2533	struct lpfc_iocbq *io, *io_next;
2534
2535	spin_lock_irq(&phba->hbalock);
2536	/* Release all the lpfc_scsi_bufs maintained by this host. */
2537	spin_lock(&phba->scsi_buf_list_lock);
2538	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2539		list_del(&sb->list);
2540		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2541			      sb->dma_handle);
2542		kfree(sb);
2543		phba->total_scsi_bufs--;
2544	}
2545	spin_unlock(&phba->scsi_buf_list_lock);
2546
2547	/* Release all the lpfc_iocbq entries maintained by this host. */
2548	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2549		list_del(&io->list);
2550		kfree(io);
2551		phba->total_iocbq_bufs--;
2552	}
2553	spin_unlock_irq(&phba->hbalock);
2554	return 0;
2555}
2556
2557/**
2558 * lpfc_create_port - Create an FC port
2559 * @phba: pointer to lpfc hba data structure.
2560 * @instance: a unique integer ID to this FC port.
2561 * @dev: pointer to the device data structure.
2562 *
2563 * This routine creates a FC port for the upper layer protocol. The FC port
2564 * can be created on top of either a physical port or a virtual port provided
2565 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates it with the newly created FC port before adding the shost
 * to the SCSI layer.
2568 *
2569 * Return codes
2570 *   @vport - pointer to the virtual N_Port data structure.
2571 *   NULL - port create failed.
2572 **/
2573struct lpfc_vport *
2574lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2575{
2576	struct lpfc_vport *vport;
2577	struct Scsi_Host  *shost;
2578	int error = 0;
2579
2580	if (dev != &phba->pcidev->dev)
2581		shost = scsi_host_alloc(&lpfc_vport_template,
2582					sizeof(struct lpfc_vport));
2583	else
2584		shost = scsi_host_alloc(&lpfc_template,
2585					sizeof(struct lpfc_vport));
2586	if (!shost)
2587		goto out;
2588
2589	vport = (struct lpfc_vport *) shost->hostdata;
2590	vport->phba = phba;
2591	vport->load_flag |= FC_LOADING;
2592	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2593	vport->fc_rscn_flush = 0;
2594
2595	lpfc_get_vport_cfgparam(vport);
2596	shost->unique_id = instance;
2597	shost->max_id = LPFC_MAX_TARGET;
2598	shost->max_lun = vport->cfg_max_luns;
2599	shost->this_id = -1;
2600	shost->max_cmd_len = 16;
2601	if (phba->sli_rev == LPFC_SLI_REV4) {
2602		shost->dma_boundary =
2603			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2604		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2605	}
2606
2607	/*
2608	 * Set initial can_queue value since 0 is no longer supported and
2609	 * scsi_add_host will fail. This will be adjusted later based on the
2610	 * max xri value determined in hba setup.
2611	 */
2612	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2613	if (dev != &phba->pcidev->dev) {
2614		shost->transportt = lpfc_vport_transport_template;
2615		vport->port_type = LPFC_NPIV_PORT;
2616	} else {
2617		shost->transportt = lpfc_transport_template;
2618		vport->port_type = LPFC_PHYSICAL_PORT;
2619	}
2620
2621	/* Initialize all internally managed lists. */
2622	INIT_LIST_HEAD(&vport->fc_nodes);
2623	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2624	spin_lock_init(&vport->work_port_lock);
2625
2626	init_timer(&vport->fc_disctmo);
2627	vport->fc_disctmo.function = lpfc_disc_timeout;
2628	vport->fc_disctmo.data = (unsigned long)vport;
2629
2630	init_timer(&vport->fc_fdmitmo);
2631	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2632	vport->fc_fdmitmo.data = (unsigned long)vport;
2633
2634	init_timer(&vport->els_tmofunc);
2635	vport->els_tmofunc.function = lpfc_els_timeout;
2636	vport->els_tmofunc.data = (unsigned long)vport;
2637	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2638	if (error)
2639		goto out_put_shost;
2640
2641	spin_lock_irq(&phba->hbalock);
2642	list_add_tail(&vport->listentry, &phba->port_list);
2643	spin_unlock_irq(&phba->hbalock);
2644	return vport;
2645
2646out_put_shost:
2647	scsi_host_put(shost);
2648out:
2649	return NULL;
2650}
2651
2652/**
2653 * destroy_port -  destroy an FC port
2654 * @vport: pointer to an lpfc virtual N_Port data structure.
2655 *
2656 * This routine destroys a FC port from the upper layer protocol. All the
2657 * resources associated with the port are released.
2658 **/
2659void
2660destroy_port(struct lpfc_vport *vport)
2661{
2662	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2663	struct lpfc_hba  *phba = vport->phba;
2664
2665	lpfc_debugfs_terminate(vport);
2666	fc_remove_host(shost);
2667	scsi_remove_host(shost);
2668
2669	spin_lock_irq(&phba->hbalock);
2670	list_del_init(&vport->listentry);
2671	spin_unlock_irq(&phba->hbalock);
2672
2673	lpfc_cleanup(vport);
2674	return;
2675}
2676
2677/**
2678 * lpfc_get_instance - Get a unique integer ID
2679 *
2680 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2681 * uses the kernel idr facility to perform the task.
2682 *
2683 * Return codes:
2684 *   instance - a unique integer ID allocated as the new instance.
2685 *   -1 - lpfc get instance failed.
2686 **/
2687int
2688lpfc_get_instance(void)
2689{
2690	int instance = 0;
2691
2692	/* Assign an unused number */
2693	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2694		return -1;
2695	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2696		return -1;
2697	return instance;
2698}
2699
2700/**
2701 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2702 * @shost: pointer to SCSI host data structure.
2703 * @time: elapsed time of the scan in jiffies.
2704 *
2705 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan is finished.
2707 *
2708 * Note: there is no scan_start function as adapter initialization will have
2709 * asynchronously kicked off the link initialization.
2710 *
2711 * Return codes
2712 *   0 - SCSI host scan is not over yet.
2713 *   1 - SCSI host scan is over.
2714 **/
2715int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2716{
2717	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2718	struct lpfc_hba   *phba = vport->phba;
2719	int stat = 0;
2720
2721	spin_lock_irq(shost->host_lock);
2722
2723	if (vport->load_flag & FC_UNLOADING) {
2724		stat = 1;
2725		goto finished;
2726	}
2727	if (time >= 30 * HZ) {
2728		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2729				"0461 Scanning longer than 30 "
2730				"seconds.  Continuing initialization\n");
2731		stat = 1;
2732		goto finished;
2733	}
2734	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2735		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2736				"0465 Link down longer than 15 "
2737				"seconds.  Continuing initialization\n");
2738		stat = 1;
2739		goto finished;
2740	}
2741
2742	if (vport->port_state != LPFC_VPORT_READY)
2743		goto finished;
2744	if (vport->num_disc_nodes || vport->fc_prli_sent)
2745		goto finished;
2746	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2747		goto finished;
2748	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2749		goto finished;
2750
2751	stat = 1;
2752
2753finished:
2754	spin_unlock_irq(shost->host_lock);
2755	return stat;
2756}
2757
2758/**
2759 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2760 * @shost: pointer to SCSI host data structure.
2761 *
 * This routine initializes a given SCSI host's attributes on an FC port. The
2763 * SCSI host can be either on top of a physical port or a virtual port.
2764 **/
2765void lpfc_host_attrib_init(struct Scsi_Host *shost)
2766{
2767	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2768	struct lpfc_hba   *phba = vport->phba;
2769	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2771	 */
2772
2773	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2774	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2775	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2776
2777	memset(fc_host_supported_fc4s(shost), 0,
2778	       sizeof(fc_host_supported_fc4s(shost)));
2779	fc_host_supported_fc4s(shost)[2] = 1;
2780	fc_host_supported_fc4s(shost)[7] = 1;
2781
2782	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2783				 sizeof fc_host_symbolic_name(shost));
2784
2785	fc_host_supported_speeds(shost) = 0;
2786	if (phba->lmt & LMT_10Gb)
2787		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2788	if (phba->lmt & LMT_8Gb)
2789		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2790	if (phba->lmt & LMT_4Gb)
2791		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2792	if (phba->lmt & LMT_2Gb)
2793		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2794	if (phba->lmt & LMT_1Gb)
2795		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2796
2797	fc_host_maxframe_size(shost) =
2798		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2799		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2800
2801	/* This value is also unchanging */
2802	memset(fc_host_active_fc4s(shost), 0,
2803	       sizeof(fc_host_active_fc4s(shost)));
2804	fc_host_active_fc4s(shost)[2] = 1;
2805	fc_host_active_fc4s(shost)[7] = 1;
2806
2807	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2808	spin_lock_irq(shost->host_lock);
2809	vport->load_flag &= ~FC_LOADING;
2810	spin_unlock_irq(shost->host_lock);
2811}
2812
2813/**
2814 * lpfc_stop_port_s3 - Stop SLI3 device port
2815 * @phba: pointer to lpfc hba data structure.
2816 *
2817 * This routine is invoked to stop an SLI3 device port, it stops the device
2818 * from generating interrupts and stops the device driver's timers for the
2819 * device.
2820 **/
2821static void
2822lpfc_stop_port_s3(struct lpfc_hba *phba)
2823{
2824	/* Clear all interrupt enable conditions */
2825	writel(0, phba->HCregaddr);
2826	readl(phba->HCregaddr); /* flush */
2827	/* Clear all pending interrupts */
2828	writel(0xffffffff, phba->HAregaddr);
2829	readl(phba->HAregaddr); /* flush */
2830
2831	/* Reset some HBA SLI setup states */
2832	lpfc_stop_hba_timers(phba);
2833	phba->pport->work_port_events = 0;
2834}
2835
2836/**
2837 * lpfc_stop_port_s4 - Stop SLI4 device port
2838 * @phba: pointer to lpfc hba data structure.
2839 *
2840 * This routine is invoked to stop an SLI4 device port, it stops the device
2841 * from generating interrupts and stops the device driver's timers for the
2842 * device.
2843 **/
2844static void
2845lpfc_stop_port_s4(struct lpfc_hba *phba)
2846{
2847	/* Reset some HBA SLI4 setup states */
2848	lpfc_stop_hba_timers(phba);
2849	phba->pport->work_port_events = 0;
2850	phba->sli4_hba.intr_enable = 0;
2851}
2852
2853/**
2854 * lpfc_stop_port - Wrapper function for stopping hba port
2855 * @phba: Pointer to HBA context object.
2856 *
 * This routine wraps the actual SLI3 or SLI4 HBA stop port routine via
 * the API jump table function pointer in the lpfc_hba struct.
2859 **/
2860void
2861lpfc_stop_port(struct lpfc_hba *phba)
2862{
2863	phba->lpfc_stop_port(phba);
2864}
2865
2866/**
 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2868 * @phba: pointer to lpfc hba data structure.
2869 *
2870 * This routine is invoked to remove the driver default fcf record from
2871 * the port.  This routine currently acts on FCF Index 0.
2872 *
2873 **/
2874void
2875lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2876{
2877	int rc = 0;
2878	LPFC_MBOXQ_t *mboxq;
2879	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2880	uint32_t mbox_tmo, req_len;
2881	uint32_t shdr_status, shdr_add_status;
2882
2883	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2884	if (!mboxq) {
2885		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2886			"2020 Failed to allocate mbox for ADD_FCF cmd\n");
2887		return;
2888	}
2889
2890	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2891		  sizeof(struct lpfc_sli4_cfg_mhdr);
2892	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2893			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2894			      req_len, LPFC_SLI4_MBX_EMBED);
2895	/*
	 * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
2897	 * supports multiple FCF indices.
2898	 */
2899	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2900	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2901	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2902	       phba->fcf.current_rec.fcf_indx);
2903
2904	if (!phba->sli4_hba.intr_enable)
2905		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2906	else {
2907		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2908		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2909	}
2910	/* The IOCTL status is embedded in the mailbox subheader. */
2911	shdr_status = bf_get(lpfc_mbox_hdr_status,
2912			     &del_fcf_record->header.cfg_shdr.response);
2913	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2914				 &del_fcf_record->header.cfg_shdr.response);
2915	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2916		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2917				"2516 DEL FCF of default FCF Index failed "
2918				"mbx status x%x, status x%x add_status x%x\n",
2919				rc, shdr_status, shdr_add_status);
2920	}
2921	if (rc != MBX_TIMEOUT)
2922		mempool_free(mboxq, phba->mbox_mem_pool);
2923}
2924
2925/**
2926 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2927 * @phba: Pointer to hba for which this call is being executed.
2928 *
2929 * This routine starts the timer waiting for the FCF rediscovery to complete.
2930 **/
2931void
2932lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2933{
2934	unsigned long fcf_redisc_wait_tmo =
2935		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2936	/* Start fcf rediscovery wait period timer */
2937	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2938	spin_lock_irq(&phba->hbalock);
	/* Allow acting on new FCF asynchronous events */
2940	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2941	/* Mark the FCF rediscovery pending state */
2942	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2943	spin_unlock_irq(&phba->hbalock);
2944}
2945
2946/**
2947 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: maps to the lpfc_hba data structure pointer.
 *
 * This routine is invoked when the wait for FCF table rediscovery times out.
 * If new FCF records have been discovered during the wait period, a new FCF
 * event is added to the FCoE async event list and the worker thread is woken
 * up to process it in the worker thread context.
2955 **/
2956void
2957lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2958{
2959	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2960
2961	/* Don't send FCF rediscovery event if timer cancelled */
2962	spin_lock_irq(&phba->hbalock);
2963	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2964		spin_unlock_irq(&phba->hbalock);
2965		return;
2966	}
2967	/* Clear FCF rediscovery timer pending flag */
2968	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2969	/* FCF rediscovery event to worker thread */
2970	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2971	spin_unlock_irq(&phba->hbalock);
2972	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2973			"2776 FCF rediscover wait timer expired, post "
2974			"a worker thread event for FCF table scan\n");
2975	/* wake up worker thread */
2976	lpfc_worker_wake_up(phba);
2977}
2978
2979/**
2980 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2981 * @phba: pointer to lpfc hba data structure.
2982 *
2983 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2984 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
 * was successful and the firmware supports FCoE. Any other return indicates
 * an error. It is assumed that this function will be called before interrupts
2987 * are enabled.
2988 **/
2989static int
2990lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2991{
2992	int rc = 0;
2993	LPFC_MBOXQ_t *mboxq;
2994	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2995	uint32_t length;
2996	uint32_t shdr_status, shdr_add_status;
2997
2998	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2999	if (!mboxq) {
3000		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3001				"2621 Failed to allocate mbox for "
3002				"query firmware config cmd\n");
3003		return -ENOMEM;
3004	}
3005	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
3006	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
3007		  sizeof(struct lpfc_sli4_cfg_mhdr));
3008	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
3009			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
3010			 length, LPFC_SLI4_MBX_EMBED);
3011	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3012	/* The IOCTL status is embedded in the mailbox subheader. */
3013	shdr_status = bf_get(lpfc_mbox_hdr_status,
3014			     &query_fw_cfg->header.cfg_shdr.response);
3015	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
3016				 &query_fw_cfg->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2622 Query Firmware Config failed "
				"mbx status x%x, status x%x add_status x%x\n",
				rc, shdr_status, shdr_add_status);
		/* Don't leak the mailbox on the error path */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2623 FCoE Function not supported by firmware. "
				"Function mode = %08x\n",
				query_fw_cfg->function_mode);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
3031	if (rc != MBX_TIMEOUT)
3032		mempool_free(mboxq, phba->mbox_mem_pool);
3033	return 0;
3034}
3035
3036/**
3037 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3038 * @phba: pointer to lpfc hba data structure.
3039 * @acqe_link: pointer to the async link completion queue entry.
3040 *
3041 * This routine is to parse the SLI4 link-attention link fault code and
3042 * translate it into the base driver's read link attention mailbox command
3043 * status.
3044 *
3045 * Return: Link-attention status in terms of base driver's coding.
3046 **/
3047static uint16_t
3048lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3049			   struct lpfc_acqe_link *acqe_link)
3050{
3051	uint16_t latt_fault;
3052
3053	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3054	case LPFC_ASYNC_LINK_FAULT_NONE:
3055	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3056	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3057		latt_fault = 0;
3058		break;
3059	default:
3060		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3061				"0398 Invalid link fault code: x%x\n",
3062				bf_get(lpfc_acqe_link_fault, acqe_link));
3063		latt_fault = MBXERR_ERROR;
3064		break;
3065	}
3066	return latt_fault;
3067}
3068
3069/**
3070 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3071 * @phba: pointer to lpfc hba data structure.
3072 * @acqe_link: pointer to the async link completion queue entry.
3073 *
3074 * This routine is to parse the SLI4 link attention type and translate it
3075 * into the base driver's link attention type coding.
3076 *
3077 * Return: Link attention type in terms of base driver's coding.
3078 **/
3079static uint8_t
3080lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3081			  struct lpfc_acqe_link *acqe_link)
3082{
3083	uint8_t att_type;
3084
3085	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3086	case LPFC_ASYNC_LINK_STATUS_DOWN:
3087	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3088		att_type = AT_LINK_DOWN;
3089		break;
3090	case LPFC_ASYNC_LINK_STATUS_UP:
3091		/* Ignore physical link up events - wait for logical link up */
3092		att_type = AT_RESERVED;
3093		break;
3094	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3095		att_type = AT_LINK_UP;
3096		break;
3097	default:
3098		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3099				"0399 Invalid link attention type: x%x\n",
3100				bf_get(lpfc_acqe_link_status, acqe_link));
3101		att_type = AT_RESERVED;
3102		break;
3103	}
3104	return att_type;
3105}
3106
3107/**
3108 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3109 * @phba: pointer to lpfc hba data structure.
3110 * @acqe_link: pointer to the async link completion queue entry.
3111 *
3112 * This routine is to parse the SLI4 link-attention link speed and translate
3113 * it into the base driver's link-attention link speed coding.
3114 *
3115 * Return: Link-attention link speed in terms of base driver's coding.
3116 **/
3117static uint8_t
3118lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3119				struct lpfc_acqe_link *acqe_link)
3120{
3121	uint8_t link_speed;
3122
3123	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3124	case LPFC_ASYNC_LINK_SPEED_ZERO:
3125		link_speed = LA_UNKNW_LINK;
3126		break;
3127	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3128		link_speed = LA_UNKNW_LINK;
3129		break;
3130	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3131		link_speed = LA_UNKNW_LINK;
3132		break;
3133	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3134		link_speed = LA_1GHZ_LINK;
3135		break;
3136	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3137		link_speed = LA_10GHZ_LINK;
3138		break;
3139	default:
3140		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3141				"0483 Invalid link-attention link speed: x%x\n",
3142				bf_get(lpfc_acqe_link_speed, acqe_link));
3143		link_speed = LA_UNKNW_LINK;
3144		break;
3145	}
3146	return link_speed;
3147}
3148
3149/**
3150 * lpfc_sli4_async_link_evt - Process the asynchronous link event
3151 * @phba: pointer to lpfc hba data structure.
3152 * @acqe_link: pointer to the async link completion queue entry.
3153 *
3154 * This routine is to handle the SLI4 asynchronous link event.
3155 **/
3156static void
3157lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3158			 struct lpfc_acqe_link *acqe_link)
3159{
3160	struct lpfc_dmabuf *mp;
3161	LPFC_MBOXQ_t *pmb;
3162	MAILBOX_t *mb;
3163	READ_LA_VAR *la;
3164	uint8_t att_type;
3165
3166	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3167	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3168		return;
3169	phba->fcoe_eventtag = acqe_link->event_tag;
3170	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3171	if (!pmb) {
3172		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3173				"0395 The mboxq allocation failed\n");
3174		return;
3175	}
3176	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3177	if (!mp) {
3178		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3179				"0396 The lpfc_dmabuf allocation failed\n");
3180		goto out_free_pmb;
3181	}
3182	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3183	if (!mp->virt) {
3184		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3185				"0397 The mbuf allocation failed\n");
3186		goto out_free_dmabuf;
3187	}
3188
3189	/* Cleanup any outstanding ELS commands */
3190	lpfc_els_flush_all_cmd(phba);
3191
	/* Block ELS IOCBs until we are done processing the link event */
3193	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3194
3195	/* Update link event statistics */
3196	phba->sli.slistat.link_event++;
3197
3198	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3199	lpfc_read_la(phba, pmb, mp);
3200	pmb->vport = phba->pport;
3201
3202	/* Parse and translate status field */
3203	mb = &pmb->u.mb;
3204	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3205
3206	/* Parse and translate link attention fields */
3207	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3208	la->eventTag = acqe_link->event_tag;
3209	la->attType = att_type;
3210	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3211
	/* Fake the following irrelevant fields */
3213	la->topology = TOPOLOGY_PT_PT;
3214	la->granted_AL_PA = 0;
3215	la->il = 0;
3216	la->pb = 0;
3217	la->fa = 0;
3218	la->mm = 0;
3219
3220	/* Keep the link status for extra SLI4 state machine reference */
3221	phba->sli4_hba.link_state.speed =
3222				bf_get(lpfc_acqe_link_speed, acqe_link);
3223	phba->sli4_hba.link_state.duplex =
3224				bf_get(lpfc_acqe_link_duplex, acqe_link);
3225	phba->sli4_hba.link_state.status =
3226				bf_get(lpfc_acqe_link_status, acqe_link);
3227	phba->sli4_hba.link_state.physical =
3228				bf_get(lpfc_acqe_link_physical, acqe_link);
3229	phba->sli4_hba.link_state.fault =
3230				bf_get(lpfc_acqe_link_fault, acqe_link);
3231	phba->sli4_hba.link_state.logical_speed =
3232				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3233
3234	/* Invoke the lpfc_handle_latt mailbox command callback function */
3235	lpfc_mbx_cmpl_read_la(phba, pmb);
3236
3237	return;
3238
3239out_free_dmabuf:
3240	kfree(mp);
3241out_free_pmb:
3242	mempool_free(pmb, phba->mbox_mem_pool);
3243}
3244
3245/**
3246 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3247 * @vport: pointer to vport data structure.
3248 *
3249 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3250 * response to a CVL event.
3251 *
3252 * Return the pointer to the ndlp with the vport if successful, otherwise
3253 * return NULL.
3254 **/
3255static struct lpfc_nodelist *
3256lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3257{
3258	struct lpfc_nodelist *ndlp;
3259	struct Scsi_Host *shost;
3260	struct lpfc_hba *phba;
3261
3262	if (!vport)
3263		return NULL;
3264	phba = vport->phba;
3265	if (!phba)
3266		return NULL;
3267	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3268	if (!ndlp) {
3269		/* Cannot find existing Fabric ndlp, so allocate a new one */
3270		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3271		if (!ndlp)
			return NULL;
3273		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3274		/* Set the node type */
3275		ndlp->nlp_type |= NLP_FABRIC;
3276		/* Put ndlp onto node list */
3277		lpfc_enqueue_node(vport, ndlp);
3278	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3279		/* re-setup ndlp without removing from node list */
3280		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3281		if (!ndlp)
			return NULL;
3283	}
3284	if (phba->pport->port_state <= LPFC_FLOGI)
3285		return NULL;
3286	/* If virtual link is not yet instantiated ignore CVL */
3287	if (vport->port_state <= LPFC_FDISC)
3288		return NULL;
3289	shost = lpfc_shost_from_vport(vport);
3290	if (!shost)
3291		return NULL;
3292	lpfc_linkdown_port(vport);
3293	lpfc_cleanup_pending_mbox(vport);
3294	spin_lock_irq(shost->host_lock);
3295	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3296	spin_unlock_irq(shost->host_lock);
3297
3298	return ndlp;
3299}
3300
3301/**
3302 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
3304 *
3305 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3306 * response to a FCF dead event.
3307 **/
3308static void
3309lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3310{
3311	struct lpfc_vport **vports;
3312	int i;
3313
3314	vports = lpfc_create_vport_work_array(phba);
3315	if (vports)
3316		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3317			lpfc_sli4_perform_vport_cvl(vports[i]);
3318	lpfc_destroy_vport_work_array(phba, vports);
3319}
3320
3321/**
3322 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3323 * @phba: pointer to lpfc hba data structure.
 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
3325 *
3326 * This routine is to handle the SLI4 asynchronous fcoe event.
3327 **/
3328static void
3329lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3330			 struct lpfc_acqe_fcoe *acqe_fcoe)
3331{
3332	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3333	int rc;
3334	struct lpfc_vport *vport;
3335	struct lpfc_nodelist *ndlp;
3336	struct Scsi_Host  *shost;
3337	int active_vlink_present;
3338	struct lpfc_vport **vports;
3339	int i;
3340
3341	phba->fc_eventTag = acqe_fcoe->event_tag;
3342	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3343	switch (event_type) {
3344	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3345	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3346		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3347			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3348					LOG_DISCOVERY,
3349					"2546 New FCF found event: "
3350					"evt_tag:x%x, fcf_index:x%x\n",
3351					acqe_fcoe->event_tag,
3352					acqe_fcoe->index);
3353		else
3354			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3355					LOG_DISCOVERY,
3356					"2788 FCF parameter modified event: "
3357					"evt_tag:x%x, fcf_index:x%x\n",
3358					acqe_fcoe->event_tag,
3359					acqe_fcoe->index);
3360		spin_lock_irq(&phba->hbalock);
3361		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3362		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3363			/*
3364			 * If the current FCF is in discovered state or
3365			 * FCF discovery is in progress, do nothing.
3366			 */
3367			spin_unlock_irq(&phba->hbalock);
3368			break;
3369		}
3370
3371		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3372			/*
3373			 * If fast FCF failover rescan event is pending,
3374			 * do nothing.
3375			 */
3376			spin_unlock_irq(&phba->hbalock);
3377			break;
3378		}
3379		spin_unlock_irq(&phba->hbalock);
3380
3381		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3382		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3383			/*
3384			 * During period of FCF discovery, read the FCF
3385			 * table record indexed by the event to update
3386			 * FCF round robin failover eligible FCF bmask.
3387			 */
3388			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3389					LOG_DISCOVERY,
3390					"2779 Read new FCF record with "
3391					"fcf_index:x%x for updating FCF "
3392					"round robin failover bmask\n",
3393					acqe_fcoe->index);
3394			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3395		}
3396
3397		/* Otherwise, scan the entire FCF table and re-discover SAN */
3398		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3399				"2770 Start FCF table scan due to new FCF "
3400				"event: evt_tag:x%x, fcf_index:x%x\n",
3401				acqe_fcoe->event_tag, acqe_fcoe->index);
3402		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3403						     LPFC_FCOE_FCF_GET_FIRST);
3404		if (rc)
3405			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3406					"2547 Issue FCF scan read FCF mailbox "
3407					"command failed 0x%x\n", rc);
3408		break;
3409
3410	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3411		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3412			"2548 FCF Table full count 0x%x tag 0x%x\n",
3413			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3414			acqe_fcoe->event_tag);
3415		break;
3416
3417	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3418		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3419			"2549 FCF disconnected from network index 0x%x"
3420			" tag 0x%x\n", acqe_fcoe->index,
3421			acqe_fcoe->event_tag);
3422		/* If the event is not for currently used fcf do nothing */
3423		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3424			break;
3425		/* If we are not already in the middle of the FCF
3426		 * failover process, request the port to rediscover the
3427		 * entire FCF table for a fast recovery in case the
3428		 * current FCF record is no longer valid.
3429		 */
3430		spin_lock_irq(&phba->hbalock);
3431		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3432			spin_unlock_irq(&phba->hbalock);
3433			/* Update FLOGI FCF failover eligible FCF bmask */
3434			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3435			break;
3436		}
3437		/* Mark the fast failover process in progress */
3438		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3439		spin_unlock_irq(&phba->hbalock);
3440		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3441				"2771 Start FCF fast failover process due to "
3442				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3443				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3444		rc = lpfc_sli4_redisc_fcf_table(phba);
3445		if (rc) {
3446			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3447					LOG_DISCOVERY,
3448					"2772 Issue FCF rediscover mailbox "
3449					"command failed, fail through to FCF "
3450					"dead event\n");
3451			spin_lock_irq(&phba->hbalock);
3452			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3453			spin_unlock_irq(&phba->hbalock);
3454			/*
3455			 * Last resort will fail over by treating this
3456			 * as a link down to FCF registration.
3457			 */
3458			lpfc_sli4_fcf_dead_failthrough(phba);
3459		} else
3460			/* Handling fast FCF failover to a DEAD FCF event
3461			 * is considered equivalent to receiving CVL to all
3462			 * vports.
3463			 */
3464			lpfc_sli4_perform_all_vport_cvl(phba);
3465		break;
3466	case LPFC_FCOE_EVENT_TYPE_CVL:
3467		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3468			"2718 Clear Virtual Link Received for VPI 0x%x"
3469			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3470		vport = lpfc_find_vport_by_vpid(phba,
3471				acqe_fcoe->index - phba->vpi_base);
3472		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3473		if (!ndlp)
3474			break;
3475		active_vlink_present = 0;
3476
3477		vports = lpfc_create_vport_work_array(phba);
3478		if (vports) {
3479			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3480					i++) {
3481				if ((!(vports[i]->fc_flag &
3482					FC_VPORT_CVL_RCVD)) &&
3483					(vports[i]->port_state > LPFC_FDISC)) {
3484					active_vlink_present = 1;
3485					break;
3486				}
3487			}
3488			lpfc_destroy_vport_work_array(phba, vports);
3489		}
3490
3491		if (active_vlink_present) {
3492			/*
3493			 * If there are other active VLinks present,
3494			 * re-instantiate the Vlink using FDISC.
3495			 */
3496			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3497			shost = lpfc_shost_from_vport(vport);
3498			spin_lock_irq(shost->host_lock);
3499			ndlp->nlp_flag |= NLP_DELAY_TMO;
3500			spin_unlock_irq(shost->host_lock);
3501			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3502			vport->port_state = LPFC_FDISC;
3503		} else {
3504			/*
3505			 * Otherwise, if we are not already in the
3506			 * FCF failover process, request the port to
3507			 * rediscover the entire FCF table for a fast
3508			 * recovery in case the current FCF is no
3509			 * longer valid.
3510			 */
3511			spin_lock_irq(&phba->hbalock);
3512			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3513				spin_unlock_irq(&phba->hbalock);
3514				break;
3515			}
3516			/* Mark the fast failover process in progress */
3517			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3518			spin_unlock_irq(&phba->hbalock);
3519			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3520					LOG_DISCOVERY,
3521					"2773 Start FCF fast failover due "
3522					"to CVL event: evt_tag:x%x\n",
3523					acqe_fcoe->event_tag);
3524			rc = lpfc_sli4_redisc_fcf_table(phba);
3525			if (rc) {
3526				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3527						LOG_DISCOVERY,
3528						"2774 Issue FCF rediscover "
3529						"mailbox command failed, fail "
3530						"through to CVL event\n");
3531				spin_lock_irq(&phba->hbalock);
3532				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3533				spin_unlock_irq(&phba->hbalock);
3534				/*
3535				 * Last resort will be to re-try on the
3536				 * currently registered FCF entry.
3537				 */
3538				lpfc_retry_pport_discovery(phba);
3539			}
3540		}
3541		break;
3542	default:
3543		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3544			"0288 Unknown FCoE event type 0x%x event tag "
3545			"0x%x\n", event_type, acqe_fcoe->event_tag);
3546		break;
3547	}
3548}
3549
3550/**
3551 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3552 * @phba: pointer to lpfc hba data structure.
3553 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3554 *
3555 * This routine is to handle the SLI4 asynchronous dcbx event.
3556 **/
3557static void
3558lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3559			 struct lpfc_acqe_dcbx *acqe_dcbx)
3560{
3561	phba->fc_eventTag = acqe_dcbx->event_tag;
3562	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3563			"0290 The SLI4 DCBX asynchronous event is not "
3564			"handled yet\n");
3565}
3566
3567/**
3568 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3569 * @phba: pointer to lpfc hba data structure.
3570 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3571 *
3572 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3573 * is an asynchronous notification of a logical link speed change.  The Port
3574 * reports the logical link speed in units of 10Mbps.
3575 **/
3576static void
3577lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3578			 struct lpfc_acqe_grp5 *acqe_grp5)
3579{
3580	uint16_t prev_ll_spd;
3581
3582	phba->fc_eventTag = acqe_grp5->event_tag;
3583	phba->fcoe_eventtag = acqe_grp5->event_tag;
3584	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3585	phba->sli4_hba.link_state.logical_speed =
3586		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
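	/*
	 * The reported value is in units of 10Mbps, so, for example, a
	 * raw value of 1000 corresponds to a 10000Mbps (10Gbps) logical
	 * link; the log message below applies the same scaling.
	 */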
3587	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3588			"2789 GRP5 Async Event: Updating logical link speed "
3589			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3590			(phba->sli4_hba.link_state.logical_speed*10));
3591}
3592
3593/**
3594 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3595 * @phba: pointer to lpfc hba data structure.
3596 *
3597 * This routine is invoked by the worker thread to process all the pending
3598 * SLI4 asynchronous events.
3599 **/
3600void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3601{
3602	struct lpfc_cq_event *cq_event;
3603
3604	/* First, declare the async event has been handled */
3605	spin_lock_irq(&phba->hbalock);
3606	phba->hba_flag &= ~ASYNC_EVENT;
3607	spin_unlock_irq(&phba->hbalock);
3608	/* Now, handle all the async events */
3609	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3610		/* Get the first event from the head of the event queue */
3611		spin_lock_irq(&phba->hbalock);
3612		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3613				 cq_event, struct lpfc_cq_event, list);
3614		spin_unlock_irq(&phba->hbalock);
3615		/* Process the asynchronous event */
3616		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3617		case LPFC_TRAILER_CODE_LINK:
3618			lpfc_sli4_async_link_evt(phba,
3619						 &cq_event->cqe.acqe_link);
3620			break;
3621		case LPFC_TRAILER_CODE_FCOE:
3622			lpfc_sli4_async_fcoe_evt(phba,
3623						 &cq_event->cqe.acqe_fcoe);
3624			break;
3625		case LPFC_TRAILER_CODE_DCBX:
3626			lpfc_sli4_async_dcbx_evt(phba,
3627						 &cq_event->cqe.acqe_dcbx);
3628			break;
3629		case LPFC_TRAILER_CODE_GRP5:
3630			lpfc_sli4_async_grp5_evt(phba,
3631						 &cq_event->cqe.acqe_grp5);
3632			break;
3633		default:
3634			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3635					"1804 Invalid asynchronous event code: "
3636					"x%x\n", bf_get(lpfc_trailer_code,
3637					&cq_event->cqe.mcqe_cmpl));
3638			break;
3639		}
3640		/* Free the completion event processed to the free pool */
3641		lpfc_sli4_cq_event_release(phba, cq_event);
3642	}
3643}
3644
3645/**
3646 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3647 * @phba: pointer to lpfc hba data structure.
3648 *
3649 * This routine is invoked by the worker thread to process FCF table
3650 * rediscovery pending completion event.
3651 **/
3652void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3653{
3654	int rc;
3655
3656	spin_lock_irq(&phba->hbalock);
3657	/* Clear FCF rediscovery timeout event */
3658	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3659	/* Clear driver fast failover FCF record flag */
3660	phba->fcf.failover_rec.flag = 0;
3661	/* Set state for FCF fast failover */
3662	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3663	spin_unlock_irq(&phba->hbalock);
3664
3665	/* Scan FCF table from the first entry to re-discover SAN */
3666	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3667			"2777 Start FCF table scan after FCF "
3668			"rediscovery quiescent period over\n");
3669	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3670	if (rc)
3671		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3672				"2747 Issue FCF scan read FCF mailbox "
3673				"command failed 0x%x\n", rc);
3674}
3675
3676/**
3677 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3678 * @phba: pointer to lpfc hba data structure.
3679 * @dev_grp: The HBA PCI-Device group number.
3680 *
3681 * This routine is invoked to set up the per HBA PCI-Device group function
3682 * API jump table entries.
3683 *
3684 * Return: 0 if success, otherwise -ENODEV
3685 **/
3686int
3687lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3688{
3689	int rc;
3690
3691	/* Set up lpfc PCI-device group */
3692	phba->pci_dev_grp = dev_grp;
3693
3694	/* The LPFC_PCI_DEV_OC uses SLI4 */
3695	if (dev_grp == LPFC_PCI_DEV_OC)
3696		phba->sli_rev = LPFC_SLI_REV4;
3697
3698	/* Set up device INIT API function jump table */
3699	rc = lpfc_init_api_table_setup(phba, dev_grp);
3700	if (rc)
3701		return -ENODEV;
3702	/* Set up SCSI API function jump table */
3703	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3704	if (rc)
3705		return -ENODEV;
3706	/* Set up SLI API function jump table */
3707	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3708	if (rc)
3709		return -ENODEV;
3710	/* Set up MBOX API function jump table */
3711	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3712	if (rc)
3713		return -ENODEV;
3714
3715	return 0;
3716}
3717
3718/**
3719 * lpfc_log_intr_mode - Log the active interrupt mode
3720 * @phba: pointer to lpfc hba data structure.
3721 * @intr_mode: active interrupt mode adopted.
3722 *
3723 * This routine is invoked to log the currently used active interrupt mode
3724 * to the device.
3725 **/
3726static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3727{
3728	switch (intr_mode) {
3729	case 0:
3730		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3731				"0470 Enabled INTx interrupt mode.\n");
3732		break;
3733	case 1:
3734		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3735				"0481 Enabled MSI interrupt mode.\n");
3736		break;
3737	case 2:
3738		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3739				"0480 Enabled MSI-X interrupt mode.\n");
3740		break;
3741	default:
3742		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3743				"0482 Illegal interrupt mode.\n");
3744		break;
3745	}
3746	return;
3747}
3748
3749/**
3750 * lpfc_enable_pci_dev - Enable a generic PCI device.
3751 * @phba: pointer to lpfc hba data structure.
3752 *
3753 * This routine is invoked to enable the PCI device that is common to all
3754 * PCI devices.
3755 *
3756 * Return codes
3757 * 	0 - successful
3758 * 	other values - error
3759 **/
3760static int
3761lpfc_enable_pci_dev(struct lpfc_hba *phba)
3762{
3763	struct pci_dev *pdev;
3764	int bars;
3765
3766	/* Obtain PCI device reference */
3767	if (!phba->pcidev)
3768		goto out_error;
3769	else
3770		pdev = phba->pcidev;
3771	/* Select PCI BARs */
3772	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3773	/* Enable PCI device */
3774	if (pci_enable_device_mem(pdev))
3775		goto out_error;
3776	/* Request PCI resource for the device */
3777	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3778		goto out_disable_device;
3779	/* Set up device as PCI master and save state for EEH */
3780	pci_set_master(pdev);
3781	pci_try_set_mwi(pdev);
3782	pci_save_state(pdev);
3783
3784	return 0;
3785
3786out_disable_device:
3787	pci_disable_device(pdev);
3788out_error:
3789	return -ENODEV;
3790}
3791
3792/**
3793 * lpfc_disable_pci_dev - Disable a generic PCI device.
3794 * @phba: pointer to lpfc hba data structure.
3795 *
3796 * This routine is invoked to disable the PCI device that is common to all
3797 * PCI devices.
3798 **/
3799static void
3800lpfc_disable_pci_dev(struct lpfc_hba *phba)
3801{
3802	struct pci_dev *pdev;
3803	int bars;
3804
3805	/* Obtain PCI device reference */
3806	if (!phba->pcidev)
3807		return;
3808	else
3809		pdev = phba->pcidev;
3810	/* Select PCI BARs */
3811	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3812	/* Release PCI resource and disable PCI device */
3813	pci_release_selected_regions(pdev, bars);
3814	pci_disable_device(pdev);
3815	/* Null out PCI private reference to driver */
3816	pci_set_drvdata(pdev, NULL);
3817
3818	return;
3819}
3820
3821/**
3822 * lpfc_reset_hba - Reset a hba
3823 * @phba: pointer to lpfc hba data structure.
3824 *
3825 * This routine is invoked to reset a hba device. It brings the HBA
3826 * offline, performs a board restart, and then brings the board back
3827 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3828 * any outstanding mailbox commands.
3829 **/
3830void
3831lpfc_reset_hba(struct lpfc_hba *phba)
3832{
3833	/* If resets are disabled then set error state and return. */
3834	if (!phba->cfg_enable_hba_reset) {
3835		phba->link_state = LPFC_HBA_ERROR;
3836		return;
3837	}
3838	lpfc_offline_prep(phba);
3839	lpfc_offline(phba);
3840	lpfc_sli_brdrestart(phba);
3841	lpfc_online(phba);
3842	lpfc_unblock_mgmt_io(phba);
3843}
3844
3845/**
3846 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3847 * @phba: pointer to lpfc hba data structure.
3848 *
3849 * This routine is invoked to set up the driver internal resources specific to
3850 * support the SLI-3 HBA device it is attached to.
3851 *
3852 * Return codes
3853 * 	0 - successful
3854 * 	other values - error
3855 **/
3856static int
3857lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3858{
3859	struct lpfc_sli *psli;
3860
3861	/*
3862	 * Initialize timers used by driver
3863	 */
3864
3865	/* Heartbeat timer */
3866	init_timer(&phba->hb_tmofunc);
3867	phba->hb_tmofunc.function = lpfc_hb_timeout;
3868	phba->hb_tmofunc.data = (unsigned long)phba;
3869
3870	psli = &phba->sli;
3871	/* MBOX heartbeat timer */
3872	init_timer(&psli->mbox_tmo);
3873	psli->mbox_tmo.function = lpfc_mbox_timeout;
3874	psli->mbox_tmo.data = (unsigned long) phba;
3875	/* FCP polling mode timer */
3876	init_timer(&phba->fcp_poll_timer);
3877	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3878	phba->fcp_poll_timer.data = (unsigned long) phba;
3879	/* Fabric block timer */
3880	init_timer(&phba->fabric_block_timer);
3881	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3882	phba->fabric_block_timer.data = (unsigned long) phba;
3883	/* EA polling mode timer */
3884	init_timer(&phba->eratt_poll);
3885	phba->eratt_poll.function = lpfc_poll_eratt;
3886	phba->eratt_poll.data = (unsigned long) phba;
3887
3888	/* Host attention work mask setup */
3889	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3890	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3891
3892	/* Get all the module params for configuring this host */
3893	lpfc_get_cfgparam(phba);
3894	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
3895		phba->menlo_flag |= HBA_MENLO_SUPPORT;
3896		/* check for menlo minimum sg count */
3897		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
3898			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
3899	}
3900
3901	/*
3902	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3903	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3904	 * 2 segments are added since the IOCB needs a command and response bde.
3905	 */
3906	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3907		sizeof(struct fcp_rsp) +
3908			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3909
3910	if (phba->cfg_enable_bg) {
3911		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3912		phba->cfg_sg_dma_buf_size +=
3913			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3914	}
3915
3916	/* Also reinitialize the host templates with new values. */
3917	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3918	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3919
3920	phba->max_vpi = LPFC_MAX_VPI;
3921	/* This will be set to correct value after config_port mbox */
3922	phba->max_vports = 0;
3923
3924	/*
3925	 * Initialize the SLI Layer to run with lpfc HBAs.
3926	 */
3927	lpfc_sli_setup(phba);
3928	lpfc_sli_queue_setup(phba);
3929
3930	/* Allocate device driver memory */
3931	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3932		return -ENOMEM;
3933
3934	return 0;
3935}
3936
3937/**
3938 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3939 * @phba: pointer to lpfc hba data structure.
3940 *
3941 * This routine is invoked to unset the driver internal resources set up
3942 * specific for supporting the SLI-3 HBA device it is attached to.
3943 **/
3944static void
3945lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3946{
3947	/* Free device driver memory allocated */
3948	lpfc_mem_free_all(phba);
3949
3950	return;
3951}
3952
3953/**
3954 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3955 * @phba: pointer to lpfc hba data structure.
3956 *
3957 * This routine is invoked to set up the driver internal resources specific to
3958 * support the SLI-4 HBA device it is attached to.
3959 *
3960 * Return codes
3961 * 	0 - successful
3962 * 	other values - error
3963 **/
3964static int
3965lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3966{
3967	struct lpfc_sli *psli;
3968	LPFC_MBOXQ_t *mboxq;
3969	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3970	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3971	struct lpfc_mqe *mqe;
3972	int longs;
3973
3974	/* Before proceed, wait for POST done and device ready */
3975	rc = lpfc_sli4_post_status_check(phba);
3976	if (rc)
3977		return -ENODEV;
3978
3979	/*
3980	 * Initialize timers used by driver
3981	 */
3982
3983	/* Heartbeat timer */
3984	init_timer(&phba->hb_tmofunc);
3985	phba->hb_tmofunc.function = lpfc_hb_timeout;
3986	phba->hb_tmofunc.data = (unsigned long)phba;
3987
3988	psli = &phba->sli;
3989	/* MBOX heartbeat timer */
3990	init_timer(&psli->mbox_tmo);
3991	psli->mbox_tmo.function = lpfc_mbox_timeout;
3992	psli->mbox_tmo.data = (unsigned long) phba;
3993	/* Fabric block timer */
3994	init_timer(&phba->fabric_block_timer);
3995	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3996	phba->fabric_block_timer.data = (unsigned long) phba;
3997	/* EA polling mode timer */
3998	init_timer(&phba->eratt_poll);
3999	phba->eratt_poll.function = lpfc_poll_eratt;
4000	phba->eratt_poll.data = (unsigned long) phba;
4001	/* FCF rediscover timer */
4002	init_timer(&phba->fcf.redisc_wait);
4003	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4004	phba->fcf.redisc_wait.data = (unsigned long)phba;
4005
4006	/*
4007	 * We need to do a READ_CONFIG mailbox command here before
4008	 * calling lpfc_get_cfgparam. For VFs this will report the
4009	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4010	 * All of the resources allocated
4011	 * for this Port are tied to these values.
4012	 */
4013	/* Get all the module params for configuring this host */
4014	lpfc_get_cfgparam(phba);
4015	phba->max_vpi = LPFC_MAX_VPI;
4016	/* This will be set to correct value after the read_config mbox */
4017	phba->max_vports = 0;
4018
4019	/* Program the default value of vlan_id and fc_map */
4020	phba->valid_vlan = 0;
4021	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4022	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4023	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4024
4025	/*
4026	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4027	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4028	 * 2 segments are added since the IOCB needs a command and response bde.
4029	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4030	 * sgl sizes that are a power of 2 are used.
4031	 */
4032	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4033		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4034	/* Feature Level 1 hardware is limited to 2 pages */
4035	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
4036	     LPFC_SLI_INTF_FEATURELEVEL1_1))
4037		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4038	else
4039		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4040	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4041	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4042	     dma_buf_size = dma_buf_size << 1)
4043		;
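	/*
	 * Illustration (exact struct sizes vary by build): if buf_size
	 * comes to roughly 1.2KB for the configured segment count, the
	 * loop above leaves dma_buf_size at 2048, the next power of two,
	 * so a scsi sgl never straddles a 4k page boundary.
	 */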
4044	if (dma_buf_size == max_buf_size)
4045		phba->cfg_sg_seg_cnt = (dma_buf_size -
4046			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4047			(2 * sizeof(struct sli4_sge))) /
4048				sizeof(struct sli4_sge);
4049	phba->cfg_sg_dma_buf_size = dma_buf_size;
4050
4051	/* Initialize buffer queue management fields */
4052	hbq_count = lpfc_sli_hbq_count();
4053	for (i = 0; i < hbq_count; ++i)
4054		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4055	INIT_LIST_HEAD(&phba->rb_pend_list);
4056	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4057	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4058
4059	/*
4060	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4061	 */
4062	/* Initialize the Abort scsi buffer list used by driver */
4063	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4064	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4065	/* This abort list used by worker thread */
4066	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4067
4068	/*
4069	 * Initialize driver internal slow-path work queues
4070	 */
4071
4072	/* Driver internal slow-path CQ Event pool */
4073	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4074	/* Response IOCB work queue list */
4075	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4076	/* Asynchronous event CQ Event work queue list */
4077	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4078	/* Fast-path XRI aborted CQ Event work queue list */
4079	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4080	/* Slow-path XRI aborted CQ Event work queue list */
4081	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4082	/* Receive queue CQ Event work queue list */
4083	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4084
4085	/* Initialize the driver internal SLI layer lists. */
4086	lpfc_sli_setup(phba);
4087	lpfc_sli_queue_setup(phba);
4088
4089	/* Allocate device driver memory */
4090	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4091	if (rc)
4092		return -ENOMEM;
4093
4094	/* Create the bootstrap mailbox command */
4095	rc = lpfc_create_bootstrap_mbox(phba);
4096	if (unlikely(rc))
4097		goto out_free_mem;
4098
4099	/* Set up the host's endian order with the device. */
4100	rc = lpfc_setup_endian_order(phba);
4101	if (unlikely(rc))
4102		goto out_free_bsmbx;
4103
4104	rc = lpfc_sli4_fw_cfg_check(phba);
4105	if (unlikely(rc))
4106		goto out_free_bsmbx;
4107
4108	/* Set up the hba's configuration parameters. */
4109	rc = lpfc_sli4_read_config(phba);
4110	if (unlikely(rc))
4111		goto out_free_bsmbx;
4112
4113	/* Perform a function reset */
4114	rc = lpfc_pci_function_reset(phba);
4115	if (unlikely(rc))
4116		goto out_free_bsmbx;
4117
4118	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4119						       GFP_KERNEL);
4120	if (!mboxq) {
4121		rc = -ENOMEM;
4122		goto out_free_bsmbx;
4123	}
4124
4125	/* Get the Supported Pages. It is always available. */
4126	lpfc_supported_pages(mboxq);
4127	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4128	if (unlikely(rc)) {
4129		rc = -EIO;
4130		mempool_free(mboxq, phba->mbox_mem_pool);
4131		goto out_free_bsmbx;
4132	}
4133
4134	mqe = &mboxq->u.mqe;
4135	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4136	       LPFC_MAX_SUPPORTED_PAGES);
4137	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4138		switch (pn_page[i]) {
4139		case LPFC_SLI4_PARAMETERS:
4140			phba->sli4_hba.pc_sli4_params.supported = 1;
4141			break;
4142		default:
4143			break;
4144		}
4145	}
4146
4147	/* Read the port's SLI4 Parameters capabilities if supported. */
4148	if (phba->sli4_hba.pc_sli4_params.supported)
4149		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4150	mempool_free(mboxq, phba->mbox_mem_pool);
4151	if (rc) {
4152		rc = -EIO;
4153		goto out_free_bsmbx;
4154	}
4155	/* Create all the SLI4 queues */
4156	rc = lpfc_sli4_queue_create(phba);
4157	if (rc)
4158		goto out_free_bsmbx;
4159
4160	/* Create driver internal CQE event pool */
4161	rc = lpfc_sli4_cq_event_pool_create(phba);
4162	if (rc)
4163		goto out_destroy_queue;
4164
4165	/* Initialize and populate the iocb list per host */
4166	rc = lpfc_init_sgl_list(phba);
4167	if (rc) {
4168		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4169				"1400 Failed to initialize sgl list.\n");
4170		goto out_destroy_cq_event_pool;
4171	}
4172	rc = lpfc_init_active_sgl_array(phba);
4173	if (rc) {
4174		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4175				"1430 Failed to initialize sgl list.\n");
4176		goto out_free_sgl_list;
4177	}
4178
4179	rc = lpfc_sli4_init_rpi_hdrs(phba);
4180	if (rc) {
4181		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4182				"1432 Failed to initialize rpi headers.\n");
4183		goto out_free_active_sgl;
4184	}
4185
4186	/* Allocate eligible FCF bmask memory for FCF round robin failover */
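	/*
	 * One bit per FCF table index, rounded up to whole unsigned
	 * longs; e.g. a 32-entry table fits in a single 64-bit long.
	 */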
4187	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4188	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4189					 GFP_KERNEL);
4190	if (!phba->fcf.fcf_rr_bmask) {
4191		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4192				"2759 Failed allocate memory for FCF round "
4193				"robin failover bmask\n");
4194		goto out_remove_rpi_hdrs;
4195	}
4196
4197	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4198				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4199	if (!phba->sli4_hba.fcp_eq_hdl) {
4200		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4201				"2572 Failed allocate memory for fast-path "
4202				"per-EQ handle array\n");
4203		goto out_free_fcf_rr_bmask;
4204	}
4205
4206	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4207				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4208	if (!phba->sli4_hba.msix_entries) {
4209		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4210				"2573 Failed allocate memory for msi-x "
4211				"interrupt vector entries\n");
4212		goto out_free_fcp_eq_hdl;
4213	}
4214
4215	return rc;
4216
4217out_free_fcp_eq_hdl:
4218	kfree(phba->sli4_hba.fcp_eq_hdl);
4219out_free_fcf_rr_bmask:
4220	kfree(phba->fcf.fcf_rr_bmask);
4221out_remove_rpi_hdrs:
4222	lpfc_sli4_remove_rpi_hdrs(phba);
4223out_free_active_sgl:
4224	lpfc_free_active_sgl(phba);
4225out_free_sgl_list:
4226	lpfc_free_sgl_list(phba);
4227out_destroy_cq_event_pool:
4228	lpfc_sli4_cq_event_pool_destroy(phba);
4229out_destroy_queue:
4230	lpfc_sli4_queue_destroy(phba);
4231out_free_bsmbx:
4232	lpfc_destroy_bootstrap_mbox(phba);
4233out_free_mem:
4234	lpfc_mem_free(phba);
4235	return rc;
4236}
4237
4238/**
4239 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4240 * @phba: pointer to lpfc hba data structure.
4241 *
4242 * This routine is invoked to unset the driver internal resources set up
4243 * specific for supporting the SLI-4 HBA device it is attached to.
4244 **/
4245static void
4246lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4247{
4248	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4249
4250	/* unregister default FCFI from the HBA */
4251	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4252
4253	/* Free the default FCR table */
4254	lpfc_sli_remove_dflt_fcf(phba);
4255
4256	/* Free memory allocated for msi-x interrupt vector entries */
4257	kfree(phba->sli4_hba.msix_entries);
4258
4259	/* Free memory allocated for fast-path work queue handles */
4260	kfree(phba->sli4_hba.fcp_eq_hdl);
4261
4262	/* Free the allocated rpi headers. */
4263	lpfc_sli4_remove_rpi_hdrs(phba);
4264	lpfc_sli4_remove_rpis(phba);
4265
4266	/* Free eligible FCF index bmask */
4267	kfree(phba->fcf.fcf_rr_bmask);
4268
4269	/* Free the ELS sgl list */
4270	lpfc_free_active_sgl(phba);
4271	lpfc_free_sgl_list(phba);
4272
4273	/* Free the SCSI sgl management array */
4274	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4275
4276	/* Free the SLI4 queues */
4277	lpfc_sli4_queue_destroy(phba);
4278
4279	/* Free the completion queue EQ event pool */
4280	lpfc_sli4_cq_event_release_all(phba);
4281	lpfc_sli4_cq_event_pool_destroy(phba);
4282
4283	/* Reset SLI4 HBA FCoE function */
4284	lpfc_pci_function_reset(phba);
4285
4286	/* Free the bsmbx region. */
4287	lpfc_destroy_bootstrap_mbox(phba);
4288
4289	/* Free the SLI Layer memory with SLI4 HBAs */
4290	lpfc_mem_free_all(phba);
4291
4292	/* Free the current connect table */
4293	list_for_each_entry_safe(conn_entry, next_conn_entry,
4294		&phba->fcf_conn_rec_list, list) {
4295		list_del_init(&conn_entry->list);
4296		kfree(conn_entry);
4297	}
4298
4299	return;
4300}
4301
4302/**
4303 * lpfc_init_api_table_setup - Set up init api function jump table
4304 * @phba: The hba struct for which this call is being executed.
4305 * @dev_grp: The HBA PCI-Device group number.
4306 *
4307 * This routine sets up the device INIT interface API function jump table
4308 * in @phba struct.
4309 *
4310 * Returns: 0 - success, -ENODEV - failure.
4311 **/
4312int
4313lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4314{
4315	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4316	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4317	switch (dev_grp) {
4318	case LPFC_PCI_DEV_LP:
4319		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4320		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4321		phba->lpfc_stop_port = lpfc_stop_port_s3;
4322		break;
4323	case LPFC_PCI_DEV_OC:
4324		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4325		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4326		phba->lpfc_stop_port = lpfc_stop_port_s4;
4327		break;
4328	default:
4329		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4330				"1431 Invalid HBA PCI-device group: 0x%x\n",
4331				dev_grp);
4332		return -ENODEV;
4333		break;
4334	}
4335	return 0;
4336}
4337
4338/**
4339 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4340 * @phba: pointer to lpfc hba data structure.
4341 *
4342 * This routine is invoked to set up the driver internal resources before the
4343 * device specific resource setup to support the HBA device it is attached to.
4344 *
4345 * Return codes
4346 *	0 - successful
4347 *	other values - error
4348 **/
4349static int
4350lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4351{
4352	/*
4353	 * Driver resources common to all SLI revisions
4354	 */
4355	atomic_set(&phba->fast_event_count, 0);
4356	spin_lock_init(&phba->hbalock);
4357
4358	/* Initialize ndlp management spinlock */
4359	spin_lock_init(&phba->ndlp_lock);
4360
4361	INIT_LIST_HEAD(&phba->port_list);
4362	INIT_LIST_HEAD(&phba->work_list);
4363	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4364
4365	/* Initialize the wait queue head for the kernel thread */
4366	init_waitqueue_head(&phba->work_waitq);
4367
4368	/* Initialize the scsi buffer list used by driver for scsi IO */
4369	spin_lock_init(&phba->scsi_buf_list_lock);
4370	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4371
4372	/* Initialize the fabric iocb list */
4373	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4374
4375	/* Initialize list to save ELS buffers */
4376	INIT_LIST_HEAD(&phba->elsbuf);
4377
4378	/* Initialize FCF connection rec list */
4379	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4380
4381	return 0;
4382}
4383
4384/**
4385 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4386 * @phba: pointer to lpfc hba data structure.
4387 *
4388 * This routine is invoked to set up the driver internal resources after the
4389 * device specific resource setup to support the HBA device it is attached to.
4390 *
4391 * Return codes
4392 * 	0 - successful
4393 * 	other values - error
4394 **/
4395static int
4396lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4397{
4398	int error;
4399
4400	/* Startup the kernel thread for this host adapter. */
4401	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4402					  "lpfc_worker_%d", phba->brd_no);
4403	if (IS_ERR(phba->worker_thread)) {
4404		error = PTR_ERR(phba->worker_thread);
4405		return error;
4406	}
4407
4408	return 0;
4409}
4410
4411/**
4412 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4413 * @phba: pointer to lpfc hba data structure.
4414 *
4415 * This routine is invoked to unset the driver internal resources set up after
4416 * the device specific resource setup for supporting the HBA device it is
4417 * attached to.
4418 **/
4419static void
4420lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4421{
4422	/* Stop kernel worker thread */
4423	kthread_stop(phba->worker_thread);
4424}
4425
4426/**
4427 * lpfc_free_iocb_list - Free iocb list.
4428 * @phba: pointer to lpfc hba data structure.
4429 *
4430 * This routine is invoked to free the driver's IOCB list and memory.
4431 **/
4432static void
4433lpfc_free_iocb_list(struct lpfc_hba *phba)
4434{
4435	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4436
4437	spin_lock_irq(&phba->hbalock);
4438	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4439				 &phba->lpfc_iocb_list, list) {
4440		list_del(&iocbq_entry->list);
4441		kfree(iocbq_entry);
4442		phba->total_iocbq_bufs--;
4443	}
4444	spin_unlock_irq(&phba->hbalock);
4445
4446	return;
4447}
4448
4449/**
4450 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4451 * @phba: pointer to lpfc hba data structure.
4452 *
4453 * This routine is invoked to allocate and initialize the driver's IOCB
4454 * list and set up the IOCB tag array accordingly.
4455 *
4456 * Return codes
4457 *	0 - successful
4458 *	other values - error
4459 **/
4460static int
4461lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4462{
4463	struct lpfc_iocbq *iocbq_entry = NULL;
4464	uint16_t iotag;
4465	int i;
4466
4467	/* Initialize and populate the iocb list per host.  */
4468	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4469	for (i = 0; i < iocb_count; i++) {
4470		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4471		if (iocbq_entry == NULL) {
4472			printk(KERN_ERR "%s: only allocated %d iocbs of "
4473				"expected %d count. Unloading driver.\n",
4474				__func__, i, iocb_count);
4475			goto out_free_iocbq;
4476		}
4477
4478		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4479		if (iotag == 0) {
4480			kfree(iocbq_entry);
4481			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4482				"Unloading driver.\n", __func__);
4483			goto out_free_iocbq;
4484		}
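		/* No XRI is associated with the iocb yet; on an SLI4 port
		 * one is assigned when the request is actually issued.
		 */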
4485		iocbq_entry->sli4_xritag = NO_XRI;
4486
4487		spin_lock_irq(&phba->hbalock);
4488		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4489		phba->total_iocbq_bufs++;
4490		spin_unlock_irq(&phba->hbalock);
4491	}
4492
4493	return 0;
4494
4495out_free_iocbq:
4496	lpfc_free_iocb_list(phba);
4497
4498	return -ENOMEM;
4499}
4500
4501/**
4502 * lpfc_free_sgl_list - Free sgl list.
4503 * @phba: pointer to lpfc hba data structure.
4504 *
4505 * This routine is invoked to free the driver's sgl list and memory.
4506 **/
4507static void
4508lpfc_free_sgl_list(struct lpfc_hba *phba)
4509{
4510	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4511	LIST_HEAD(sglq_list);
4512	int rc = 0;
4513
4514	spin_lock_irq(&phba->hbalock);
4515	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4516	spin_unlock_irq(&phba->hbalock);
4517
4518	list_for_each_entry_safe(sglq_entry, sglq_next,
4519				 &sglq_list, list) {
4520		list_del(&sglq_entry->list);
4521		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4522		kfree(sglq_entry);
4523		phba->sli4_hba.total_sglq_bufs--;
4524	}
4525	rc = lpfc_sli4_remove_all_sgl_pages(phba);
4526	if (rc) {
4527		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4528			"2005 Unable to deregister pages from HBA: %x\n", rc);
4529	}
4530	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4531}
4532
4533/**
4534 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4535 * @phba: pointer to lpfc hba data structure.
4536 *
4537 * This routine is invoked to allocate the driver's active sgl memory.
4538 * This array will hold the sglq_entry's for active IOs.
4539 **/
4540static int
4541lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4542{
4543	int size;
4544	size = sizeof(struct lpfc_sglq *);
4545	size *= phba->sli4_hba.max_cfg_param.max_xri;
4546
4547	phba->sli4_hba.lpfc_sglq_active_list =
4548		kzalloc(size, GFP_KERNEL);
4549	if (!phba->sli4_hba.lpfc_sglq_active_list)
4550		return -ENOMEM;
4551	return 0;
4552}
4553
4554/**
4555 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4556 * @phba: pointer to lpfc hba data structure.
4557 *
4558 * This routine is invoked to walk through the array of active sglq entries
4559 * and free all of the resources.
4560 * This is just a place holder for now.
4561 **/
4562static void
4563lpfc_free_active_sgl(struct lpfc_hba *phba)
4564{
4565	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4566}
4567
4568/**
4569 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4570 * @phba: pointer to lpfc hba data structure.
4571 *
4572 * This routine is invoked to allocate and initialize the driver's sgl
4573 * list and set up the sgl xritag tag array accordingly.
4574 *
4575 * Return codes
4576 *	0 - successful
4577 *	other values - error
4578 **/
4579static int
4580lpfc_init_sgl_list(struct lpfc_hba *phba)
4581{
4582	struct lpfc_sglq *sglq_entry = NULL;
4583	int i;
4584	int els_xri_cnt;
4585
4586	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4587	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4588				"2400 lpfc_init_sgl_list els %d.\n",
4589				els_xri_cnt);
4590	/* Initialize and populate the sglq list per host/VF. */
4591	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4592	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4593
4594	/* Sanity check on XRI management */
4595	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4596		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4597				"2562 No room left for SCSI XRI allocation: "
4598				"max_xri=%d, els_xri=%d\n",
4599				phba->sli4_hba.max_cfg_param.max_xri,
4600				els_xri_cnt);
4601		return -ENOMEM;
4602	}
4603
4604	/* Allocate memory for the ELS XRI management array */
4605	phba->sli4_hba.lpfc_els_sgl_array =
4606			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4607			GFP_KERNEL);
4608
4609	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4610		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4611				"2401 Failed to allocate memory for ELS "
4612				"XRI management array of size %d.\n",
4613				els_xri_cnt);
4614		return -ENOMEM;
4615	}
4616
4617	/* Keep the SCSI XRI into the XRI management array */
4618	phba->sli4_hba.scsi_xri_max =
4619			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4620	phba->sli4_hba.scsi_xri_cnt = 0;
4621
4622	phba->sli4_hba.lpfc_scsi_psb_array =
4623			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4624			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4625
4626	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4627		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4628				"2563 Failed to allocate memory for SCSI "
4629				"XRI management array of size %d.\n",
4630				phba->sli4_hba.scsi_xri_max);
4631		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4632		return -ENOMEM;
4633	}
4634
4635	for (i = 0; i < els_xri_cnt; i++) {
4636		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4637		if (sglq_entry == NULL) {
4638			printk(KERN_ERR "%s: only allocated %d sgls of "
4639				"expected %d count. Unloading driver.\n",
4640				__func__, i, els_xri_cnt);
4641			goto out_free_mem;
4642		}
4643
4644		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4645		if (sglq_entry->sli4_xritag == NO_XRI) {
4646			kfree(sglq_entry);
4647			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4648				"Unloading driver.\n", __func__);
4649			goto out_free_mem;
4650		}
4651		sglq_entry->buff_type = GEN_BUFF_TYPE;
4652		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4653		if (sglq_entry->virt == NULL) {
4654			kfree(sglq_entry);
4655			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4656				"Unloading driver.\n", __func__);
4657			goto out_free_mem;
4658		}
4659		sglq_entry->sgl = sglq_entry->virt;
4660		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4661
4662		/* The list order is used by later block SGL registration */
4663		spin_lock_irq(&phba->hbalock);
4664		sglq_entry->state = SGL_FREED;
4665		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4666		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4667		phba->sli4_hba.total_sglq_bufs++;
4668		spin_unlock_irq(&phba->hbalock);
4669	}
4670	return 0;
4671
4672out_free_mem:
4673	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4674	lpfc_free_sgl_list(phba);
4675	return -ENOMEM;
4676}
4677
4678/**
4679 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4680 * @phba: pointer to lpfc hba data structure.
4681 *
4682 * This routine is invoked to post rpi header templates to the
4683 * HBA consistent with the SLI-4 interface spec.  This routine
4684 * posts a PAGE_SIZE memory region to the port to hold up to
4685 * 64 rpi context headers.
4686 * No locks are held here because this is an initialization routine
4687 * called only from probe or lpfc_online when interrupts are not
4688 * enabled and the driver is reinitializing the device.
4689 *
4690 * Return codes
4691 * 	0 - successful
4692 * 	ENOMEM - No available memory
4693 *      EIO - The mailbox failed to complete successfully.
4694 **/
4695int
4696lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4697{
4698	int rc = 0;
4699	int longs;
4700	uint16_t rpi_count;
4701	struct lpfc_rpi_hdr *rpi_hdr;
4702
4703	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4704
4705	/*
4706	 * Provision an rpi bitmask range for discovery, sized to cover
4707	 * rpis up to rpi_base + max_rpi - 1 as computed below.
4708	 */
4709	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4710		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4711
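	/* Size the bitmask as one bit per rpi, rounded up to whole longs. */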
4712	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4713	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4714					   GFP_KERNEL);
4715	if (!phba->sli4_hba.rpi_bmask)
4716		return -ENOMEM;
4717
4718	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4719	if (!rpi_hdr) {
4720		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4721				"0391 Error during rpi post operation\n");
4722		lpfc_sli4_remove_rpis(phba);
4723		rc = -ENODEV;
4724	}
4725
4726	return rc;
4727}
4728
4729/**
4730 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4731 * @phba: pointer to lpfc hba data structure.
4732 *
4733 * This routine is invoked to allocate a single 4KB memory region to
4734 * support rpis and stores them in the phba.  This single region
4735 * provides support for up to 64 rpis.  The region is used globally
4736 * by the device.
4737 *
4738 * Returns:
4739 *   A valid rpi hdr on success.
4740 *   A NULL pointer on any failure.
4741 **/
4742struct lpfc_rpi_hdr *
4743lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4744{
4745	uint16_t rpi_limit, curr_rpi_range;
4746	struct lpfc_dmabuf *dmabuf;
4747	struct lpfc_rpi_hdr *rpi_hdr;
4748
4749	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4750		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4751
4752	spin_lock_irq(&phba->hbalock);
4753	curr_rpi_range = phba->sli4_hba.next_rpi;
4754	spin_unlock_irq(&phba->hbalock);
4755
4756	/*
4757	 * The port has a limited number of rpis. The increment here
4758	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4759	 * and to allow the full max_rpi range per port.
4760	 */
4761	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4762		return NULL;
4763
4764	/*
4765	 * First allocate the protocol header region for the port.  The
4766	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4767	 */
4768	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4769	if (!dmabuf)
4770		return NULL;
4771
4772	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4773					  LPFC_HDR_TEMPLATE_SIZE,
4774					  &dmabuf->phys,
4775					  GFP_KERNEL);
4776	if (!dmabuf->virt) {
4777		rpi_hdr = NULL;
4778		goto err_free_dmabuf;
4779	}
4780
4781	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4782	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4783		rpi_hdr = NULL;
4784		goto err_free_coherent;
4785	}
4786
4787	/* Save the rpi header data for cleanup later. */
4788	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4789	if (!rpi_hdr)
4790		goto err_free_coherent;
4791
4792	rpi_hdr->dmabuf = dmabuf;
4793	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4794	rpi_hdr->page_count = 1;
4795	spin_lock_irq(&phba->hbalock);
4796	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4797	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4798
4799	/*
4800	 * The next_rpi stores the next modulo-64 rpi value to post
4801	 * in any subsequent rpi memory region postings.
4802	 */
4803	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4804	spin_unlock_irq(&phba->hbalock);
4805	return rpi_hdr;
4806
4807 err_free_coherent:
4808	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4809			  dmabuf->virt, dmabuf->phys);
4810 err_free_dmabuf:
4811	kfree(dmabuf);
4812	return NULL;
4813}
4814
4815/**
4816 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4817 * @phba: pointer to lpfc hba data structure.
4818 *
4819 * This routine is invoked to remove all memory resources allocated
4820 * to support rpis. This routine presumes the caller has released all
4821 * rpis consumed by fabric or port logins and is prepared to have
4822 * the header pages removed.
4823 **/
4824void
4825lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4826{
4827	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4828
4829	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4830				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4831		list_del(&rpi_hdr->list);
4832		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4833				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4834		kfree(rpi_hdr->dmabuf);
4835		kfree(rpi_hdr);
4836	}
4837
4838	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4839	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4840}
4841
4842/**
4843 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4844 * @pdev: pointer to pci device data structure.
4845 *
4846 * This routine is invoked to allocate the driver hba data structure for an
4847 * HBA device. If the allocation is successful, the phba reference to the
4848 * PCI device data structure is set.
4849 *
4850 * Return codes
4851 *      pointer to @phba - successful
4852 *      NULL - error
4853 **/
4854static struct lpfc_hba *
4855lpfc_hba_alloc(struct pci_dev *pdev)
4856{
4857	struct lpfc_hba *phba;
4858
4859	/* Allocate memory for HBA structure */
4860	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4861	if (!phba) {
4862		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4863		return NULL;
4864	}
4865
4866	/* Set reference to PCI device in HBA structure */
4867	phba->pcidev = pdev;
4868
4869	/* Assign an unused board number */
4870	phba->brd_no = lpfc_get_instance();
4871	if (phba->brd_no < 0) {
4872		kfree(phba);
4873		return NULL;
4874	}
4875
4876	spin_lock_init(&phba->ct_ev_lock);
4877	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4878
4879	return phba;
4880}
4881
4882/**
4883 * lpfc_hba_free - Free driver hba data structure with a device.
4884 * @phba: pointer to lpfc hba data structure.
4885 *
4886 * This routine is invoked to free the driver hba data structure with an
4887 * HBA device.
4888 **/
4889static void
4890lpfc_hba_free(struct lpfc_hba *phba)
4891{
4892	/* Release the driver assigned board number */
4893	idr_remove(&lpfc_hba_index, phba->brd_no);
4894
4895	kfree(phba);
4896	return;
4897}
4898
4899/**
4900 * lpfc_create_shost - Create hba physical port with associated scsi host.
4901 * @phba: pointer to lpfc hba data structure.
4902 *
4903 * This routine is invoked to create HBA physical port and associate a SCSI
4904 * host with it.
4905 *
4906 * Return codes
4907 *      0 - successful
4908 *      other values - error
4909 **/
4910static int
4911lpfc_create_shost(struct lpfc_hba *phba)
4912{
4913	struct lpfc_vport *vport;
4914	struct Scsi_Host  *shost;
4915
4916	/* Initialize HBA FC structure */
4917	phba->fc_edtov = FF_DEF_EDTOV;
4918	phba->fc_ratov = FF_DEF_RATOV;
4919	phba->fc_altov = FF_DEF_ALTOV;
4920	phba->fc_arbtov = FF_DEF_ARBTOV;
4921
4922	atomic_set(&phba->sdev_cnt, 0);
4923	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4924	if (!vport)
4925		return -ENODEV;
4926
4927	shost = lpfc_shost_from_vport(vport);
4928	phba->pport = vport;
4929	lpfc_debugfs_initialize(vport);
4930	/* Put reference to SCSI host to driver's device private data */
4931	pci_set_drvdata(phba->pcidev, shost);
4932
4933	return 0;
4934}
4935
4936/**
4937 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4938 * @phba: pointer to lpfc hba data structure.
4939 *
4940 * This routine is invoked to destroy HBA physical port and the associated
4941 * SCSI host.
4942 **/
4943static void
4944lpfc_destroy_shost(struct lpfc_hba *phba)
4945{
4946	struct lpfc_vport *vport = phba->pport;
4947
4948	/* Destroy physical port that associated with the SCSI host */
4949	destroy_port(vport);
4950
4951	return;
4952}
4953
4954/**
4955 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4956 * @phba: pointer to lpfc hba data structure.
4957 * @shost: the shost to be used to detect Block guard settings.
4958 *
4959 * This routine sets up the local Block guard protocol settings for @shost.
4960 * This routine also allocates memory for debugging bg buffers.
4961 **/
4962static void
4963lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4964{
4965	int pagecnt = 10;
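	/*
	 * pagecnt is used below as an allocation order: each
	 * __get_free_pages() attempt requests 2^pagecnt contiguous
	 * pages, and the order is decremented after each failure.
	 */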
4966	if (lpfc_prot_mask && lpfc_prot_guard) {
4967		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4968				"1478 Registering BlockGuard with the "
4969				"SCSI layer\n");
4970		scsi_host_set_prot(shost, lpfc_prot_mask);
4971		scsi_host_set_guard(shost, lpfc_prot_guard);
4972	}
4973	if (!_dump_buf_data) {
4974		while (pagecnt) {
4975			spin_lock_init(&_dump_buf_lock);
4976			_dump_buf_data =
4977				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4978			if (_dump_buf_data) {
4979				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4980					"9043 BLKGRD: allocated %d pages for "
4981				       "_dump_buf_data at 0x%p\n",
4982				       (1 << pagecnt), _dump_buf_data);
4983				_dump_buf_data_order = pagecnt;
4984				memset(_dump_buf_data, 0,
4985				       ((1 << PAGE_SHIFT) << pagecnt));
4986				break;
4987			} else
4988				--pagecnt;
4989		}
4990		if (!_dump_buf_data_order)
4991			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4992				"9044 BLKGRD: ERROR unable to allocate "
4993			       "memory for hexdump\n");
4994	} else
4995		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4996			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4997		       "\n", _dump_buf_data);
4998	if (!_dump_buf_dif) {
4999		while (pagecnt) {
5000			_dump_buf_dif =
5001				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5002			if (_dump_buf_dif) {
5003				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5004					"9046 BLKGRD: allocated %d pages for "
5005				       "_dump_buf_dif at 0x%p\n",
5006				       (1 << pagecnt), _dump_buf_dif);
5007				_dump_buf_dif_order = pagecnt;
5008				memset(_dump_buf_dif, 0,
5009				       ((1 << PAGE_SHIFT) << pagecnt));
5010				break;
5011			} else
5012				--pagecnt;
5013		}
5014		if (!_dump_buf_dif_order)
5015			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5016			"9047 BLKGRD: ERROR unable to allocate "
5017			       "memory for hexdump\n");
5018	} else
5019		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5020			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5021		       _dump_buf_dif);
5022}
5023
5024/**
5025 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5026 * @phba: pointer to lpfc hba data structure.
5027 *
5028 * This routine is invoked to perform all the necessary post initialization
5029 * setup for the device.
5030 **/
5031static void
5032lpfc_post_init_setup(struct lpfc_hba *phba)
5033{
5034	struct Scsi_Host  *shost;
5035	struct lpfc_adapter_event_header adapter_event;
5036
5037	/* Get the default values for Model Name and Description */
5038	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5039
5040	/*
5041	 * hba setup may have changed the hba_queue_depth so we need to
5042	 * adjust the value of can_queue.
5043	 */
5044	shost = pci_get_drvdata(phba->pcidev);
5045	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5046	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5047		lpfc_setup_bg(phba, shost);
5048
5049	lpfc_host_attrib_init(shost);
5050
5051	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5052		spin_lock_irq(shost->host_lock);
5053		lpfc_poll_start_timer(phba);
5054		spin_unlock_irq(shost->host_lock);
5055	}
5056
5057	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5058			"0428 Perform SCSI scan\n");
5059	/* Send board arrival event to upper layer */
5060	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5061	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5062	fc_host_post_vendor_event(shost, fc_get_event_number(),
5063				  sizeof(adapter_event),
5064				  (char *) &adapter_event,
5065				  LPFC_NL_VENDOR_ID);
5066	return;
5067}
5068
5069/**
5070 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5071 * @phba: pointer to lpfc hba data structure.
5072 *
5073 * This routine is invoked to set up the PCI device memory space for device
5074 * with SLI-3 interface spec.
5075 *
5076 * Return codes
5077 * 	0 - successful
5078 * 	other values - error
5079 **/
5080static int
5081lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5082{
5083	struct pci_dev *pdev;
5084	unsigned long bar0map_len, bar2map_len;
5085	int i, hbq_count;
5086	void *ptr;
5087	int error = -ENODEV;
5088
5089	/* Obtain PCI device reference */
5090	if (!phba->pcidev)
5091		return error;
5092	else
5093		pdev = phba->pcidev;
5094
5095	/* Set the device DMA mask size */
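	/*
	 * Prefer 64-bit DMA addressing; if either the streaming or the
	 * coherent mask cannot be set, fall back to 32-bit before
	 * giving up.
	 */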
5096	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5097	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5098		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5099		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5100			return error;
5101		}
5102	}
5103
5104	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5105	 * required by each mapping.
5106	 */
5107	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5108	bar0map_len = pci_resource_len(pdev, 0);
5109
5110	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5111	bar2map_len = pci_resource_len(pdev, 2);
5112
5113	/* Map HBA SLIM to a kernel virtual address. */
5114	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5115	if (!phba->slim_memmap_p) {
5116		dev_printk(KERN_ERR, &pdev->dev,
5117			   "ioremap failed for SLIM memory.\n");
5118		goto out;
5119	}
5120
5121	/* Map HBA Control Registers to a kernel virtual address. */
5122	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5123	if (!phba->ctrl_regs_memmap_p) {
5124		dev_printk(KERN_ERR, &pdev->dev,
5125			   "ioremap failed for HBA control registers.\n");
5126		goto out_iounmap_slim;
5127	}
5128
5129	/* Allocate memory for SLI-2 structures */
5130	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5131					       SLI2_SLIM_SIZE,
5132					       &phba->slim2p.phys,
5133					       GFP_KERNEL);
5134	if (!phba->slim2p.virt)
5135		goto out_iounmap;
5136
5137	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5138	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5139	phba->mbox_ext = (phba->slim2p.virt +
5140		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5141	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5142	phba->IOCBs = (phba->slim2p.virt +
5143		       offsetof(struct lpfc_sli2_slim, IOCBs));
5144
5145	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5146						 lpfc_sli_hbq_size(),
5147						 &phba->hbqslimp.phys,
5148						 GFP_KERNEL);
5149	if (!phba->hbqslimp.virt)
5150		goto out_free_slim;
5151
5152	hbq_count = lpfc_sli_hbq_count();
5153	ptr = phba->hbqslimp.virt;
5154	for (i = 0; i < hbq_count; ++i) {
5155		phba->hbqs[i].hbq_virt = ptr;
5156		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5157		ptr += (lpfc_hbq_defs[i]->entry_count *
5158			sizeof(struct lpfc_hbq_entry));
5159	}
5160	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5161	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5162
5163	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5164
5165	INIT_LIST_HEAD(&phba->rb_pend_list);
5166
5167	phba->MBslimaddr = phba->slim_memmap_p;
5168	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5169	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5170	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5171	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5172
5173	return 0;
5174
5175out_free_slim:
5176	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5177			  phba->slim2p.virt, phba->slim2p.phys);
5178out_iounmap:
5179	iounmap(phba->ctrl_regs_memmap_p);
5180out_iounmap_slim:
5181	iounmap(phba->slim_memmap_p);
5182out:
5183	return error;
5184}
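
/*
 * A note on the DMA-mask fallback above: the driver first asks for full
 * 64-bit streaming and coherent DMA and, only if the platform refuses,
 * falls back to 32-bit masks.  On kernels that provide it, the same
 * policy can be written with dma_set_mask_and_coherent(); a minimal
 * sketch, illustrative only and not part of this driver:
 */
#if 0	/* illustrative sketch only */
static int example_set_dma_masks(struct device *dev)
{
	/* Prefer 64-bit DMA addressing; fall back to 32-bit. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0)
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
#endif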
5185
5186/**
5187 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5188 * @phba: pointer to lpfc hba data structure.
5189 *
5190 * This routine is invoked to unset the PCI device memory space for device
5191 * with SLI-3 interface spec.
5192 **/
5193static void
5194lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5195{
5196	struct pci_dev *pdev;
5197
5198	/* Obtain PCI device reference */
5199	if (!phba->pcidev)
5200		return;
5201	else
5202		pdev = phba->pcidev;
5203
5204	/* Free coherent DMA memory allocated */
5205	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5206			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5207	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5208			  phba->slim2p.virt, phba->slim2p.phys);
5209
5210	/* I/O memory unmap */
5211	iounmap(phba->ctrl_regs_memmap_p);
5212	iounmap(phba->slim_memmap_p);
5213
5214	return;
5215}
5216
5217/**
5218 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5219 * @phba: pointer to lpfc hba data structure.
5220 *
5221 * This routine is invoked to wait for the SLI4 device Power On Self Test
5222 * (POST) to complete and to check the status.
5223 *
5224 * Return 0 if successful, otherwise -ENODEV.
5225 **/
5226int
5227lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5228{
5229	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5230	int i, port_error = -ENODEV;
5231
5232	if (!phba->sli4_hba.STAregaddr)
5233		return -ENODEV;
5234
5235	/* Wait up to 30 seconds for the SLI Port POST to be done and ready */
5236	for (i = 0; i < 3000; i++) {
5237		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5238		/* Encounter fatal POST error, break out */
5239		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5240			port_error = -ENODEV;
5241			break;
5242		}
5243		if (LPFC_POST_STAGE_ARMFW_READY ==
5244		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5245			port_error = 0;
5246			break;
5247		}
5248		msleep(10);
5249	}
5250
5251	if (port_error)
5252		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5253			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5254			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5255			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5256			bf_get(lpfc_hst_state_perr, &sta_reg),
5257			bf_get(lpfc_hst_state_sfi, &sta_reg),
5258			bf_get(lpfc_hst_state_nip, &sta_reg),
5259			bf_get(lpfc_hst_state_ipc, &sta_reg),
5260			bf_get(lpfc_hst_state_xrom, &sta_reg),
5261			bf_get(lpfc_hst_state_dl, &sta_reg),
5262			bf_get(lpfc_hst_state_port_status, &sta_reg));
5263
5264	/* Log device information */
5265	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5266	if (bf_get(lpfc_sli_intf_valid,
5267		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5268		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5269				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5270				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5271				bf_get(lpfc_sli_intf_sli_family,
5272				       &phba->sli4_hba.sli_intf),
5273				bf_get(lpfc_sli_intf_slirev,
5274				       &phba->sli4_hba.sli_intf),
5275				bf_get(lpfc_sli_intf_featurelevel1,
5276				       &phba->sli4_hba.sli_intf),
5277				bf_get(lpfc_sli_intf_featurelevel2,
5278				       &phba->sli4_hba.sli_intf));
5279	}
5280	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5281	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5282	/* With unrecoverable error, log the error message and return error */
5283	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5284	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5285	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5286	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5287		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5288				"1422 HBA Unrecoverable error: "
5289				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5290				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5291				uerrlo_reg.word0, uerrhi_reg.word0,
5292				phba->sli4_hba.ue_mask_lo,
5293				phba->sli4_hba.ue_mask_hi);
5294		return -ENODEV;
5295	}
5296
5297	return port_error;
5298}
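
/*
 * The unrecoverable-error check above is a straightforward mask test:
 * (~ue_mask & uerr) is nonzero exactly when at least one *unmasked*
 * bit is set in the corresponding error register.  For example, with
 * ue_mask_lo = 0x00000001 and uerr_lo = 0x00000003, bit 0 is ignored
 * and bit 1 still reports an error.
 */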
5299
5300/**
5301 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5302 * @phba: pointer to lpfc hba data structure.
5303 *
5304 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5305 * memory map.
5306 **/
5307static void
5308lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5309{
5310	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5311					LPFC_UERR_STATUS_LO;
5312	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5313					LPFC_UERR_STATUS_HI;
5314	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5315					LPFC_UE_MASK_LO;
5316	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5317					LPFC_UE_MASK_HI;
5318	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5319					LPFC_SLI_INTF;
5320}
5321
5322/**
5323 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5324 * @phba: pointer to lpfc hba data structure.
5325 *
5326 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5327 * memory map.
5328 **/
5329static void
5330lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5331{
5332
5333	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5334				    LPFC_HST_STATE;
5335	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5336				    LPFC_HST_ISR0;
5337	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5338				    LPFC_HST_IMR0;
5339	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5340				     LPFC_HST_ISCR0;
5341	return;
5342}
5343
5344/**
5345 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5346 * @phba: pointer to lpfc hba data structure.
5347 * @vf: virtual function number
5348 *
5349 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5350 * based on the given virtual function number, @vf.
5351 *
5352 * Return 0 if successful, otherwise -ENODEV.
5353 **/
5354static int
5355lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5356{
5357	if (vf > LPFC_VIR_FUNC_MAX)
5358		return -ENODEV;
5359
5360	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5361				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5362	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5363				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5364	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5365				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5366	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5367				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5368	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5369				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5370	return 0;
5371}
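
/*
 * Worked example of the doorbell mapping above: each virtual function
 * owns one LPFC_VFR_PAGE_SIZE window within BAR2, so a register lives at
 *
 *	drbl_regs_memmap_p + (vf * LPFC_VFR_PAGE_SIZE) + register_offset
 *
 * For vf = 1, the WQ doorbell is therefore exactly one VF page above
 * VF 0's WQ doorbell, at the same LPFC_WQ_DOORBELL offset within the page.
 */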
5372
5373/**
5374 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5375 * @phba: pointer to lpfc hba data structure.
5376 *
5377 * This routine is invoked to create the bootstrap mailbox
5378 * region consistent with the SLI-4 interface spec.  This
5379 * routine allocates all memory necessary to communicate
5380 * mailbox commands to the port and sets up all alignment
5381 * needs.  No locks are expected to be held when calling
5382 * this routine.
5383 *
5384 * Return codes
5385 * 	0 - successful
5386 * 	ENOMEM - could not allocate memory.
5387 **/
5388static int
5389lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5390{
5391	uint32_t bmbx_size;
5392	struct lpfc_dmabuf *dmabuf;
5393	struct dma_address *dma_address;
5394	uint32_t pa_addr;
5395	uint64_t phys_addr;
5396
5397	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5398	if (!dmabuf)
5399		return -ENOMEM;
5400
5401	/*
5402	 * The bootstrap mailbox region consists of 2 parts
5403	 * plus an alignment restriction of 16 bytes.
5404	 */
5405	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5406	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5407					  bmbx_size,
5408					  &dmabuf->phys,
5409					  GFP_KERNEL);
5410	if (!dmabuf->virt) {
5411		kfree(dmabuf);
5412		return -ENOMEM;
5413	}
5414	memset(dmabuf->virt, 0, bmbx_size);
5415
5416	/*
5417	 * Initialize the bootstrap mailbox pointers now so that the register
5418	 * operations are simple later.  The mailbox dma address is required
5419	 * to be 16-byte aligned.  Also align the virtual memory as each
5420	 * mailbox is copied into the bmbx mailbox region before issuing the
5421	 * command to the port.
5422	 */
5423	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5424	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5425
5426	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5427					      LPFC_ALIGN_16_BYTE);
5428	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5429					      LPFC_ALIGN_16_BYTE);
5430
5431	/*
5432	 * Set the high and low physical addresses now.  The SLI4 alignment
5433	 * requirement is 16 bytes and the mailbox is posted to the port
5434	 * as two 30-bit addresses.  The other data is a bit marking whether
5435	 * the 30-bit address is the high or low address.
5436	 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
5437	 * cleanly on 32-bit machines.
5438	 */
5439	dma_address = &phba->sli4_hba.bmbx.dma_address;
5440	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5441	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5442	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5443					   LPFC_BMBX_BIT1_ADDR_HI);
5444
5445	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5446	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5447					   LPFC_BMBX_BIT1_ADDR_LO);
5448	return 0;
5449}
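
/*
 * Worked example of the address split above.  With a 16-byte-aligned
 * physical address the low 4 bits are zero, so the port only needs
 * bits [63:4], carried as two 30-bit words:
 *
 *	addr_lo bits = (aphys >> 4)  & 0x3fffffff	(bits 33:4)
 *	addr_hi bits = (aphys >> 34) & 0x3fffffff	(bits 63:34)
 *
 * Each word is then shifted left by 2 so the LPFC_BMBX_BIT1_ADDR_HI/LO
 * marker can sit in the low bits, telling the port which half of the
 * address it is receiving.
 */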
5450
5451/**
5452 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5453 * @phba: pointer to lpfc hba data structure.
5454 *
5455 * This routine is invoked to teardown the bootstrap mailbox
5456 * region and release all host resources. This routine requires
5457 * the caller to ensure that all mailbox commands have been recovered, that no
5458 * additional mailbox commands are sent, and that interrupts are disabled
5459 * before calling this routine.
5460 *
5461 **/
5462static void
5463lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5464{
5465	dma_free_coherent(&phba->pcidev->dev,
5466			  phba->sli4_hba.bmbx.bmbx_size,
5467			  phba->sli4_hba.bmbx.dmabuf->virt,
5468			  phba->sli4_hba.bmbx.dmabuf->phys);
5469
5470	kfree(phba->sli4_hba.bmbx.dmabuf);
5471	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5472}
5473
5474/**
5475 * lpfc_sli4_read_config - Get the config parameters.
5476 * @phba: pointer to lpfc hba data structure.
5477 *
5478 * This routine is invoked to read the configuration parameters from the HBA.
5479 * The configuration parameters are used to set the base and maximum values
5480 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5481 * allocation for the port.
5482 *
5483 * Return codes
5484 * 	0 - successful
5485 * 	ENOMEM - No available memory
5486 *      EIO - The mailbox failed to complete successfully.
5487 **/
5488static int
5489lpfc_sli4_read_config(struct lpfc_hba *phba)
5490{
5491	LPFC_MBOXQ_t *pmb;
5492	struct lpfc_mbx_read_config *rd_config;
5493	uint32_t rc = 0;
5494
5495	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5496	if (!pmb) {
5497		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5498				"2011 Unable to allocate memory for issuing "
5499				"SLI_CONFIG_SPECIAL mailbox command\n");
5500		return -ENOMEM;
5501	}
5502
5503	lpfc_read_config(phba, pmb);
5504
5505	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5506	if (rc != MBX_SUCCESS) {
5507		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5508			"2012 Mailbox failed, mbxCmd x%x "
5509			"READ_CONFIG, mbxStatus x%x\n",
5510			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5511			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5512		rc = -EIO;
5513	} else {
5514		rd_config = &pmb->u.mqe.un.rd_config;
5515		phba->sli4_hba.max_cfg_param.max_xri =
5516			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5517		phba->sli4_hba.max_cfg_param.xri_base =
5518			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5519		phba->sli4_hba.max_cfg_param.max_vpi =
5520			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5521		phba->sli4_hba.max_cfg_param.vpi_base =
5522			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5523		phba->sli4_hba.max_cfg_param.max_rpi =
5524			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5525		phba->sli4_hba.max_cfg_param.rpi_base =
5526			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5527		phba->sli4_hba.max_cfg_param.max_vfi =
5528			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5529		phba->sli4_hba.max_cfg_param.vfi_base =
5530			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5531		phba->sli4_hba.max_cfg_param.max_fcfi =
5532			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5533		phba->sli4_hba.max_cfg_param.fcfi_base =
5534			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5535		phba->sli4_hba.max_cfg_param.max_eq =
5536			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5537		phba->sli4_hba.max_cfg_param.max_rq =
5538			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5539		phba->sli4_hba.max_cfg_param.max_wq =
5540			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5541		phba->sli4_hba.max_cfg_param.max_cq =
5542			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5543		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5544		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5545		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5546		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5547		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5548		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5549				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5550		phba->max_vports = phba->max_vpi;
5551		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5552				"2003 cfg params XRI(B:%d M:%d), "
5553				"VPI(B:%d M:%d) "
5554				"VFI(B:%d M:%d) "
5555				"RPI(B:%d M:%d) "
5556				"FCFI(B:%d M:%d)\n",
5557				phba->sli4_hba.max_cfg_param.xri_base,
5558				phba->sli4_hba.max_cfg_param.max_xri,
5559				phba->sli4_hba.max_cfg_param.vpi_base,
5560				phba->sli4_hba.max_cfg_param.max_vpi,
5561				phba->sli4_hba.max_cfg_param.vfi_base,
5562				phba->sli4_hba.max_cfg_param.max_vfi,
5563				phba->sli4_hba.max_cfg_param.rpi_base,
5564				phba->sli4_hba.max_cfg_param.max_rpi,
5565				phba->sli4_hba.max_cfg_param.fcfi_base,
5566				phba->sli4_hba.max_cfg_param.max_fcfi);
5567	}
5568	mempool_free(pmb, phba->mbox_mem_pool);
5569
5570	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
5571	if (phba->cfg_hba_queue_depth >
5572		(phba->sli4_hba.max_cfg_param.max_xri -
5573			lpfc_sli4_get_els_iocb_cnt(phba)))
5574		phba->cfg_hba_queue_depth =
5575			phba->sli4_hba.max_cfg_param.max_xri -
5576				lpfc_sli4_get_els_iocb_cnt(phba);
5577	return rc;
5578}
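
/*
 * The closing clamp above reflects the SLI-4 resource model: every
 * outstanding I/O consumes an XRI, and a few XRIs are reserved for ELS
 * exchanges, so the effective HBA queue depth can never usefully exceed
 * max_xri - lpfc_sli4_get_els_iocb_cnt(phba).
 */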
5579
5580/**
5581 * lpfc_setup_endian_order - Notify the port of the host's endian order.
5582 * @phba: pointer to lpfc hba data structure.
5583 *
5584 * This routine is invoked to setup the host-side endian order to the
5585 * HBA consistent with the SLI-4 interface spec.
5586 *
5587 * Return codes
5588 * 	0 - successful
5589 * 	ENOMEM - No available memory
5590 *      EIO - The mailbox failed to complete successfully.
5591 **/
5592static int
5593lpfc_setup_endian_order(struct lpfc_hba *phba)
5594{
5595	LPFC_MBOXQ_t *mboxq;
5596	uint32_t rc = 0;
5597	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5598				      HOST_ENDIAN_HIGH_WORD1};
5599
5600	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5601	if (!mboxq) {
5602		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5603				"0492 Unable to allocate memory for issuing "
5604				"SLI_CONFIG_SPECIAL mailbox command\n");
5605		return -ENOMEM;
5606	}
5607
5608	/*
5609	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5610	 * words to contain special data values and no other data.
5611	 */
5612	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5613	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5614	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5615	if (rc != MBX_SUCCESS) {
5616		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5617				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
5618				"status x%x\n",
5619				rc);
5620		rc = -EIO;
5621	}
5622
5623	mempool_free(mboxq, phba->mbox_mem_pool);
5624	return rc;
5625}
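
/*
 * How the special mailbox above works: the two HOST_ENDIAN_* words are
 * byte-order-asymmetric patterns, so the port can tell from the byte
 * order in which they arrive whether the host and port agree on
 * endianness, and compensate when processing subsequent mailbox
 * commands.  (General description of the mechanism; the exact magic
 * values are defined elsewhere in the driver headers.)
 */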
5626
5627/**
5628 * lpfc_sli4_queue_create - Create all the SLI4 queues
5629 * @phba: pointer to lpfc hba data structure.
5630 *
5631 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5632 * operation. For each SLI4 queue type, the parameters such as queue entry
5633 * count (queue depth) shall be taken from the module parameter. For now,
5634 * we just use some constant number as a placeholder.
5635 *
5636 * Return codes
5637 *      0 - successful
5638 *      ENOMEM - No available memory
5639 *      EIO - The mailbox failed to complete successfully.
5640 **/
5641static int
5642lpfc_sli4_queue_create(struct lpfc_hba *phba)
5643{
5644	struct lpfc_queue *qdesc;
5645	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5646	int cfg_fcp_wq_count;
5647	int cfg_fcp_eq_count;
5648
5649	/*
5650	 * Sanity check for configured queue parameters against the run-time
5651	 * device parameters
5652	 */
5653
5654	/* Sanity check on FCP fast-path WQ parameters */
5655	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5656	if (cfg_fcp_wq_count >
5657	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5658		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5659				   LPFC_SP_WQN_DEF;
5660		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5661			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5662					"2581 Not enough WQs (%d) from "
5663					"the pci function for supporting "
5664					"FCP WQs (%d)\n",
5665					phba->sli4_hba.max_cfg_param.max_wq,
5666					phba->cfg_fcp_wq_count);
5667			goto out_error;
5668		}
5669		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5670				"2582 Not enough WQs (%d) from the pci "
5671				"function for supporting the requested "
5672				"FCP WQs (%d), the actual FCP WQs can "
5673				"be supported: %d\n",
5674				phba->sli4_hba.max_cfg_param.max_wq,
5675				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5676	}
5677	/* The actual number of FCP work queues adopted */
5678	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5679
5680	/* Sanity check on FCP fast-path EQ parameters */
5681	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5682	if (cfg_fcp_eq_count >
5683	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5684		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5685				   LPFC_SP_EQN_DEF;
5686		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5687			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5688					"2574 Not enough EQs (%d) from the "
5689					"pci function for supporting FCP "
5690					"EQs (%d)\n",
5691					phba->sli4_hba.max_cfg_param.max_eq,
5692					phba->cfg_fcp_eq_count);
5693			goto out_error;
5694		}
5695		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5696				"2575 Not enough EQs (%d) from the pci "
5697				"function for supporting the requested "
5698				"FCP EQs (%d), the actual FCP EQs can "
5699				"be supported: %d\n",
5700				phba->sli4_hba.max_cfg_param.max_eq,
5701				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5702	}
5703	/* It does not make sense to have more EQs than WQs */
5704	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5705		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5706				"2593 The FCP EQ count(%d) cannot be greater "
5707				"than the FCP WQ count(%d), limiting the "
5708				"FCP EQ count to %d\n", cfg_fcp_eq_count,
5709				phba->cfg_fcp_wq_count,
5710				phba->cfg_fcp_wq_count);
5711		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5712	}
5713	/* The actual number of FCP event queues adopted */
5714	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5715	/* The overall number of event queues used */
5716	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5717
5718	/*
5719	 * Create Event Queues (EQs)
5720	 */
5721
5722	/* Get EQ depth from module parameter, fake the default for now */
5723	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5724	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5725
5726	/* Create slow path event queue */
5727	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5728				      phba->sli4_hba.eq_ecount);
5729	if (!qdesc) {
5730		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5731				"0496 Failed allocate slow-path EQ\n");
5732		goto out_error;
5733	}
5734	phba->sli4_hba.sp_eq = qdesc;
5735
5736	/* Create fast-path FCP Event Queue(s) */
5737	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5738			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5739	if (!phba->sli4_hba.fp_eq) {
5740		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5741				"2576 Failed allocate memory for fast-path "
5742				"EQ record array\n");
5743		goto out_free_sp_eq;
5744	}
5745	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5746		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5747					      phba->sli4_hba.eq_ecount);
5748		if (!qdesc) {
5749			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5750					"0497 Failed allocate fast-path EQ\n");
5751			goto out_free_fp_eq;
5752		}
5753		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5754	}
5755
5756	/*
5757	 * Create Complete Queues (CQs)
5758	 */
5759
5760	/* Get CQ depth from module parameter, fake the default for now */
5761	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5762	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5763
5764	/* Create slow-path Mailbox Command Complete Queue */
5765	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5766				      phba->sli4_hba.cq_ecount);
5767	if (!qdesc) {
5768		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769				"0500 Failed allocate slow-path mailbox CQ\n");
5770		goto out_free_fp_eq;
5771	}
5772	phba->sli4_hba.mbx_cq = qdesc;
5773
5774	/* Create slow-path ELS Complete Queue */
5775	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5776				      phba->sli4_hba.cq_ecount);
5777	if (!qdesc) {
5778		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5779				"0501 Failed allocate slow-path ELS CQ\n");
5780		goto out_free_mbx_cq;
5781	}
5782	phba->sli4_hba.els_cq = qdesc;
5783
5784
5785	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5786	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5787				phba->cfg_fcp_eq_count), GFP_KERNEL);
5788	if (!phba->sli4_hba.fcp_cq) {
5789		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5790				"2577 Failed allocate memory for fast-path "
5791				"CQ record array\n");
5792		goto out_free_els_cq;
5793	}
5794	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5795		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5796					      phba->sli4_hba.cq_ecount);
5797		if (!qdesc) {
5798			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5799					"0499 Failed allocate fast-path FCP "
5800					"CQ (%d)\n", fcp_cqidx);
5801			goto out_free_fcp_cq;
5802		}
5803		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5804	}
5805
5806	/* Create Mailbox Command Queue */
5807	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5808	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5809
5810	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5811				      phba->sli4_hba.mq_ecount);
5812	if (!qdesc) {
5813		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5814				"0505 Failed allocate slow-path MQ\n");
5815		goto out_free_fcp_cq;
5816	}
5817	phba->sli4_hba.mbx_wq = qdesc;
5818
5819	/*
5820	 * Create all the Work Queues (WQs)
5821	 */
5822	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5823	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5824
5825	/* Create slow-path ELS Work Queue */
5826	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5827				      phba->sli4_hba.wq_ecount);
5828	if (!qdesc) {
5829		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5830				"0504 Failed allocate slow-path ELS WQ\n");
5831		goto out_free_mbx_wq;
5832	}
5833	phba->sli4_hba.els_wq = qdesc;
5834
5835	/* Create fast-path FCP Work Queue(s) */
5836	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5837				phba->cfg_fcp_wq_count), GFP_KERNEL);
5838	if (!phba->sli4_hba.fcp_wq) {
5839		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5840				"2578 Failed allocate memory for fast-path "
5841				"WQ record array\n");
5842		goto out_free_els_wq;
5843	}
5844	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5845		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5846					      phba->sli4_hba.wq_ecount);
5847		if (!qdesc) {
5848			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5849					"0503 Failed allocate fast-path FCP "
5850					"WQ (%d)\n", fcp_wqidx);
5851			goto out_free_fcp_wq;
5852		}
5853		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5854	}
5855
5856	/*
5857	 * Create Receive Queue (RQ)
5858	 */
5859	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5860	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5861
5862	/* Create Receive Queue for header */
5863	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5864				      phba->sli4_hba.rq_ecount);
5865	if (!qdesc) {
5866		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5867				"0506 Failed allocate receive HRQ\n");
5868		goto out_free_fcp_wq;
5869	}
5870	phba->sli4_hba.hdr_rq = qdesc;
5871
5872	/* Create Receive Queue for data */
5873	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5874				      phba->sli4_hba.rq_ecount);
5875	if (!qdesc) {
5876		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5877				"0507 Failed allocate receive DRQ\n");
5878		goto out_free_hdr_rq;
5879	}
5880	phba->sli4_hba.dat_rq = qdesc;
5881
5882	return 0;
5883
5884out_free_hdr_rq:
5885	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5886	phba->sli4_hba.hdr_rq = NULL;
5887out_free_fcp_wq:
5888	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5889		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5890		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5891	}
5892	kfree(phba->sli4_hba.fcp_wq);
5893out_free_els_wq:
5894	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5895	phba->sli4_hba.els_wq = NULL;
5896out_free_mbx_wq:
5897	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5898	phba->sli4_hba.mbx_wq = NULL;
5899out_free_fcp_cq:
5900	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5901		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5902		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5903	}
5904	kfree(phba->sli4_hba.fcp_cq);
5905out_free_els_cq:
5906	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5907	phba->sli4_hba.els_cq = NULL;
5908out_free_mbx_cq:
5909	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5910	phba->sli4_hba.mbx_cq = NULL;
5911out_free_fp_eq:
5912	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5913		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5914		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5915	}
5916	kfree(phba->sli4_hba.fp_eq);
5917out_free_sp_eq:
5918	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5919	phba->sli4_hba.sp_eq = NULL;
5920out_error:
5921	return -ENOMEM;
5922}
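
/*
 * Both sanity checks above follow the same clamp-and-warn policy: take
 * what the hardware advertises, subtract the slow-path reservation, and
 * either adopt the remainder or fail if even the fast-path minimum
 * cannot be met.  A condensed sketch of that policy, illustrative only
 * and not part of this driver:
 */
#if 0	/* illustrative sketch only */
static int example_clamp_queue_count(int requested, int hw_max,
				     int sp_reserved, int fp_min)
{
	int avail = hw_max - sp_reserved;	/* slow path is carved out first */

	if (requested <= avail)
		return requested;		/* fits as configured */
	if (avail < fp_min)
		return -1;			/* cannot support fast path at all */
	return avail;				/* adopt what the hardware offers */
}
#endif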
5923
5924/**
5925 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5926 * @phba: pointer to lpfc hba data structure.
5927 *
5928 * This routine is invoked to release all the SLI4 queues used for the FCoE
5929 * HBA operation.
5935 **/
5936static void
5937lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5938{
5939	int fcp_qidx;
5940
5941	/* Release mailbox command work queue */
5942	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5943	phba->sli4_hba.mbx_wq = NULL;
5944
5945	/* Release ELS work queue */
5946	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5947	phba->sli4_hba.els_wq = NULL;
5948
5949	/* Release FCP work queue */
5950	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5951		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5952	kfree(phba->sli4_hba.fcp_wq);
5953	phba->sli4_hba.fcp_wq = NULL;
5954
5955	/* Release unsolicited receive queue */
5956	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5957	phba->sli4_hba.hdr_rq = NULL;
5958	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5959	phba->sli4_hba.dat_rq = NULL;
5960
5961	/* Release ELS complete queue */
5962	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5963	phba->sli4_hba.els_cq = NULL;
5964
5965	/* Release mailbox command complete queue */
5966	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5967	phba->sli4_hba.mbx_cq = NULL;
5968
5969	/* Release FCP response complete queue */
5970	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5971		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5972	kfree(phba->sli4_hba.fcp_cq);
5973	phba->sli4_hba.fcp_cq = NULL;
5974
5975	/* Release fast-path event queue */
5976	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5977		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5978	kfree(phba->sli4_hba.fp_eq);
5979	phba->sli4_hba.fp_eq = NULL;
5980
5981	/* Release slow-path event queue */
5982	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5983	phba->sli4_hba.sp_eq = NULL;
5984
5985	return;
5986}
5987
5988/**
5989 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5990 * @phba: pointer to lpfc hba data structure.
5991 *
5992 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5993 * operation.
5994 *
5995 * Return codes
5996 *      0 - successful
5997 *      ENOMEM - No available memory
5998 *      EIO - The mailbox failed to complete successfully.
5999 **/
6000int
6001lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6002{
6003	int rc = -ENOMEM;
6004	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6005	int fcp_cq_index = 0;
6006
6007	/*
6008	 * Set up Event Queues (EQs)
6009	 */
6010
6011	/* Set up slow-path event queue */
6012	if (!phba->sli4_hba.sp_eq) {
6013		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6014				"0520 Slow-path EQ not allocated\n");
6015		goto out_error;
6016	}
6017	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6018			    LPFC_SP_DEF_IMAX);
6019	if (rc) {
6020		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6021				"0521 Failed setup of slow-path EQ: "
6022				"rc = 0x%x\n", rc);
6023		goto out_error;
6024	}
6025	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6026			"2583 Slow-path EQ setup: queue-id=%d\n",
6027			phba->sli4_hba.sp_eq->queue_id);
6028
6029	/* Set up fast-path event queue */
6030	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6031		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6032			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6033					"0522 Fast-path EQ (%d) not "
6034					"allocated\n", fcp_eqidx);
6035			goto out_destroy_fp_eq;
6036		}
6037		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6038				    phba->cfg_fcp_imax);
6039		if (rc) {
6040			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6041					"0523 Failed setup of fast-path EQ "
6042					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6043			goto out_destroy_fp_eq;
6044		}
6045		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6046				"2584 Fast-path EQ setup: "
6047				"queue[%d]-id=%d\n", fcp_eqidx,
6048				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6049	}
6050
6051	/*
6052	 * Set up Complete Queues (CQs)
6053	 */
6054
6055	/* Set up slow-path MBOX Complete Queue as the first CQ */
6056	if (!phba->sli4_hba.mbx_cq) {
6057		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6058				"0528 Mailbox CQ not allocated\n");
6059		goto out_destroy_fp_eq;
6060	}
6061	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6062			    LPFC_MCQ, LPFC_MBOX);
6063	if (rc) {
6064		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6065				"0529 Failed setup of slow-path mailbox CQ: "
6066				"rc = 0x%x\n", rc);
6067		goto out_destroy_fp_eq;
6068	}
6069	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6070			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6071			phba->sli4_hba.mbx_cq->queue_id,
6072			phba->sli4_hba.sp_eq->queue_id);
6073
6074	/* Set up slow-path ELS Complete Queue */
6075	if (!phba->sli4_hba.els_cq) {
6076		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6077				"0530 ELS CQ not allocated\n");
6078		goto out_destroy_mbx_cq;
6079	}
6080	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6081			    LPFC_WCQ, LPFC_ELS);
6082	if (rc) {
6083		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6084				"0531 Failed setup of slow-path ELS CQ: "
6085				"rc = 0x%x\n", rc);
6086		goto out_destroy_mbx_cq;
6087	}
6088	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6089			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6090			phba->sli4_hba.els_cq->queue_id,
6091			phba->sli4_hba.sp_eq->queue_id);
6092
6093	/* Set up fast-path FCP Response Complete Queue */
6094	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6095		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6096			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6097					"0526 Fast-path FCP CQ (%d) not "
6098					"allocated\n", fcp_cqidx);
6099			goto out_destroy_fcp_cq;
6100		}
6101		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6102				    phba->sli4_hba.fp_eq[fcp_cqidx],
6103				    LPFC_WCQ, LPFC_FCP);
6104		if (rc) {
6105			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6106					"0527 Failed setup of fast-path FCP "
6107					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6108			goto out_destroy_fcp_cq;
6109		}
6110		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6111				"2588 FCP CQ setup: cq[%d]-id=%d, "
6112				"parent eq[%d]-id=%d\n",
6113				fcp_cqidx,
6114				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6115				fcp_cqidx,
6116				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6117	}
6118
6119	/*
6120	 * Set up all the Work Queues (WQs)
6121	 */
6122
6123	/* Set up Mailbox Command Queue */
6124	if (!phba->sli4_hba.mbx_wq) {
6125		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6126				"0538 Slow-path MQ not allocated\n");
6127		goto out_destroy_fcp_cq;
6128	}
6129	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6130			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6131	if (rc) {
6132		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6133				"0539 Failed setup of slow-path MQ: "
6134				"rc = 0x%x\n", rc);
6135		goto out_destroy_fcp_cq;
6136	}
6137	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6138			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6139			phba->sli4_hba.mbx_wq->queue_id,
6140			phba->sli4_hba.mbx_cq->queue_id);
6141
6142	/* Set up slow-path ELS Work Queue */
6143	if (!phba->sli4_hba.els_wq) {
6144		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6145				"0536 Slow-path ELS WQ not allocated\n");
6146		goto out_destroy_mbx_wq;
6147	}
6148	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6149			    phba->sli4_hba.els_cq, LPFC_ELS);
6150	if (rc) {
6151		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6152				"0537 Failed setup of slow-path ELS WQ: "
6153				"rc = 0x%x\n", rc);
6154		goto out_destroy_mbx_wq;
6155	}
6156	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6157			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6158			phba->sli4_hba.els_wq->queue_id,
6159			phba->sli4_hba.els_cq->queue_id);
6160
6161	/* Set up fast-path FCP Work Queue */
6162	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6163		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6164			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6165					"0534 Fast-path FCP WQ (%d) not "
6166					"allocated\n", fcp_wqidx);
6167			goto out_destroy_fcp_wq;
6168		}
6169		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6170				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6171				    LPFC_FCP);
6172		if (rc) {
6173			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6174					"0535 Failed setup of fast-path FCP "
6175					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6176			goto out_destroy_fcp_wq;
6177		}
6178		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6179				"2591 FCP WQ setup: wq[%d]-id=%d, "
6180				"parent cq[%d]-id=%d\n",
6181				fcp_wqidx,
6182				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6183				fcp_cq_index,
6184				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6185		/* Round robin FCP Work Queue's Completion Queue assignment */
6186		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6187	}
6188
6189	/*
6190	 * Create Receive Queue (RQ)
6191	 */
6192	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6193		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6194				"0540 Receive Queue not allocated\n");
6195		goto out_destroy_fcp_wq;
6196	}
6197	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6198			    phba->sli4_hba.els_cq, LPFC_USOL);
6199	if (rc) {
6200		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6201				"0541 Failed setup of Receive Queue: "
6202				"rc = 0x%x\n", rc);
6203		goto out_destroy_fcp_wq;
6204	}
6205	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6206			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6207			"parent cq-id=%d\n",
6208			phba->sli4_hba.hdr_rq->queue_id,
6209			phba->sli4_hba.dat_rq->queue_id,
6210			phba->sli4_hba.els_cq->queue_id);
6211	return 0;
6212
6213out_destroy_fcp_wq:
6214	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6215		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6216	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6217out_destroy_mbx_wq:
6218	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6219out_destroy_fcp_cq:
6220	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6221		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6222	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6223out_destroy_mbx_cq:
6224	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6225out_destroy_fp_eq:
6226	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6227		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6228	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6229out_error:
6230	return rc;
6231}
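
/*
 * Note on the WQ-to-CQ round robin above: with, say, four FCP WQs and
 * two FCP CQs/EQs, fcp_cq_index walks 0, 1, 0, 1, so completion traffic
 * is spread evenly across the CQs and, through them, across their
 * parent fast-path EQs and interrupt vectors.
 */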
6232
6233/**
6234 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6235 * @phba: pointer to lpfc hba data structure.
6236 *
6237 * This routine is invoked to unset all the SLI4 queues used for the FCoE
6238 * HBA operation.
6244 **/
6245void
6246lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6247{
6248	int fcp_qidx;
6249
6250	/* Unset mailbox command work queue */
6251	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6252	/* Unset ELS work queue */
6253	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6254	/* Unset unsolicited receive queue */
6255	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6256	/* Unset FCP work queue */
6257	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6258		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6259	/* Unset mailbox command complete queue */
6260	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6261	/* Unset ELS complete queue */
6262	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6263	/* Unset FCP response complete queue */
6264	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6265		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6266	/* Unset fast-path event queue */
6267	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6268		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6269	/* Unset slow-path event queue */
6270	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6271}
6272
6273/**
6274 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6275 * @phba: pointer to lpfc hba data structure.
6276 *
6277 * This routine is invoked to allocate and set up a pool of completion queue
6278 * events. The body of the completion queue event is a completion queue entry
6279 * (CQE). For now, this pool is used by the interrupt service routine to queue
6280 * the following HBA completion queue events for the worker thread to process:
6281 *   - Mailbox asynchronous events
6282 *   - Receive queue completion unsolicited events
6283 * Later, this can be used for all the slow-path events.
6284 *
6285 * Return codes
6286 *      0 - successful
6287 *      -ENOMEM - No available memory
6288 **/
6289static int
6290lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6291{
6292	struct lpfc_cq_event *cq_event;
6293	int i;
6294
6295	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6296		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6297		if (!cq_event)
6298			goto out_pool_create_fail;
6299		list_add_tail(&cq_event->list,
6300			      &phba->sli4_hba.sp_cqe_event_pool);
6301	}
6302	return 0;
6303
6304out_pool_create_fail:
6305	lpfc_sli4_cq_event_pool_destroy(phba);
6306	return -ENOMEM;
6307}
6308
6309/**
6310 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6311 * @phba: pointer to lpfc hba data structure.
6312 *
6313 * This routine is invoked to free the pool of completion queue events at
6314 * driver unload time. Note that, it is the responsibility of the driver
6315 * cleanup routine to free all the outstanding completion-queue events
6316 * allocated from this pool back into the pool before invoking this routine
6317 * to destroy the pool.
6318 **/
6319static void
6320lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6321{
6322	struct lpfc_cq_event *cq_event, *next_cq_event;
6323
6324	list_for_each_entry_safe(cq_event, next_cq_event,
6325				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6326		list_del(&cq_event->list);
6327		kfree(cq_event);
6328	}
6329}
6330
6331/**
6332 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6333 * @phba: pointer to lpfc hba data structure.
6334 *
6335 * This routine is the lock-free version of the API invoked to allocate a
6336 * completion-queue event from the free pool.
6337 *
6338 * Return: Pointer to the newly allocated completion-queue event if successful
6339 *         NULL otherwise.
6340 **/
6341struct lpfc_cq_event *
6342__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6343{
6344	struct lpfc_cq_event *cq_event = NULL;
6345
6346	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6347			 struct lpfc_cq_event, list);
6348	return cq_event;
6349}
6350
6351/**
6352 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6353 * @phba: pointer to lpfc hba data structure.
6354 *
6355 * This routine is the locked version of the API invoked to allocate a
6356 * completion-queue event from the free pool.
6357 *
6358 * Return: Pointer to the newly allocated completion-queue event if successful
6359 *         NULL otherwise.
6360 **/
6361struct lpfc_cq_event *
6362lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6363{
6364	struct lpfc_cq_event *cq_event;
6365	unsigned long iflags;
6366
6367	spin_lock_irqsave(&phba->hbalock, iflags);
6368	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6369	spin_unlock_irqrestore(&phba->hbalock, iflags);
6370	return cq_event;
6371}
6372
6373/**
6374 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6375 * @phba: pointer to lpfc hba data structure.
6376 * @cq_event: pointer to the completion queue event to be freed.
6377 *
6378 * This routine is the lock-free version of the API invoked to release a
6379 * completion-queue event back into the free pool.
6380 **/
6381void
6382__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6383			     struct lpfc_cq_event *cq_event)
6384{
6385	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6386}
6387
6388/**
6389 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6390 * @phba: pointer to lpfc hba data structure.
6391 * @cq_event: pointer to the completion queue event to be freed.
6392 *
6393 * This routine is the locked version of the API invoked to release a
6394 * completion-queue event back into the free pool.
6395 **/
6396void
6397lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6398			   struct lpfc_cq_event *cq_event)
6399{
6400	unsigned long iflags;
6401	spin_lock_irqsave(&phba->hbalock, iflags);
6402	__lpfc_sli4_cq_event_release(phba, cq_event);
6403	spin_unlock_irqrestore(&phba->hbalock, iflags);
6404}
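
/*
 * Typical pairing of the pool helpers above (a usage sketch, not code
 * from this driver): the interrupt path borrows an event from the pool
 * and the worker thread returns it once the event has been processed.
 */
#if 0	/* illustrative sketch only */
static void example_cq_event_usage(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_sli4_cq_event_alloc(phba);	/* ISR context */
	if (!cq_event)
		return;					/* pool exhausted */
	/* ... copy the CQE payload and queue it for the worker ... */
	lpfc_sli4_cq_event_release(phba, cq_event);	/* worker, when done */
}
#endif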
6405
6406/**
6407 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6408 * @phba: pointer to lpfc hba data structure.
6409 *
6410 * This routine frees all the pending completion-queue events back into
6411 * the free pool for device reset.
6412 **/
6413static void
6414lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6415{
6416	LIST_HEAD(cqelist);
6417	struct lpfc_cq_event *cqe;
6418	unsigned long iflags;
6419
6420	/* Retrieve all the pending WCQEs from pending WCQE lists */
6421	spin_lock_irqsave(&phba->hbalock, iflags);
6422	/* Pending FCP XRI abort events */
6423	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6424			 &cqelist);
6425	/* Pending ELS XRI abort events */
6426	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6427			 &cqelist);
6428	/* Pending async events */
6429	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6430			 &cqelist);
6431	spin_unlock_irqrestore(&phba->hbalock, iflags);
6432
6433	while (!list_empty(&cqelist)) {
6434		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6435		lpfc_sli4_cq_event_release(phba, cqe);
6436	}
6437}
6438
6439/**
6440 * lpfc_pci_function_reset - Reset pci function.
6441 * @phba: pointer to lpfc hba data structure.
6442 *
6443 * This routine is invoked to request a PCI function reset. It destroys
6444 * all resources assigned to the PCI function that originates this request.
6445 *
6446 * Return codes
6447 *      0 - successful
6448 *      ENOMEM - No available memory
6449 *      EIO - The mailbox failed to complete successfully.
6450 **/
6451int
6452lpfc_pci_function_reset(struct lpfc_hba *phba)
6453{
6454	LPFC_MBOXQ_t *mboxq;
6455	uint32_t rc = 0;
6456	uint32_t shdr_status, shdr_add_status;
6457	union lpfc_sli4_cfg_shdr *shdr;
6458
6459	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6460	if (!mboxq) {
6461		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6462				"0494 Unable to allocate memory for issuing "
6463				"SLI_FUNCTION_RESET mailbox command\n");
6464		return -ENOMEM;
6465	}
6466
6467	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6468	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6469			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6470			 LPFC_SLI4_MBX_EMBED);
6471	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6472	shdr = (union lpfc_sli4_cfg_shdr *)
6473		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6474	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6475	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6476	if (rc != MBX_TIMEOUT)
6477		mempool_free(mboxq, phba->mbox_mem_pool);
6478	if (shdr_status || shdr_add_status || rc) {
6479		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6480				"0495 SLI_FUNCTION_RESET mailbox failed with "
6481				"status x%x add_status x%x, mbx status x%x\n",
6482				shdr_status, shdr_add_status, rc);
6483		rc = -ENXIO;
6484	}
6485	return rc;
6486}
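
/*
 * Note the MBX_TIMEOUT handling above: if the mailbox command timed
 * out, the firmware may still complete it later and touch the buffer,
 * so the mailbox memory is deliberately not returned to the mempool on
 * that path.
 */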
6487
6488/**
6489 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6490 * @phba: pointer to lpfc hba data structure.
6491 * @cnt: number of nop mailbox commands to send.
6492 *
6493 * This routine is invoked to send a number @cnt of NOP mailbox commands and
6494 * wait for each command to complete.
6495 *
6496 * Return: the number of NOP mailbox command completed.
6497 **/
6498static int
6499lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6500{
6501	LPFC_MBOXQ_t *mboxq;
6502	int length, cmdsent;
6503	uint32_t mbox_tmo;
6504	uint32_t rc = 0;
6505	uint32_t shdr_status, shdr_add_status;
6506	union lpfc_sli4_cfg_shdr *shdr;
6507
6508	if (cnt == 0) {
6509		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6510				"2518 Requested to send 0 NOP mailbox cmd\n");
6511		return cnt;
6512	}
6513
6514	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6515	if (!mboxq) {
6516		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6517				"2519 Unable to allocate memory for issuing "
6518				"NOP mailbox command\n");
6519		return 0;
6520	}
6521
6522	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6523	length = (sizeof(struct lpfc_mbx_nop) -
6524		  sizeof(struct lpfc_sli4_cfg_mhdr));
6525	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6526			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6527
6528	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6529	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6530		if (!phba->sli4_hba.intr_enable)
6531			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6532		else
6533			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6534		if (rc == MBX_TIMEOUT)
6535			break;
6536		/* Check return status */
6537		shdr = (union lpfc_sli4_cfg_shdr *)
6538			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6539		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6540		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6541					 &shdr->response);
6542		if (shdr_status || shdr_add_status || rc) {
6543			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6544					"2520 NOP mailbox command failed "
6545					"status x%x add_status x%x mbx "
6546					"status x%x\n", shdr_status,
6547					shdr_add_status, rc);
6548			break;
6549		}
6550	}
6551
6552	if (rc != MBX_TIMEOUT)
6553		mempool_free(mboxq, phba->mbox_mem_pool);
6554
6555	return cmdsent;
6556}
6557
6558/**
6559 * lpfc_sli4_fcfi_unreg - Unregister an FCFI from the device
6560 * @phba: pointer to lpfc hba data structure.
6561 * @fcfi: fcf index.
6562 *
6563 * This routine is invoked to unregister an FCFI from the device.
6564 **/
6565void
6566lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6567{
6568	LPFC_MBOXQ_t *mbox;
6569	uint32_t mbox_tmo;
6570	int rc;
6571	unsigned long flags;
6572
6573	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6574
6575	if (!mbox)
6576		return;
6577
6578	lpfc_unreg_fcfi(mbox, fcfi);
6579
6580	if (!phba->sli4_hba.intr_enable)
6581		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6582	else {
6583		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6584		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6585	}
6586	if (rc != MBX_TIMEOUT)
6587		mempool_free(mbox, phba->mbox_mem_pool);
6588	if (rc != MBX_SUCCESS)
6589		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6590				"2517 Unregister FCFI command failed "
6591				"status %d, mbxStatus x%x\n", rc,
6592				bf_get(lpfc_mqe_status, &mbox->u.mqe));
6593	else {
6594		spin_lock_irqsave(&phba->hbalock, flags);
6595		/* Mark the FCFI as no longer registered */
6596		phba->fcf.fcf_flag &=
6597			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6598		spin_unlock_irqrestore(&phba->hbalock, flags);
6599	}
6600}
6601
6602/**
6603 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6604 * @phba: pointer to lpfc hba data structure.
6605 *
6606 * This routine is invoked to set up the PCI device memory space for device
6607 * with SLI-4 interface spec.
6608 *
6609 * Return codes
6610 * 	0 - successful
6611 * 	other values - error
6612 **/
6613static int
6614lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6615{
6616	struct pci_dev *pdev;
6617	unsigned long bar0map_len, bar1map_len, bar2map_len;
6618	int error = -ENODEV;
6619
6620	/* Obtain PCI device reference */
6621	if (!phba->pcidev)
6622		return error;
6623	else
6624		pdev = phba->pcidev;
6625
6626	/* Set the device DMA mask size */
6627	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6628	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6629		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6630		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6631			return error;
6632		}
6633	}
6634
6635	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6636	 * number of bytes required by each mapping. They actually map to
6637	 * the PCI BAR regions 0 or 1, 2, and 4 of the SLI4 device.
6638	 */
6639	if (pci_resource_start(pdev, 0)) {
6640		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6641		bar0map_len = pci_resource_len(pdev, 0);
6642	} else {
6643		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6644		bar0map_len = pci_resource_len(pdev, 1);
6645	}
6646	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6647	bar1map_len = pci_resource_len(pdev, 2);
6648
6649	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6650	bar2map_len = pci_resource_len(pdev, 4);
6651
6652	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6653	phba->sli4_hba.conf_regs_memmap_p =
6654				ioremap(phba->pci_bar0_map, bar0map_len);
6655	if (!phba->sli4_hba.conf_regs_memmap_p) {
6656		dev_printk(KERN_ERR, &pdev->dev,
6657			   "ioremap failed for SLI4 PCI config registers.\n");
6658		goto out;
6659	}
6660
6661	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6662	phba->sli4_hba.ctrl_regs_memmap_p =
6663				ioremap(phba->pci_bar1_map, bar1map_len);
6664	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6665		dev_printk(KERN_ERR, &pdev->dev,
6666			   "ioremap failed for SLI4 HBA control registers.\n");
6667		goto out_iounmap_conf;
6668	}
6669
6670	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6671	phba->sli4_hba.drbl_regs_memmap_p =
6672				ioremap(phba->pci_bar2_map, bar2map_len);
6673	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6674		dev_printk(KERN_ERR, &pdev->dev,
6675			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6676		goto out_iounmap_ctrl;
6677	}
6678
6679	/* Set up BAR0 PCI config space register memory map */
6680	lpfc_sli4_bar0_register_memmap(phba);
6681
6682	/* Set up BAR1 register memory map */
6683	lpfc_sli4_bar1_register_memmap(phba);
6684
6685	/* Set up BAR2 register memory map */
6686	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6687	if (error)
6688		goto out_iounmap_all;
6689
6690	return 0;
6691
6692out_iounmap_all:
6693	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6694out_iounmap_ctrl:
6695	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6696out_iounmap_conf:
6697	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6698out:
6699	return error;
6700}
6701
6702/**
6703 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6704 * @phba: pointer to lpfc hba data structure.
6705 *
6706 * This routine is invoked to unset the PCI device memory space for device
6707 * with SLI-4 interface spec.
6708 **/
6709static void
6710lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6711{
6712	struct pci_dev *pdev;
6713
6714	/* Obtain PCI device reference */
6715	if (!phba->pcidev)
6716		return;
6717	else
6718		pdev = phba->pcidev;
6719
6722	/* Unmap I/O memory space */
6723	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6724	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6725	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6726
6727	return;
6728}
6729
6730/**
6731 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6732 * @phba: pointer to lpfc hba data structure.
6733 *
6734 * This routine is invoked to enable the MSI-X interrupt vectors to device
6735 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6736 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6737 * invoked, enables either all or nothing, depending on the current
6738 * availability of PCI vector resources. The device driver is responsible
6739 * for calling the individual request_irq() to register each MSI-X vector
6740 * with an interrupt handler, which is done in this function. Note that
6741 * later when device is unloading, the driver should always call free_irq()
6742 * on all MSI-X vectors it has done request_irq() on before calling
6743 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
6744 * will be left with MSI-X enabled, leaking its vectors.
6745 *
6746 * Return codes
6747 *   0 - successful
6748 *   other values - error
6749 **/
6750static int
6751lpfc_sli_enable_msix(struct lpfc_hba *phba)
6752{
6753	int rc, i;
6754	LPFC_MBOXQ_t *pmb;
6755
6756	/* Set up MSI-X multi-message vectors */
6757	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6758		phba->msix_entries[i].entry = i;
6759
6760	/* Configure MSI-X capability structure */
6761	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6762				ARRAY_SIZE(phba->msix_entries));
6763	if (rc) {
6764		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6765				"0420 PCI enable MSI-X failed (%d)\n", rc);
6766		goto msi_fail_out;
6767	}
6768	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6769		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6770				"0477 MSI-X entry[%d]: vector=x%x "
6771				"message=%d\n", i,
6772				phba->msix_entries[i].vector,
6773				phba->msix_entries[i].entry);
6774	/*
6775	 * Assign MSI-X vectors to interrupt handlers
6776	 */
6777
6778	/* vector-0 is associated with the slow-path handler */
6779	rc = request_irq(phba->msix_entries[0].vector,
6780			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6781			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6782	if (rc) {
6783		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6784				"0421 MSI-X slow-path request_irq failed "
6785				"(%d)\n", rc);
6786		goto msi_fail_out;
6787	}
6788
6789	/* vector-1 is associated with the fast-path handler */
6790	rc = request_irq(phba->msix_entries[1].vector,
6791			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6792			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6793
6794	if (rc) {
6795		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6796				"0429 MSI-X fast-path request_irq failed "
6797				"(%d)\n", rc);
6798		goto irq_fail_out;
6799	}
6800
6801	/*
6802	 * Configure HBA MSI-X attention conditions to messages
6803	 */
6804	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6805
6806	if (!pmb) {
6807		rc = -ENOMEM;
6808		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6809				"0474 Unable to allocate memory for issuing "
6810				"MBOX_CONFIG_MSI command\n");
6811		goto mem_fail_out;
6812	}
6813	rc = lpfc_config_msi(phba, pmb);
6814	if (rc)
6815		goto mbx_fail_out;
6816	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6817	if (rc != MBX_SUCCESS) {
6818		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6819				"0351 Config MSI mailbox command failed, "
6820				"mbxCmd x%x, mbxStatus x%x\n",
6821				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6822		goto mbx_fail_out;
6823	}
6824
6825	/* Free memory allocated for mailbox command */
6826	mempool_free(pmb, phba->mbox_mem_pool);
6827	return rc;
6828
6829mbx_fail_out:
6830	/* Free memory allocated for mailbox command */
6831	mempool_free(pmb, phba->mbox_mem_pool);
6832
6833mem_fail_out:
6834	/* free the irq already requested */
6835	free_irq(phba->msix_entries[1].vector, phba);
6836
6837irq_fail_out:
6838	/* free the irq already requested */
6839	free_irq(phba->msix_entries[0].vector, phba);
6840
6841msi_fail_out:
6842	/* Unconfigure MSI-X capability structure */
6843	pci_disable_msix(phba->pcidev);
6844	return rc;
6845}
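
/*
 * Illustrative note (not driver code): each request_irq() above must be
 * undone by a free_irq() with the same vector/dev_id pair before
 * pci_disable_msix() is called, which lpfc_sli_disable_msix() below does
 * in a loop equivalent to:
 *
 *	free_irq(phba->msix_entries[1].vector, phba);
 *	free_irq(phba->msix_entries[0].vector, phba);
 *	pci_disable_msix(phba->pcidev);
 */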
6846
6847/**
6848 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6849 * @phba: pointer to lpfc hba data structure.
6850 *
6851 * This routine is invoked to release the MSI-X vectors and then disable the
6852 * MSI-X interrupt mode to device with SLI-3 interface spec.
6853 **/
6854static void
6855lpfc_sli_disable_msix(struct lpfc_hba *phba)
6856{
6857	int i;
6858
6859	/* Free up MSI-X multi-message vectors */
6860	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6861		free_irq(phba->msix_entries[i].vector, phba);
6862	/* Disable MSI-X */
6863	pci_disable_msix(phba->pcidev);
6864
6865	return;
6866}
6867
6868/**
6869 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6870 * @phba: pointer to lpfc hba data structure.
6871 *
6872 * This routine is invoked to enable the MSI interrupt mode to device with
6873 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6874 * enable the MSI vector. The device driver is responsible for calling the
6875 * request_irq() to register MSI vector with a interrupt the handler, which
6876 * is done in this function.
6877 *
6878 * Return codes
6879 * 	0 - successful
6880 * 	other values - error
6881 */
6882static int
6883lpfc_sli_enable_msi(struct lpfc_hba *phba)
6884{
6885	int rc;
6886
6887	rc = pci_enable_msi(phba->pcidev);
6888	if (!rc)
6889		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6890				"0462 PCI enable MSI mode success.\n");
6891	else {
6892		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6893				"0471 PCI enable MSI mode failed (%d)\n", rc);
6894		return rc;
6895	}
6896
6897	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6898			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6899	if (rc) {
6900		pci_disable_msi(phba->pcidev);
6901		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6902				"0478 MSI request_irq failed (%d)\n", rc);
6903	}
6904	return rc;
6905}
6906
6907/**
6908 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6909 * @phba: pointer to lpfc hba data structure.
6910 *
6911 * This routine is invoked to disable the MSI interrupt mode to device with
6912 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
6913 * has done request_irq() on before calling pci_disable_msi(). Failure to do
6914 * so results in a BUG_ON() and the device is left with MSI enabled, leaking
6915 * its vector.
6916 */
6917static void
6918lpfc_sli_disable_msi(struct lpfc_hba *phba)
6919{
6920	free_irq(phba->pcidev->irq, phba);
6921	pci_disable_msi(phba->pcidev);
6922	return;
6923}
6924
6925/**
6926 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6927 * @phba: pointer to lpfc hba data structure.
6928 * @cfg_mode: the configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
6929 *
6930 * This routine is invoked to enable device interrupts and associate the
6931 * driver's interrupt handler(s) with interrupt vector(s) for a device with
6932 * SLI-3 interface spec. Depending on the configured interrupt mode, the
6933 * driver falls back from that mode to one supported by the platform,
6934 * kernel, and device, in the order:
6935 * MSI-X -> MSI -> IRQ.
6936 *
6937 * Return codes
6938 *   intr_mode actually enabled (0 = INTx, 1 = MSI, 2 = MSI-X)
6939 *   LPFC_INTR_ERROR - failed to enable any interrupt mode
6940 **/
6941static uint32_t
6942lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6943{
6944	uint32_t intr_mode = LPFC_INTR_ERROR;
6945	int retval;
6946
6947	if (cfg_mode == 2) {
6948		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6949		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6950		if (!retval) {
6951			/* Now, try to enable MSI-X interrupt mode */
6952			retval = lpfc_sli_enable_msix(phba);
6953			if (!retval) {
6954				/* Indicate initialization to MSI-X mode */
6955				phba->intr_type = MSIX;
6956				intr_mode = 2;
6957			}
6958		}
6959	}
6960
6961	/* Fallback to MSI if MSI-X initialization failed */
6962	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6963		retval = lpfc_sli_enable_msi(phba);
6964		if (!retval) {
6965			/* Indicate initialization to MSI mode */
6966			phba->intr_type = MSI;
6967			intr_mode = 1;
6968		}
6969	}
6970
6971	/* Fall back to INTx if both MSI-X and MSI initialization failed */
6972	if (phba->intr_type == NONE) {
6973		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6974				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6975		if (!retval) {
6976			/* Indicate initialization to INTx mode */
6977			phba->intr_type = INTx;
6978			intr_mode = 0;
6979		}
6980	}
6981	return intr_mode;
6982}
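
/*
 * Usage sketch (illustrative only): the probe and PM resume paths call
 * this routine with the configured mode and record the mode actually
 * enabled, e.g.:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;
 *	phba->intr_mode = intr_mode;	(2 = MSI-X, 1 = MSI, 0 = INTx)
 */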
6983
6984/**
6985 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6986 * @phba: pointer to lpfc hba data structure.
6987 *
6988 * This routine is invoked to disable device interrupt and disassociate the
6989 * driver's interrupt handler(s) from interrupt vector(s) to device with
6990 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6991 * release the interrupt vector(s) for the message signaled interrupt.
6992 **/
6993static void
6994lpfc_sli_disable_intr(struct lpfc_hba *phba)
6995{
6996	/* Disable the currently initialized interrupt mode */
6997	if (phba->intr_type == MSIX)
6998		lpfc_sli_disable_msix(phba);
6999	else if (phba->intr_type == MSI)
7000		lpfc_sli_disable_msi(phba);
7001	else if (phba->intr_type == INTx)
7002		free_irq(phba->pcidev->irq, phba);
7003
7004	/* Reset interrupt management states */
7005	phba->intr_type = NONE;
7006	phba->sli.slistat.sli_intr = 0;
7007
7008	return;
7009}
7010
7011/**
7012 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7013 * @phba: pointer to lpfc hba data structure.
7014 *
7015 * This routine is invoked to enable the MSI-X interrupt vectors to device
7016 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7017 * to enable the MSI-X vectors; if fewer vectors are available than
7018 * requested, the call is retried with the count the kernel reports (a
7019 * single vector is treated as failure). The device driver is responsible
7020 * for calling request_irq() to register each MSI-X vector with an
7021 * interrupt handler, which is done in this function. Note that later, when
7022 * the device is unloading, the driver should call free_irq() on every
7023 * MSI-X vector it has done request_irq() on before calling
7024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the
7025 * device is left with MSI-X enabled, leaking its vectors.
7026 *
7027 * Return codes
7028 * 0 - successful
7029 * other values - error
7030 **/
7031static int
7032lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7033{
7034	int vectors, rc, index;
7035
7036	/* Set up MSI-X multi-message vectors */
7037	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7038		phba->sli4_hba.msix_entries[index].entry = index;
7039
7040	/* Configure MSI-X capability structure */
7041	vectors = phba->sli4_hba.cfg_eqn;
7042enable_msix_vectors:
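	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or a positive count of the vectors actually available;
	 * in the last case, retry with the reported count.  A return of 1
	 * falls through to the failure path below, since one slow-path and
	 * at least one fast-path vector are required.
	 */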
7043	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7044			     vectors);
7045	if (rc > 1) {
7046		vectors = rc;
7047		goto enable_msix_vectors;
7048	} else if (rc) {
7049		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7050				"0484 PCI enable MSI-X failed (%d)\n", rc);
7051		goto msi_fail_out;
7052	}
7053
7054	/* Log MSI-X vector assignment */
7055	for (index = 0; index < vectors; index++)
7056		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7057				"0489 MSI-X entry[%d]: vector=x%x "
7058				"message=%d\n", index,
7059				phba->sli4_hba.msix_entries[index].vector,
7060				phba->sli4_hba.msix_entries[index].entry);
7061	/*
7062	 * Assign MSI-X vectors to interrupt handlers
7063	 */
7064
7065	/* The first vector must be associated with the slow-path (MQ) handler */
7066	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7067			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7068			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7069	if (rc) {
7070		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7071				"0485 MSI-X slow-path request_irq failed "
7072				"(%d)\n", rc);
7073		goto msi_fail_out;
7074	}
7075
7076	/* The rest of the vector(s) are associated to fast-path handler(s) */
7077	for (index = 1; index < vectors; index++) {
7078		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7079		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7080		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7081				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7082				 LPFC_FP_DRIVER_HANDLER_NAME,
7083				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7084		if (rc) {
7085			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7086					"0486 MSI-X fast-path (%d) "
7087					"request_irq failed (%d)\n", index, rc);
7088			goto cfg_fail_out;
7089		}
7090	}
7091	phba->sli4_hba.msix_vec_nr = vectors;
7092
7093	return rc;
7094
7095cfg_fail_out:
7096	/* free the irq already requested */
7097	for (--index; index >= 1; index--)
7098		free_irq(phba->sli4_hba.msix_entries[index].vector,
7099			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7100
7101	/* free the irq already requested */
7102	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7103
7104msi_fail_out:
7105	/* Unconfigure MSI-X capability structure */
7106	pci_disable_msix(phba->pcidev);
7107	return rc;
7108}
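
/*
 * Resulting vector layout from the setup above:
 *
 *	msix_entries[0].vector -> lpfc_sli4_sp_intr_handler, dev_id = phba
 *	msix_entries[i].vector -> lpfc_sli4_fp_intr_handler,
 *	                          dev_id = &fcp_eq_hdl[i - 1], for i >= 1
 *
 * lpfc_sli4_disable_msix() below must pass the same dev_id cookies to
 * free_irq().
 */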
7109
7110/**
7111 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7112 * @phba: pointer to lpfc hba data structure.
7113 *
7114 * This routine is invoked to release the MSI-X vectors and then disable the
7115 * MSI-X interrupt mode to device with SLI-4 interface spec.
7116 **/
7117static void
7118lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7119{
7120	int index;
7121
7122	/* Free up MSI-X multi-message vectors */
7123	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7124
7125	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7126		free_irq(phba->sli4_hba.msix_entries[index].vector,
7127			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7128
7129	/* Disable MSI-X */
7130	pci_disable_msix(phba->pcidev);
7131
7132	return;
7133}
7134
7135/**
7136 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7137 * @phba: pointer to lpfc hba data structure.
7138 *
7139 * This routine is invoked to enable the MSI interrupt mode to device with
7140 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7141 * to enable the MSI vector. The device driver is responsible for calling
7142 * the request_irq() to register MSI vector with a interrupt the handler,
7143 * which is done in this function.
7144 *
7145 * Return codes
7146 * 	0 - successful
7147 * 	other values - error
7148 **/
7149static int
7150lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7151{
7152	int rc, index;
7153
7154	rc = pci_enable_msi(phba->pcidev);
7155	if (!rc)
7156		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7157				"0487 PCI enable MSI mode success.\n");
7158	else {
7159		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7160				"0488 PCI enable MSI mode failed (%d)\n", rc);
7161		return rc;
7162	}
7163
7164	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7165			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7166	if (rc) {
7167		pci_disable_msi(phba->pcidev);
7168		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7169				"0490 MSI request_irq failed (%d)\n", rc);
7170		return rc;
7171	}
7172
7173	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7174		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7175		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7176	}
7177
7178	return 0;
7179}
7180
7181/**
7182 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7183 * @phba: pointer to lpfc hba data structure.
7184 *
7185 * This routine is invoked to disable the MSI interrupt mode to device with
7186 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
7187 * has done request_irq() on before calling pci_disable_msi(). Failure to do
7188 * so results in a BUG_ON() and the device is left with MSI enabled, leaking
7189 * its vector.
7190 **/
7191static void
7192lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7193{
7194	free_irq(phba->pcidev->irq, phba);
7195	pci_disable_msi(phba->pcidev);
7196	return;
7197}
7198
7199/**
7200 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7201 * @phba: pointer to lpfc hba data structure.
7202 *
7203 * This routine is invoked to enable device interrupt and associate driver's
7204 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7205 * interface spec. Depends on the interrupt mode configured to the driver,
7206 * the driver will try to fallback from the configured interrupt mode to an
7207 * interrupt mode which is supported by the platform, kernel, and device in
7208 * the order of:
7209 * MSI-X -> MSI -> IRQ.
7210 *
7211 * Return codes
7212 * 	0 - successful
7213 * 	other values - error
7214 **/
7215static uint32_t
7216lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7217{
7218	uint32_t intr_mode = LPFC_INTR_ERROR;
7219	int retval, index;
7220
7221	if (cfg_mode == 2) {
7222		/* Preparation before conf_msi mbox cmd */
7223		retval = 0;
7224		if (!retval) {
7225			/* Now, try to enable MSI-X interrupt mode */
7226			retval = lpfc_sli4_enable_msix(phba);
7227			if (!retval) {
7228				/* Indicate initialization to MSI-X mode */
7229				phba->intr_type = MSIX;
7230				intr_mode = 2;
7231			}
7232		}
7233	}
7234
7235	/* Fallback to MSI if MSI-X initialization failed */
7236	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7237		retval = lpfc_sli4_enable_msi(phba);
7238		if (!retval) {
7239			/* Indicate initialization to MSI mode */
7240			phba->intr_type = MSI;
7241			intr_mode = 1;
7242		}
7243	}
7244
7245	/* Fall back to INTx if both MSI-X and MSI initialization failed */
7246	if (phba->intr_type == NONE) {
7247		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7248				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7249		if (!retval) {
7250			/* Indicate initialization to INTx mode */
7251			phba->intr_type = INTx;
7252			intr_mode = 0;
7253			for (index = 0; index < phba->cfg_fcp_eq_count;
7254			     index++) {
7255				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7256				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7257			}
7258		}
7259	}
7260	return intr_mode;
7261}
7262
7263/**
7264 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7265 * @phba: pointer to lpfc hba data structure.
7266 *
7267 * This routine is invoked to disable device interrupt and disassociate
7268 * the driver's interrupt handler(s) from interrupt vector(s) to device
7269 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7270 * will release the interrupt vector(s) for the message signaled interrupt.
7271 **/
7272static void
7273lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7274{
7275	/* Disable the currently initialized interrupt mode */
7276	if (phba->intr_type == MSIX)
7277		lpfc_sli4_disable_msix(phba);
7278	else if (phba->intr_type == MSI)
7279		lpfc_sli4_disable_msi(phba);
7280	else if (phba->intr_type == INTx)
7281		free_irq(phba->pcidev->irq, phba);
7282
7283	/* Reset interrupt management states */
7284	phba->intr_type = NONE;
7285	phba->sli.slistat.sli_intr = 0;
7286
7287	return;
7288}
7289
7290/**
7291 * lpfc_unset_hba - Unset SLI3 hba device initialization
7292 * @phba: pointer to lpfc hba data structure.
7293 *
7294 * This routine is invoked to undo the HBA device initialization steps for
7295 * a device with SLI-3 interface spec.
7296 **/
7297static void
7298lpfc_unset_hba(struct lpfc_hba *phba)
7299{
7300	struct lpfc_vport *vport = phba->pport;
7301	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7302
7303	spin_lock_irq(shost->host_lock);
7304	vport->load_flag |= FC_UNLOADING;
7305	spin_unlock_irq(shost->host_lock);
7306
7307	lpfc_stop_hba_timers(phba);
7308
7309	phba->pport->work_port_events = 0;
7310
7311	lpfc_sli_hba_down(phba);
7312
7313	lpfc_sli_brdrestart(phba);
7314
7315	lpfc_sli_disable_intr(phba);
7316
7317	return;
7318}
7319
7320/**
7321 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7322 * @phba: pointer to lpfc hba data structure.
7323 *
7324 * This routine is invoked to undo the HBA device initialization steps for
7325 * a device with SLI-4 interface spec.
7326 **/
7327static void
7328lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7329{
7330	struct lpfc_vport *vport = phba->pport;
7331	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7332
7333	spin_lock_irq(shost->host_lock);
7334	vport->load_flag |= FC_UNLOADING;
7335	spin_unlock_irq(shost->host_lock);
7336
7337	phba->pport->work_port_events = 0;
7338
7339	lpfc_sli4_hba_down(phba);
7340
7341	lpfc_sli4_disable_intr(phba);
7342
7343	return;
7344}
7345
7346/**
7347 * lpfc_sli4_hba_unset - Unset the fcoe hba
7348 * @phba: Pointer to HBA context object.
7349 *
7350 * This function is called in the SLI4 code path to reset the HBA's FCoE
7351 * function. The caller is not required to hold any lock. This routine
7352 * issues PCI function reset mailbox command to reset the FCoE function.
7353 * At the end of the function, it calls lpfc_hba_down_post function to
7354 * free any pending commands.
7355 **/
7356static void
7357lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7358{
7359	int wait_cnt = 0;
7360	LPFC_MBOXQ_t *mboxq;
7361
7362	lpfc_stop_hba_timers(phba);
7363	phba->sli4_hba.intr_enable = 0;
7364
7365	/*
7366	 * Gracefully wait out any potentially outstanding asynchronous
7367	 * mailbox command.
7368	 */
7369
7370	/* First, block any pending async mailbox command from being posted */
7371	spin_lock_irq(&phba->hbalock);
7372	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7373	spin_unlock_irq(&phba->hbalock);
7374	/* Now, try to wait it out if we can */
7375	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7376		msleep(10);
7377		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7378			break;
7379	}
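	/* The wait above is bounded at LPFC_ACTIVE_MBOX_WAIT_CNT iterations
	 * of 10ms each before the command is forcefully released below. */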
7380	/* Forcefully release the outstanding mailbox command if timed out */
7381	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7382		spin_lock_irq(&phba->hbalock);
7383		mboxq = phba->sli.mbox_active;
7384		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7385		__lpfc_mbox_cmpl_put(phba, mboxq);
7386		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7387		phba->sli.mbox_active = NULL;
7388		spin_unlock_irq(&phba->hbalock);
7389	}
7390
7391	/* Tear down the queues in the HBA */
7392	lpfc_sli4_queue_unset(phba);
7393
7394	/* Disable PCI subsystem interrupt */
7395	lpfc_sli4_disable_intr(phba);
7396
7397	/* The kthread_stop() signal triggers work_done one more time */
7398	kthread_stop(phba->worker_thread);
7399
7400	/* Stop the SLI4 device port */
7401	phba->pport->work_port_events = 0;
7402}
7403
7404/**
7405 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7406 * @phba: Pointer to HBA context object.
7407 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7408 *
7409 * This function is called in the SLI4 code path to read the port's
7410 * sli4 capabilities.
7411 *
7412 * This function may be called from any context that can block-wait
7413 * for the completion.  The expectation is that this routine is called
7414 * typically from probe_one or from the online routine.
7415 **/
7416int
7417lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7418{
7419	int rc;
7420	struct lpfc_mqe *mqe;
7421	struct lpfc_pc_sli4_params *sli4_params;
7422	uint32_t mbox_tmo;
7423
7424	rc = 0;
7425	mqe = &mboxq->u.mqe;
7426
7427	/* Read the port's SLI4 Parameters port capabilities */
7428	lpfc_sli4_params(mboxq);
7429	if (!phba->sli4_hba.intr_enable)
7430		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7431	else {
7432		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7433		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7434	}
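	/* With interrupts not yet enabled the command is polled for
	 * completion; otherwise this block-waits up to the mailbox timeout
	 * computed above. */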
7435
7436	if (unlikely(rc))
7437		return 1;
7438
7439	sli4_params = &phba->sli4_hba.pc_sli4_params;
7440	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7441	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7442	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7443	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7444					     &mqe->un.sli4_params);
7445	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7446					     &mqe->un.sli4_params);
7447	sli4_params->proto_types = mqe->un.sli4_params.word3;
7448	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7449	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7450	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7451	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7452	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7453	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7454	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7455	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7456	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7457	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7458	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7459	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7460	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7461	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7462	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7463	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7464	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7465	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7466	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7467	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7468	return rc;
7469}
7470
7471/**
7472 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7473 * @pdev: pointer to PCI device
7474 * @pid: pointer to PCI device identifier
7475 *
7476 * This routine is to be called to attach a device with SLI-3 interface spec
7477 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7478 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7479 * information of the device and driver to see whether the driver states
7480 * that it can support this kind of device. If the match is successful, the
7481 * driver core invokes this routine. If this routine determines it can claim
7482 * the HBA, it does all the initialization needed to handle the HBA properly.
7483 *
7484 * Return code
7485 * 	0 - driver can claim the device
7486 * 	negative value - driver cannot claim the device
7487 **/
7488static int __devinit
7489lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7490{
7491	struct lpfc_hba   *phba;
7492	struct lpfc_vport *vport = NULL;
7493	struct Scsi_Host  *shost = NULL;
7494	int error;
7495	uint32_t cfg_mode, intr_mode;
7496
7497	/* Allocate memory for HBA structure */
7498	phba = lpfc_hba_alloc(pdev);
7499	if (!phba)
7500		return -ENOMEM;
7501
7502	/* Perform generic PCI device enabling operation */
7503	error = lpfc_enable_pci_dev(phba);
7504	if (error) {
7505		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7506				"1401 Failed to enable pci device.\n");
7507		goto out_free_phba;
7508	}
7509
7510	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
7511	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7512	if (error)
7513		goto out_disable_pci_dev;
7514
7515	/* Set up SLI-3 specific device PCI memory space */
7516	error = lpfc_sli_pci_mem_setup(phba);
7517	if (error) {
7518		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7519				"1402 Failed to set up pci memory space.\n");
7520		goto out_disable_pci_dev;
7521	}
7522
7523	/* Set up phase-1 common device driver resources */
7524	error = lpfc_setup_driver_resource_phase1(phba);
7525	if (error) {
7526		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7527				"1403 Failed to set up driver resource.\n");
7528		goto out_unset_pci_mem_s3;
7529	}
7530
7531	/* Set up SLI-3 specific device driver resources */
7532	error = lpfc_sli_driver_resource_setup(phba);
7533	if (error) {
7534		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7535				"1404 Failed to set up driver resource.\n");
7536		goto out_unset_pci_mem_s3;
7537	}
7538
7539	/* Initialize and populate the iocb list per host */
7540	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7541	if (error) {
7542		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7543				"1405 Failed to initialize iocb list.\n");
7544		goto out_unset_driver_resource_s3;
7545	}
7546
7547	/* Set up common device driver resources */
7548	error = lpfc_setup_driver_resource_phase2(phba);
7549	if (error) {
7550		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7551				"1406 Failed to set up driver resource.\n");
7552		goto out_free_iocb_list;
7553	}
7554
7555	/* Create SCSI host to the physical port */
7556	error = lpfc_create_shost(phba);
7557	if (error) {
7558		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7559				"1407 Failed to create scsi host.\n");
7560		goto out_unset_driver_resource;
7561	}
7562
7563	/* Configure sysfs attributes */
7564	vport = phba->pport;
7565	error = lpfc_alloc_sysfs_attr(vport);
7566	if (error) {
7567		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7568				"1476 Failed to allocate sysfs attr\n");
7569		goto out_destroy_shost;
7570	}
7571
7572	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7573	/* Now, try to enable interrupt and bring up the device */
7574	cfg_mode = phba->cfg_use_msi;
7575	while (true) {
7576		/* Put device to a known state before enabling interrupt */
7577		lpfc_stop_port(phba);
7578		/* Configure and enable interrupt */
7579		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7580		if (intr_mode == LPFC_INTR_ERROR) {
7581			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7582					"0431 Failed to enable interrupt.\n");
7583			error = -ENODEV;
7584			goto out_free_sysfs_attr;
7585		}
7586		/* SLI-3 HBA setup */
7587		if (lpfc_sli_hba_setup(phba)) {
7588			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7589					"1477 Failed to set up hba\n");
7590			error = -ENODEV;
7591			goto out_remove_device;
7592		}
7593
7594		/* Wait 50ms for the interrupts of previous mailbox commands */
7595		msleep(50);
7596		/* Check active interrupts on message signaled interrupts */
7597		if (intr_mode == 0 ||
7598		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7599			/* Log the current active interrupt mode */
7600			phba->intr_mode = intr_mode;
7601			lpfc_log_intr_mode(phba, intr_mode);
7602			break;
7603		} else {
7604			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7605					"0447 Configure interrupt mode (%d) "
7606					"failed active interrupt test.\n",
7607					intr_mode);
7608			/* Disable the current interrupt mode */
7609			lpfc_sli_disable_intr(phba);
7610			/* Try next level of interrupt mode */
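			/* intr_mode 2/1/0 corresponds to MSI-X/MSI/INTx, so
			 * decrementing walks down the fallback ladder. */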
7611			cfg_mode = --intr_mode;
7612		}
7613	}
7614
7615	/* Perform post initialization setup */
7616	lpfc_post_init_setup(phba);
7617
7618	/* Check if there are static vports to be created. */
7619	lpfc_create_static_vport(phba);
7620
7621	return 0;
7622
7623out_remove_device:
7624	lpfc_unset_hba(phba);
7625out_free_sysfs_attr:
7626	lpfc_free_sysfs_attr(vport);
7627out_destroy_shost:
7628	lpfc_destroy_shost(phba);
7629out_unset_driver_resource:
7630	lpfc_unset_driver_resource_phase2(phba);
7631out_free_iocb_list:
7632	lpfc_free_iocb_list(phba);
7633out_unset_driver_resource_s3:
7634	lpfc_sli_driver_resource_unset(phba);
7635out_unset_pci_mem_s3:
7636	lpfc_sli_pci_mem_unset(phba);
7637out_disable_pci_dev:
7638	lpfc_disable_pci_dev(phba);
7639	if (shost)
7640		scsi_host_put(shost);
7641out_free_phba:
7642	lpfc_hba_free(phba);
7643	return error;
7644}
7645
7646/**
7647 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7648 * @pdev: pointer to PCI device
7649 *
7650 * This routine is to be called to detach a device with SLI-3 interface
7651 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7652 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7653 * device to be removed from the PCI subsystem properly.
7654 **/
7655static void __devexit
7656lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7657{
7658	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
7659	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7660	struct lpfc_vport **vports;
7661	struct lpfc_hba   *phba = vport->phba;
7662	int i;
7663	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7664
7665	spin_lock_irq(&phba->hbalock);
7666	vport->load_flag |= FC_UNLOADING;
7667	spin_unlock_irq(&phba->hbalock);
7668
7669	lpfc_free_sysfs_attr(vport);
7670
7671	/* Release all the vports against this physical port */
7672	vports = lpfc_create_vport_work_array(phba);
7673	if (vports != NULL)
7674		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7675			fc_vport_terminate(vports[i]->fc_vport);
7676	lpfc_destroy_vport_work_array(phba, vports);
7677
7678	/* Remove FC host and then SCSI host with the physical port */
7679	fc_remove_host(shost);
7680	scsi_remove_host(shost);
7681	lpfc_cleanup(vport);
7682
7683	/*
7684	 * Bring down the SLI Layer. This step disables all interrupts,
7685	 * clears the rings, discards all mailbox commands, and resets
7686	 * the HBA.
7687	 */
7688
7689	/* HBA interrupt will be disabled after this call */
7690	lpfc_sli_hba_down(phba);
7691	/* Stop kthread signal shall trigger work_done one more time */
7692	/* The kthread_stop() signal triggers work_done one more time */
7693	/* Final cleanup of txcmplq and reset the HBA */
7694	lpfc_sli_brdrestart(phba);
7695
7696	lpfc_stop_hba_timers(phba);
7697	spin_lock_irq(&phba->hbalock);
7698	list_del_init(&vport->listentry);
7699	spin_unlock_irq(&phba->hbalock);
7700
7701	lpfc_debugfs_terminate(vport);
7702
7703	/* Disable interrupt */
7704	lpfc_sli_disable_intr(phba);
7705
7706	pci_set_drvdata(pdev, NULL);
7707	scsi_host_put(shost);
7708
7709	/*
7710	 * Call scsi_free before mem_free since scsi bufs are released to their
7711	 * corresponding pools here.
7712	 */
7713	lpfc_scsi_free(phba);
7714	lpfc_mem_free_all(phba);
7715
7716	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7717			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7718
7719	/* Free resources associated with SLI2 interface */
7720	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7721			  phba->slim2p.virt, phba->slim2p.phys);
7722
7723	/* unmap adapter SLIM and Control Registers */
7724	iounmap(phba->ctrl_regs_memmap_p);
7725	iounmap(phba->slim_memmap_p);
7726
7727	lpfc_hba_free(phba);
7728
7729	pci_release_selected_regions(pdev, bars);
7730	pci_disable_device(pdev);
7731}
7732
7733/**
7734 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7735 * @pdev: pointer to PCI device
7736 * @msg: power management message
7737 *
7738 * This routine is to be called from the kernel's PCI subsystem to support
7739 * system Power Management (PM) to device with SLI-3 interface spec. When
7740 * PM invokes this method, it quiesces the device by stopping the driver's
7741 * worker thread for the device, turning off device's interrupt and DMA,
7742 * and bring the device offline. Note that as the driver implements the
7743 * minimum PM requirements to a power-aware driver's PM support for the
7744 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7745 * to the suspend() method call will be treated as SUSPEND and the driver will
7746 * fully reinitialize its device during resume() method call, the driver will
7747 * set device to PCI_D3hot state in PCI config space instead of setting it
7748 * according to the @msg provided by the PM.
7749 *
7750 * Return code
7751 * 	0 - driver suspended the device
7752 * 	Error otherwise
7753 **/
7754static int
7755lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7756{
7757	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7758	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7759
7760	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7761			"0473 PCI device Power Management suspend.\n");
7762
7763	/* Bring down the device */
7764	lpfc_offline_prep(phba);
7765	lpfc_offline(phba);
7766	kthread_stop(phba->worker_thread);
7767
7768	/* Disable interrupt from device */
7769	lpfc_sli_disable_intr(phba);
7770
7771	/* Save device state to PCI config space */
7772	pci_save_state(pdev);
7773	pci_set_power_state(pdev, PCI_D3hot);
7774
7775	return 0;
7776}
7777
7778/**
7779 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7780 * @pdev: pointer to PCI device
7781 *
7782 * This routine is to be called from the kernel's PCI subsystem to support
7783 * system Power Management (PM) to device with SLI-3 interface spec. When PM
7784 * invokes this method, it restores the device's PCI config space state and
7785 * fully reinitializes the device and brings it online. Note that as the
7786 * driver implements the minimum PM requirements to a power-aware driver's
7787 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
7788 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
7789 * driver will fully reinitialize its device during resume() method call,
7790 * the device will be set to PCI_D0 directly in PCI config space before
7791 * restoring the state.
7792 *
7793 * Return code
7794 * 	0 - driver suspended the device
7795 * 	0 - driver resumed the device
7796 **/
7797static int
7798lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7799{
7800	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7801	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7802	uint32_t intr_mode;
7803	int error;
7804
7805	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7806			"0452 PCI device Power Management resume.\n");
7807
7808	/* Restore device state from PCI config space */
7809	pci_set_power_state(pdev, PCI_D0);
7810	pci_restore_state(pdev);
7811
7812	/*
7813	 * As the new kernel behavior of pci_restore_state() API call clears
7814	 * device saved_state flag, need to save the restored state again.
7815	 */
7816	pci_save_state(pdev);
7817
7818	if (pdev->is_busmaster)
7819		pci_set_master(pdev);
7820
7821	/* Startup the kernel thread for this host adapter. */
7822	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7823					"lpfc_worker_%d", phba->brd_no);
7824	if (IS_ERR(phba->worker_thread)) {
7825		error = PTR_ERR(phba->worker_thread);
7826		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7827				"0434 PM resume failed to start worker "
7828				"thread: error=x%x.\n", error);
7829		return error;
7830	}
7831
7832	/* Configure and enable interrupt */
7833	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7834	if (intr_mode == LPFC_INTR_ERROR) {
7835		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7836				"0430 PM resume Failed to enable interrupt\n");
7837		return -EIO;
7838	} else
7839		phba->intr_mode = intr_mode;
7840
7841	/* Restart HBA and bring it online */
7842	lpfc_sli_brdrestart(phba);
7843	lpfc_online(phba);
7844
7845	/* Log the current active interrupt mode */
7846	lpfc_log_intr_mode(phba, phba->intr_mode);
7847
7848	return 0;
7849}
7850
7851/**
7852 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7853 * @phba: pointer to lpfc hba data structure.
7854 *
7855 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7856 * aborts all the outstanding SCSI I/Os to the pci device.
7857 **/
7858static void
7859lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7860{
7861	struct lpfc_sli *psli = &phba->sli;
7862	struct lpfc_sli_ring  *pring;
7863
7864	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7865			"2723 PCI channel I/O abort preparing for recovery\n");
7866
7867	/*
7868	 * There may be errored I/Os through the HBA; abort all I/Os on the
7869	 * txcmplq and let the SCSI mid-layer retry them to recover.
7870	 */
7871	pring = &psli->ring[psli->fcp_ring];
7872	lpfc_sli_abort_iocb_ring(phba, pring);
7873}
7874
7875/**
7876 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7877 * @phba: pointer to lpfc hba data structure.
7878 *
7879 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7880 * disables the device interrupt and pci device, and aborts the internal FCP
7881 * pending I/Os.
7882 **/
7883static void
7884lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7885{
7886	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7887			"2710 PCI channel disable preparing for reset\n");
7888
7889	/* Block any management I/Os to the device */
7890	lpfc_block_mgmt_io(phba);
7891
7892	/* Block all SCSI devices' I/Os on the host */
7893	lpfc_scsi_dev_block(phba);
7894
7895	/* stop all timers */
7896	lpfc_stop_hba_timers(phba);
7897
7898	/* Disable interrupt and pci device */
7899	lpfc_sli_disable_intr(phba);
7900	pci_disable_device(phba->pcidev);
7901
7902	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
7903	lpfc_sli_flush_fcp_rings(phba);
7904}
7905
7906/**
7907 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7908 * @phba: pointer to lpfc hba data structure.
7909 *
7910 * This routine is called to prepare the SLI3 device for PCI slot permanently
7911 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7912 * pending I/Os.
7913 **/
7914static void
7915lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7916{
7917	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7918			"2711 PCI channel permanent disable for failure\n");
7919	/* Block all SCSI devices' I/Os on the host */
7920	lpfc_scsi_dev_block(phba);
7921
7922	/* stop all timers */
7923	lpfc_stop_hba_timers(phba);
7924
7925	/* Clean up all driver's outstanding SCSI I/Os */
7926	lpfc_sli_flush_fcp_rings(phba);
7927}
7928
7929/**
7930 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7931 * @pdev: pointer to PCI device.
7932 * @state: the current PCI connection state.
7933 *
7934 * This routine is called from the PCI subsystem for I/O error handling to
7935 * device with SLI-3 interface spec. This function is called by the PCI
7936 * subsystem after a PCI bus error affecting this device has been detected.
7937 * When this function is invoked, it will need to stop all the I/Os and
7938 * interrupt(s) to the device. Once that is done, it will return
7939 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7940 * as desired.
7941 *
7942 * Return codes
7943 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7944 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7945 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7946 **/
7947static pci_ers_result_t
7948lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7949{
7950	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7951	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7952
7953	switch (state) {
7954	case pci_channel_io_normal:
7955		/* Non-fatal error, prepare for recovery */
7956		lpfc_sli_prep_dev_for_recover(phba);
7957		return PCI_ERS_RESULT_CAN_RECOVER;
7958	case pci_channel_io_frozen:
7959		/* Fatal error, prepare for slot reset */
7960		lpfc_sli_prep_dev_for_reset(phba);
7961		return PCI_ERS_RESULT_NEED_RESET;
7962	case pci_channel_io_perm_failure:
7963		/* Permanent failure, prepare for device down */
7964		lpfc_sli_prep_dev_for_perm_failure(phba);
7965		return PCI_ERS_RESULT_DISCONNECT;
7966	default:
7967		/* Unknown state, prepare and request slot reset */
7968		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7969				"0472 Unknown PCI error state: x%x\n", state);
7970		lpfc_sli_prep_dev_for_reset(phba);
7971		return PCI_ERS_RESULT_NEED_RESET;
7972	}
7973}
7974
7975/**
7976 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7977 * @pdev: pointer to PCI device.
7978 *
7979 * This routine is called from the PCI subsystem for error handling to
7980 * device with SLI-3 interface spec. This is called after PCI bus has been
7981 * reset to restart the PCI card from scratch, as if from a cold-boot.
7982 * During the PCI subsystem error recovery, after driver returns
7983 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7984 * recovery and then call this routine before calling the .resume method
7985 * to recover the device. This function will initialize the HBA device,
7986 * enable the interrupt, but will leave the HBA in an offline state,
7987 * not passing any I/O traffic.
7988 *
7989 * Return codes
7990 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7991 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7992 */
7993static pci_ers_result_t
7994lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7995{
7996	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7997	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7998	struct lpfc_sli *psli = &phba->sli;
7999	uint32_t intr_mode;
8000
8001	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8002	if (pci_enable_device_mem(pdev)) {
8003		printk(KERN_ERR "lpfc: Cannot re-enable "
8004			"PCI device after reset.\n");
8005		return PCI_ERS_RESULT_DISCONNECT;
8006	}
8007
8008	pci_restore_state(pdev);
8009
8010	/*
8011	 * As the new kernel behavior of pci_restore_state() API call clears
8012	 * device saved_state flag, need to save the restored state again.
8013	 */
8014	pci_save_state(pdev);
8015
8016	if (pdev->is_busmaster)
8017		pci_set_master(pdev);
8018
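	/* The adapter state was lost across the slot reset; mark the SLI
	 * layer inactive so the restart below fully reinitializes it. */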
8019	spin_lock_irq(&phba->hbalock);
8020	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8021	spin_unlock_irq(&phba->hbalock);
8022
8023	/* Configure and enable interrupt */
8024	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8025	if (intr_mode == LPFC_INTR_ERROR) {
8026		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8027				"0427 Cannot re-enable interrupt after "
8028				"slot reset.\n");
8029		return PCI_ERS_RESULT_DISCONNECT;
8030	} else
8031		phba->intr_mode = intr_mode;
8032
8033	/* Take device offline, it will perform cleanup */
8034	lpfc_offline_prep(phba);
8035	lpfc_offline(phba);
8036	lpfc_sli_brdrestart(phba);
8037
8038	/* Log the current active interrupt mode */
8039	lpfc_log_intr_mode(phba, phba->intr_mode);
8040
8041	return PCI_ERS_RESULT_RECOVERED;
8042}
8043
8044/**
8045 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8046 * @pdev: pointer to PCI device
8047 *
8048 * This routine is called from the PCI subsystem for error handling to device
8049 * with SLI-3 interface spec. It is called when kernel error recovery tells
8050 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8051 * error recovery. After this call, traffic can start to flow from this device
8052 * again.
8053 */
8054static void
8055lpfc_io_resume_s3(struct pci_dev *pdev)
8056{
8057	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8058	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8059
8060	/* Bring device online, it will be no-op for non-fatal error resume */
8061	lpfc_online(phba);
8062
8063	/* Clean up Advanced Error Reporting (AER) if needed */
8064	if (phba->hba_flag & HBA_AER_ENABLED)
8065		pci_cleanup_aer_uncorrect_error_status(pdev);
8066}
8067
8068/**
8069 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8070 * @phba: pointer to lpfc hba data structure.
8071 *
8072 * Returns the number of ELS/CT IOCBs to reserve.
8073 **/
8074int
8075lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8076{
8077	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8078
8079	if (phba->sli_rev == LPFC_SLI_REV4) {
8080		if (max_xri <= 100)
8081			return 10;
8082		else if (max_xri <= 256)
8083			return 25;
8084		else if (max_xri <= 512)
8085			return 50;
8086		else if (max_xri <= 1024)
8087			return 100;
8088		else
8089			return 150;
8090	} else
8091		return 0;
8092}
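
/*
 * Worked example of the tiers above: a port configured with
 * max_xri = 1024 reserves 100 IOCBs for ELS/CT traffic, while an
 * SLI-3 port (sli_rev != LPFC_SLI_REV4) reserves none here.
 */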
8093
8094/**
8095 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8096 * @pdev: pointer to PCI device
8097 * @pid: pointer to PCI device identifier
8098 *
8099 * This routine is called from the kernel's PCI subsystem to attach a device
8100 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8101 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8102 * information of the device and driver to see whether the driver states that
8103 * it can support this kind of device. If the match is successful, the driver
8104 * core invokes this routine. If this routine determines it can claim the HBA,
8105 * it does all the initialization that it needs to do to handle the HBA
8106 * properly.
8107 *
8108 * Return code
8109 * 	0 - driver can claim the device
8110 * 	negative value - driver cannot claim the device
8111 **/
8112static int __devinit
8113lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8114{
8115	struct lpfc_hba   *phba;
8116	struct lpfc_vport *vport = NULL;
8117	struct Scsi_Host  *shost = NULL;
8118	int error;
8119	uint32_t cfg_mode, intr_mode;
8120	int mcnt;
8121
8122	/* Allocate memory for HBA structure */
8123	phba = lpfc_hba_alloc(pdev);
8124	if (!phba)
8125		return -ENOMEM;
8126
8127	/* Perform generic PCI device enabling operation */
8128	error = lpfc_enable_pci_dev(phba);
8129	if (error) {
8130		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8131				"1409 Failed to enable pci device.\n");
8132		goto out_free_phba;
8133	}
8134
8135	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
8136	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8137	if (error)
8138		goto out_disable_pci_dev;
8139
8140	/* Set up SLI-4 specific device PCI memory space */
8141	error = lpfc_sli4_pci_mem_setup(phba);
8142	if (error) {
8143		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8144				"1410 Failed to set up pci memory space.\n");
8145		goto out_disable_pci_dev;
8146	}
8147
8148	/* Set up phase-1 common device driver resources */
8149	error = lpfc_setup_driver_resource_phase1(phba);
8150	if (error) {
8151		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8152				"1411 Failed to set up driver resource.\n");
8153		goto out_unset_pci_mem_s4;
8154	}
8155
8156	/* Set up SLI-4 Specific device driver resources */
8157	error = lpfc_sli4_driver_resource_setup(phba);
8158	if (error) {
8159		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8160				"1412 Failed to set up driver resource.\n");
8161		goto out_unset_pci_mem_s4;
8162	}
8163
8164	/* Initialize and populate the iocb list per host */
8165
8166	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8167			"2821 initialize iocb list %d.\n",
8168			phba->cfg_iocb_cnt*1024);
8169	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8170
8171	if (error) {
8172		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8173				"1413 Failed to initialize iocb list.\n");
8174		goto out_unset_driver_resource_s4;
8175	}
8176
8177	/* Set up common device driver resources */
8178	error = lpfc_setup_driver_resource_phase2(phba);
8179	if (error) {
8180		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8181				"1414 Failed to set up driver resource.\n");
8182		goto out_free_iocb_list;
8183	}
8184
8185	/* Create SCSI host to the physical port */
8186	error = lpfc_create_shost(phba);
8187	if (error) {
8188		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8189				"1415 Failed to create scsi host.\n");
8190		goto out_unset_driver_resource;
8191	}
8192
8193	/* Configure sysfs attributes */
8194	vport = phba->pport;
8195	error = lpfc_alloc_sysfs_attr(vport);
8196	if (error) {
8197		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8198				"1416 Failed to allocate sysfs attr\n");
8199		goto out_destroy_shost;
8200	}
8201
8202	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8203	/* Now, try to enable interrupt and bring up the device */
8204	cfg_mode = phba->cfg_use_msi;
8205	while (true) {
8206		/* Put device to a known state before enabling interrupt */
8207		lpfc_stop_port(phba);
8208		/* Configure and enable interrupt */
8209		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8210		if (intr_mode == LPFC_INTR_ERROR) {
8211			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8212					"0426 Failed to enable interrupt.\n");
8213			error = -ENODEV;
8214			goto out_free_sysfs_attr;
8215		}
8216		/* Default to single FCP EQ for non-MSI-X */
8217		if (phba->intr_type != MSIX)
8218			phba->cfg_fcp_eq_count = 1;
8219		else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
8220			phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
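		/* Note: one MSI-X vector is dedicated to the slow path, so
		 * only msix_vec_nr - 1 vectors remain for fast-path EQs. */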
8221		/* Set up SLI-4 HBA */
8222		if (lpfc_sli4_hba_setup(phba)) {
8223			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8224					"1421 Failed to set up hba\n");
8225			error = -ENODEV;
8226			goto out_disable_intr;
8227		}
8228
8229		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
8230		if (intr_mode != 0)
8231			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8232							    LPFC_ACT_INTR_CNT);
8233
8234		/* Check active interrupts received only for MSI/MSI-X */
8235		if (intr_mode == 0 ||
8236		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8237			/* Log the current active interrupt mode */
8238			phba->intr_mode = intr_mode;
8239			lpfc_log_intr_mode(phba, intr_mode);
8240			break;
8241		}
8242		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8243				"0451 Configure interrupt mode (%d) "
8244				"failed active interrupt test.\n",
8245				intr_mode);
8246		/* Unset the previous SLI-4 HBA setup */
8247		lpfc_sli4_unset_hba(phba);
8248		/* Try next level of interrupt mode */
8249		cfg_mode = --intr_mode;
8250	}
8251
8252	/* Perform post initialization setup */
8253	lpfc_post_init_setup(phba);
8254
8255	/* Check if there are static vports to be created. */
8256	lpfc_create_static_vport(phba);
8257
8258	return 0;
8259
8260out_disable_intr:
8261	lpfc_sli4_disable_intr(phba);
8262out_free_sysfs_attr:
8263	lpfc_free_sysfs_attr(vport);
8264out_destroy_shost:
8265	lpfc_destroy_shost(phba);
8266out_unset_driver_resource:
8267	lpfc_unset_driver_resource_phase2(phba);
8268out_free_iocb_list:
8269	lpfc_free_iocb_list(phba);
8270out_unset_driver_resource_s4:
8271	lpfc_sli4_driver_resource_unset(phba);
8272out_unset_pci_mem_s4:
8273	lpfc_sli4_pci_mem_unset(phba);
8274out_disable_pci_dev:
8275	lpfc_disable_pci_dev(phba);
8276	if (shost)
8277		scsi_host_put(shost);
8278out_free_phba:
8279	lpfc_hba_free(phba);
8280	return error;
8281}
8282
8283/**
8284 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8285 * @pdev: pointer to PCI device
8286 *
8287 * This routine is called from the kernel's PCI subsystem to detach a device
8288 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8289 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8290 * device to be removed from the PCI subsystem properly.
8291 **/
8292static void __devexit
8293lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8294{
8295	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8296	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8297	struct lpfc_vport **vports;
8298	struct lpfc_hba *phba = vport->phba;
8299	int i;
8300
8301	/* Mark the device unloading flag */
8302	spin_lock_irq(&phba->hbalock);
8303	vport->load_flag |= FC_UNLOADING;
8304	spin_unlock_irq(&phba->hbalock);
8305
8306	/* Free the HBA sysfs attributes */
8307	lpfc_free_sysfs_attr(vport);
8308
8309	/* Release all the vports against this physical port */
8310	vports = lpfc_create_vport_work_array(phba);
8311	if (vports != NULL)
8312		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8313			fc_vport_terminate(vports[i]->fc_vport);
8314	lpfc_destroy_vport_work_array(phba, vports);
8315
8316	/* Remove FC host and then SCSI host with the physical port */
8317	fc_remove_host(shost);
8318	scsi_remove_host(shost);
8319
8320	/* Perform cleanup on the physical port */
8321	lpfc_cleanup(vport);
8322
8323	/*
8324	 * Bring down the SLI Layer. This step disables all interrupts,
8325	 * clears the rings, discards all mailbox commands, and resets
8326	 * the HBA FCoE function.
8327	 */
8328	lpfc_debugfs_terminate(vport);
8329	lpfc_sli4_hba_unset(phba);
8330
8331	spin_lock_irq(&phba->hbalock);
8332	list_del_init(&vport->listentry);
8333	spin_unlock_irq(&phba->hbalock);
8334
8335	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
8336	 * buffers are released to their corresponding pools here.
8337	 */
8338	lpfc_scsi_free(phba);
8339	lpfc_sli4_driver_resource_unset(phba);
8340
8341	/* Unmap adapter Control and Doorbell registers */
8342	lpfc_sli4_pci_mem_unset(phba);
8343
8344	/* Release PCI resources and disable device's PCI function */
8345	scsi_host_put(shost);
8346	lpfc_disable_pci_dev(phba);
8347
8348	/* Finally, free the driver's device data structure */
8349	lpfc_hba_free(phba);
8350
8351	return;
8352}
8353
8354/**
8355 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8356 * @pdev: pointer to PCI device
8357 * @msg: power management message
8358 *
8359 * This routine is called from the kernel's PCI subsystem to support system
8360 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8361 * this method, it quiesces the device by stopping the driver's worker
8362 * thread for the device, turning off device's interrupt and DMA, and bring
8363 * the device offline. Note that as the driver implements the minimum PM
8364 * requirements to a power-aware driver's PM support for suspend/resume -- all
8365 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
8366 * method call will be treated as SUSPEND and the driver will fully
8367 * reinitialize its device during resume() method call, the driver will set
8368 * device to PCI_D3hot state in PCI config space instead of setting it
8369 * according to the @msg provided by the PM.
8370 *
8371 * Return code
8372 * 	0 - driver suspended the device
8373 * 	Error otherwise
8374 **/
8375static int
8376lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8377{
8378	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8379	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8380
8381	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8382			"2843 PCI device Power Management suspend.\n");
8383
8384	/* Bring down the device */
8385	lpfc_offline_prep(phba);
8386	lpfc_offline(phba);
8387	kthread_stop(phba->worker_thread);
8388
8389	/* Disable interrupt from device */
8390	lpfc_sli4_disable_intr(phba);
8391
8392	/* Save device state to PCI config space */
8393	pci_save_state(pdev);
8394	pci_set_power_state(pdev, PCI_D3hot);
8395
8396	return 0;
8397}
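
/*
 * For contrast with the minimal PM policy above, here is a hedged sketch
 * (not part of the lpfc driver and kept out of the build) of how a
 * suspend() method that did honor the PM message type might pick a power
 * state. The helper name is hypothetical; the PM_EVENT_* constants and
 * pci_choose_state() are standard kernel interfaces.
 */
#if 0	/* illustration only -- not compiled */
static pci_power_t
example_pick_power_state(struct pci_dev *pdev, pm_message_t msg)
{
	switch (msg.event) {
	case PM_EVENT_FREEZE:
		/* Hibernation image creation: keep the device in D0 */
		return PCI_D0;
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
	default:
		/* Let the PCI core map the PM message to a device state */
		return pci_choose_state(pdev, msg);
	}
}
#endif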
8398
8399/**
8400 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8401 * @pdev: pointer to PCI device
8402 *
8403 * This routine is called from the kernel's PCI subsystem to support system
8404 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
8405 * invokes this method, it restores the device's PCI config space state,
8406 * fully reinitializes the device, and brings it online. Note that the
8407 * driver implements only the minimum PM requirements for a power-aware
8408 * driver's suspend/resume support: all possible PM messages (SUSPEND,
8409 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
8410 * and the driver fully reinitializes its device during the resume() method
8411 * call. Consequently, the device is set to PCI_D0 directly in PCI config
8412 * space before its saved state is restored.
8413 *
8414 * Return code
8415 * 	0 - driver resumed the device
8416 * 	Error otherwise
8417 **/
8418static int
8419lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8420{
8421	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8422	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8423	uint32_t intr_mode;
8424	int error;
8425
8426	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8427			"0292 PCI device Power Management resume.\n");
8428
8429	/* Restore device state from PCI config space */
8430	pci_set_power_state(pdev, PCI_D0);
8431	pci_restore_state(pdev);
8432
8433	/*
8434	 * As the newer kernel behavior of pci_restore_state() clears the
8435	 * device's saved_state flag, the restored state must be saved again.
8436	 */
8437	pci_save_state(pdev);
8438
8439	if (pdev->is_busmaster)
8440		pci_set_master(pdev);
8441
8442	/* Start up the kernel thread for this host adapter. */
8443	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8444					"lpfc_worker_%d", phba->brd_no);
8445	if (IS_ERR(phba->worker_thread)) {
8446		error = PTR_ERR(phba->worker_thread);
8447		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8448				"0293 PM resume failed to start worker "
8449				"thread: error=x%x.\n", error);
8450		return error;
8451	}
8452
8453	/* Configure and enable interrupt */
8454	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8455	if (intr_mode == LPFC_INTR_ERROR) {
8456		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8457				"0294 PM resume Failed to enable interrupt\n");
8458		return -EIO;
8459	} else
8460		phba->intr_mode = intr_mode;
8461
8462	/* Restart HBA and bring it online */
8463	lpfc_sli_brdrestart(phba);
8464	lpfc_online(phba);
8465
8466	/* Log the current active interrupt mode */
8467	lpfc_log_intr_mode(phba, phba->intr_mode);
8468
8469	return 0;
8470}
8471
8472/**
8473 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
8474 * @phba: pointer to lpfc hba data structure.
8475 *
8476 * This routine is called to prepare the SLI4 device for PCI slot recovery.
8477 * It aborts all the outstanding SCSI I/Os to the PCI device.
8478 **/
8479static void
8480lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
8481{
8482	struct lpfc_sli *psli = &phba->sli;
8483	struct lpfc_sli_ring  *pring;
8484
8485	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8486			"2828 PCI channel I/O abort preparing for recovery\n");
8487	/*
8488	 * There may be errored I/Os outstanding on the HBA; abort all I/Os
8489	 * on the txcmplq and let the SCSI mid-layer retry them to recover.
8490	 */
8491	pring = &psli->ring[psli->fcp_ring];
8492	lpfc_sli_abort_iocb_ring(phba, pring);
8493}
8494
8495/**
8496 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
8497 * @phba: pointer to lpfc hba data structure.
8498 *
8499 * This routine is called to prepare the SLI4 device for PCI slot reset. It
8500 * disables the device interrupt and the PCI device, and flushes all
8501 * pending internal FCP I/Os.
8502 **/
8503static void
8504lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
8505{
8506	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8507			"2826 PCI channel disable preparing for reset\n");
8508
8509	/* Block any management I/Os to the device */
8510	lpfc_block_mgmt_io(phba);
8511
8512	/* Block all SCSI devices' I/Os on the host */
8513	lpfc_scsi_dev_block(phba);
8514
8515	/* stop all timers */
8516	lpfc_stop_hba_timers(phba);
8517
8518	/* Disable interrupt and pci device */
8519	lpfc_sli4_disable_intr(phba);
8520	pci_disable_device(phba->pcidev);
8521
8522	/* Flush all of the driver's outstanding SCSI I/Os ahead of the reset */
8523	lpfc_sli_flush_fcp_rings(phba);
8524}
8525
8526/**
8527 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
8528 * @phba: pointer to lpfc hba data structure.
8529 *
8530 * This routine is called to prepare the SLI4 device for the PCI slot being
8531 * permanently disabled. It blocks the SCSI transport layer traffic and
8532 * flushes the pending FCP I/Os.
8533 **/
8534static void
8535lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8536{
8537	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8538			"2827 PCI channel permanent disable for failure\n");
8539
8540	/* Block all SCSI devices' I/Os on the host */
8541	lpfc_scsi_dev_block(phba);
8542
8543	/* stop all timers */
8544	lpfc_stop_hba_timers(phba);
8545
8546	/* Clean up all driver's outstanding SCSI I/Os */
8547	lpfc_sli_flush_fcp_rings(phba);
8548}
8549
8550/**
8551 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8552 * @pdev: pointer to PCI device.
8553 * @state: the current PCI connection state.
8554 *
8555 * This routine is called from the PCI subsystem for error handling for a
8556 * device with the SLI-4 interface spec. The PCI subsystem calls it after a
8557 * PCI bus error affecting this device has been detected. When this function
8558 * is invoked, it quiesces the device as appropriate for the reported
8559 * channel state and returns the corresponding pci_ers_result_t code
8560 * (CAN_RECOVER, NEED_RESET, or DISCONNECT) for proper recovery.
8561 *
8562 * Return codes
8563 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8564 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8565 **/
8566static pci_ers_result_t
8567lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8568{
8569	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8570	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8571
8572	switch (state) {
8573	case pci_channel_io_normal:
8574		/* Non-fatal error, prepare for recovery */
8575		lpfc_sli4_prep_dev_for_recover(phba);
8576		return PCI_ERS_RESULT_CAN_RECOVER;
8577	case pci_channel_io_frozen:
8578		/* Fatal error, prepare for slot reset */
8579		lpfc_sli4_prep_dev_for_reset(phba);
8580		return PCI_ERS_RESULT_NEED_RESET;
8581	case pci_channel_io_perm_failure:
8582		/* Permanent failure, prepare for device down */
8583		lpfc_sli4_prep_dev_for_perm_failure(phba);
8584		return PCI_ERS_RESULT_DISCONNECT;
8585	default:
8586		/* Unknown state, prepare and request slot reset */
8587		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8588				"2825 Unknown PCI error state: x%x\n", state);
8589		lpfc_sli4_prep_dev_for_reset(phba);
8590		return PCI_ERS_RESULT_NEED_RESET;
8591	}
8592}
8593
8594/**
8595 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
8596 * @pdev: pointer to PCI device.
8597 *
8598 * This routine is called from the PCI subsystem for error handling for a
8599 * device with the SLI-4 interface spec. It is called after the PCI bus has
8600 * been reset to restart the PCI card from scratch, as if from a cold boot.
8601 * During the PCI subsystem error recovery, after the driver returns
8602 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
8603 * recovery and then calls this routine before calling the .resume method
8604 * to recover the device. This function initializes the HBA device and
8605 * enables its interrupt, but it leaves the HBA in the offline state
8606 * without passing any I/O traffic.
8607 *
8608 * Return codes
8609 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8610 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8611 **/
8612static pci_ers_result_t
8613lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8614{
8615	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8616	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8617	struct lpfc_sli *psli = &phba->sli;
8618	uint32_t intr_mode;
8619
8620	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8621	if (pci_enable_device_mem(pdev)) {
8622		printk(KERN_ERR "lpfc: Cannot re-enable "
8623			"PCI device after reset.\n");
8624		return PCI_ERS_RESULT_DISCONNECT;
8625	}
8626
8627	pci_restore_state(pdev);
8628	if (pdev->is_busmaster)
8629		pci_set_master(pdev);
8630
8631	spin_lock_irq(&phba->hbalock);
8632	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8633	spin_unlock_irq(&phba->hbalock);
8634
8635	/* Configure and enable interrupt */
8636	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8637	if (intr_mode == LPFC_INTR_ERROR) {
8638		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8639				"2824 Cannot re-enable interrupt after "
8640				"slot reset.\n");
8641		return PCI_ERS_RESULT_DISCONNECT;
8642	} else
8643		phba->intr_mode = intr_mode;
8644
8645	/* Log the current active interrupt mode */
8646	lpfc_log_intr_mode(phba, phba->intr_mode);
8647
8648	return PCI_ERS_RESULT_RECOVERED;
8649}
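
/*
 * Note that clearing LPFC_SLI_ACTIVE in the routine above defers the actual
 * HBA restart: lpfc_io_resume_s4() checks this flag and performs the
 * offline/restart/online cycle once DMA can safely be re-enabled.
 */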
8650
8651/**
8652 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
8653 * @pdev: pointer to PCI device
8654 *
8655 * This routine is called from the PCI subsystem for error handling for a
8656 * device with the SLI-4 interface spec. It is called when kernel error
8657 * recovery tells the lpfc driver that it is OK to resume normal PCI
8658 * operation after PCI bus error recovery. After this call, traffic can
8659 * start to flow from this device again.
8660 **/
8661static void
8662lpfc_io_resume_s4(struct pci_dev *pdev)
8663{
8664	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8665	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8666
8667	/*
8668	 * In case of slot reset, the function reset is performed through a
8669	 * mailbox command, which requires DMA to be enabled; this operation
8670	 * therefore has to be deferred to the io_resume phase. Taking the
8671	 * device offline first performs the necessary cleanup.
8672	 */
8673	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
8674		/* Perform device reset */
8675		lpfc_offline_prep(phba);
8676		lpfc_offline(phba);
8677		lpfc_sli_brdrestart(phba);
8678		/* Bring the device back online */
8679		lpfc_online(phba);
8680	}
8681
8682	/* Clean up Advanced Error Reporting (AER) if needed */
8683	if (phba->hba_flag & HBA_AER_ENABLED)
8684		pci_cleanup_aer_uncorrect_error_status(pdev);
8685}
8686
8687/**
8688 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
8689 * @pdev: pointer to PCI device
8690 * @pid: pointer to PCI device identifier
8691 *
8692 * This routine is to be registered to the kernel's PCI subsystem. When an
8693 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
8694 * looks at the PCI device-specific information of the device and of the
8695 * driver to see whether the driver states that it can support this kind of
8696 * device. If the match is successful, the driver core invokes this routine.
8697 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
8698 * probing routine, which performs all the initialization needed to handle
8699 * the HBA device properly.
8700 *
8701 * Return code
8702 * 	0 - driver can claim the device
8703 * 	negative value - driver can not claim the device
8704 **/
8705static int __devinit
8706lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
8707{
8708	int rc;
8709	struct lpfc_sli_intf intf;
8710
8711	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
8712		return -ENODEV;
8713
8714	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
8715	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
8716		rc = lpfc_pci_probe_one_s4(pdev, pid);
8717	else
8718		rc = lpfc_pci_probe_one_s3(pdev, pid);
8719
8720	return rc;
8721}
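
/*
 * A hedged sketch (illustration only, kept out of the build) of the
 * shift-and-mask pattern behind bit-field accessors such as bf_get() used
 * above. The real lpfc accessors are macros defined in lpfc_hw4.h; the
 * names below are hypothetical simplifications.
 */
#if 0	/* illustration only -- not compiled */
static inline uint32_t
example_bf_get(uint32_t word, uint32_t shift, uint32_t mask)
{
	/* Right-shift the register word, then mask off the field's bits */
	return (word >> shift) & mask;
}

/* e.g. a 4-bit revision field at bits 4..7: example_bf_get(w, 4, 0xf) */
#endif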
8722
8723/**
8724 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
8725 * @pdev: pointer to PCI device
8726 *
8727 * This routine is to be registered to the kernel's PCI subsystem. When an
8728 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
8729 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
8730 * remove routine, which will perform all the necessary cleanup for the
8731 * device to be removed from the PCI subsystem properly.
8732 **/
8733static void __devexit
8734lpfc_pci_remove_one(struct pci_dev *pdev)
8735{
8736	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8737	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8738
8739	switch (phba->pci_dev_grp) {
8740	case LPFC_PCI_DEV_LP:
8741		lpfc_pci_remove_one_s3(pdev);
8742		break;
8743	case LPFC_PCI_DEV_OC:
8744		lpfc_pci_remove_one_s4(pdev);
8745		break;
8746	default:
8747		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8748				"1424 Invalid PCI device group: 0x%x\n",
8749				phba->pci_dev_grp);
8750		break;
8751	}
8752	return;
8753}
8754
8755/**
8756 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
8757 * @pdev: pointer to PCI device
8758 * @msg: power management message
8759 *
8760 * This routine is to be registered to the kernel's PCI subsystem to support
8761 * system Power Management (PM). When PM invokes this method, it dispatches
8762 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
8763 * suspend the device.
8764 *
8765 * Return code
8766 * 	0 - driver suspended the device
8767 * 	Error otherwise
8768 **/
8769static int
8770lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
8771{
8772	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8773	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8774	int rc = -ENODEV;
8775
8776	switch (phba->pci_dev_grp) {
8777	case LPFC_PCI_DEV_LP:
8778		rc = lpfc_pci_suspend_one_s3(pdev, msg);
8779		break;
8780	case LPFC_PCI_DEV_OC:
8781		rc = lpfc_pci_suspend_one_s4(pdev, msg);
8782		break;
8783	default:
8784		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8785				"1425 Invalid PCI device group: 0x%x\n",
8786				phba->pci_dev_grp);
8787		break;
8788	}
8789	return rc;
8790}
8791
8792/**
8793 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
8794 * @pdev: pointer to PCI device
8795 *
8796 * This routine is to be registered to the kernel's PCI subsystem to support
8797 * system Power Management (PM). When PM invokes this method, it dispatches
8798 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
8799 * resume the device.
8800 *
8801 * Return code
8802 * 	0 - driver resumed the device
8803 * 	Error otherwise
8804 **/
8805static int
8806lpfc_pci_resume_one(struct pci_dev *pdev)
8807{
8808	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8809	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8810	int rc = -ENODEV;
8811
8812	switch (phba->pci_dev_grp) {
8813	case LPFC_PCI_DEV_LP:
8814		rc = lpfc_pci_resume_one_s3(pdev);
8815		break;
8816	case LPFC_PCI_DEV_OC:
8817		rc = lpfc_pci_resume_one_s4(pdev);
8818		break;
8819	default:
8820		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8821				"1426 Invalid PCI device group: 0x%x\n",
8822				phba->pci_dev_grp);
8823		break;
8824	}
8825	return rc;
8826}
8827
8828/**
8829 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
8830 * @pdev: pointer to PCI device.
8831 * @state: the current PCI connection state.
8832 *
8833 * This routine is registered to the PCI subsystem for error handling. This
8834 * function is called by the PCI subsystem after a PCI bus error affecting
8835 * this device has been detected. When this routine is invoked, it dispatches
8836 * the action to the proper SLI-3 or SLI-4 device error detected handling
8837 * routine, which will perform the proper error detected operation.
8838 *
8839 * Return codes
8840 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8841 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8842 **/
8843static pci_ers_result_t
8844lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8845{
8846	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8847	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8848	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8849
8850	switch (phba->pci_dev_grp) {
8851	case LPFC_PCI_DEV_LP:
8852		rc = lpfc_io_error_detected_s3(pdev, state);
8853		break;
8854	case LPFC_PCI_DEV_OC:
8855		rc = lpfc_io_error_detected_s4(pdev, state);
8856		break;
8857	default:
8858		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8859				"1427 Invalid PCI device group: 0x%x\n",
8860				phba->pci_dev_grp);
8861		break;
8862	}
8863	return rc;
8864}
8865
8866/**
8867 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
8868 * @pdev: pointer to PCI device.
8869 *
8870 * This routine is registered to the PCI subsystem for error handling. This
8871 * function is called after PCI bus has been reset to restart the PCI card
8872 * from scratch, as if from a cold-boot. When this routine is invoked, it
8873 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
8874 * routine, which will perform the proper device reset.
8875 *
8876 * Return codes
8877 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8878 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8879 **/
8880static pci_ers_result_t
8881lpfc_io_slot_reset(struct pci_dev *pdev)
8882{
8883	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8884	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8885	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8886
8887	switch (phba->pci_dev_grp) {
8888	case LPFC_PCI_DEV_LP:
8889		rc = lpfc_io_slot_reset_s3(pdev);
8890		break;
8891	case LPFC_PCI_DEV_OC:
8892		rc = lpfc_io_slot_reset_s4(pdev);
8893		break;
8894	default:
8895		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8896				"1428 Invalid PCI device group: 0x%x\n",
8897				phba->pci_dev_grp);
8898		break;
8899	}
8900	return rc;
8901}
8902
8903/**
8904 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
8905 * @pdev: pointer to PCI device
8906 *
8907 * This routine is registered to the PCI subsystem for error handling. It
8908 * is called when kernel error recovery tells the lpfc driver that it is
8909 * OK to resume normal PCI operation after PCI bus error recovery. When
8910 * this routine is invoked, it dispatches the action to the proper SLI-3
8911 * or SLI-4 device io_resume routine, which will resume the device operation.
8912 **/
8913static void
8914lpfc_io_resume(struct pci_dev *pdev)
8915{
8916	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8917	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8918
8919	switch (phba->pci_dev_grp) {
8920	case LPFC_PCI_DEV_LP:
8921		lpfc_io_resume_s3(pdev);
8922		break;
8923	case LPFC_PCI_DEV_OC:
8924		lpfc_io_resume_s4(pdev);
8925		break;
8926	default:
8927		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8928				"1429 Invalid PCI device group: 0x%x\n",
8929				phba->pci_dev_grp);
8930		break;
8931	}
8932	return;
8933}
8934
8935static struct pci_device_id lpfc_id_table[] = {
8936	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
8937		PCI_ANY_ID, PCI_ANY_ID, },
8938	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
8939		PCI_ANY_ID, PCI_ANY_ID, },
8940	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
8941		PCI_ANY_ID, PCI_ANY_ID, },
8942	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
8943		PCI_ANY_ID, PCI_ANY_ID, },
8944	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
8945		PCI_ANY_ID, PCI_ANY_ID, },
8946	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
8947		PCI_ANY_ID, PCI_ANY_ID, },
8948	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
8949		PCI_ANY_ID, PCI_ANY_ID, },
8950	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
8951		PCI_ANY_ID, PCI_ANY_ID, },
8952	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
8953		PCI_ANY_ID, PCI_ANY_ID, },
8954	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
8955		PCI_ANY_ID, PCI_ANY_ID, },
8956	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
8957		PCI_ANY_ID, PCI_ANY_ID, },
8958	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
8959		PCI_ANY_ID, PCI_ANY_ID, },
8960	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
8961		PCI_ANY_ID, PCI_ANY_ID, },
8962	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
8963		PCI_ANY_ID, PCI_ANY_ID, },
8964	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
8965		PCI_ANY_ID, PCI_ANY_ID, },
8966	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
8967		PCI_ANY_ID, PCI_ANY_ID, },
8968	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
8969		PCI_ANY_ID, PCI_ANY_ID, },
8970	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
8971		PCI_ANY_ID, PCI_ANY_ID, },
8972	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
8973		PCI_ANY_ID, PCI_ANY_ID, },
8974	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
8975		PCI_ANY_ID, PCI_ANY_ID, },
8976	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
8977		PCI_ANY_ID, PCI_ANY_ID, },
8978	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
8979		PCI_ANY_ID, PCI_ANY_ID, },
8980	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
8981		PCI_ANY_ID, PCI_ANY_ID, },
8982	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
8983		PCI_ANY_ID, PCI_ANY_ID, },
8984	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
8985		PCI_ANY_ID, PCI_ANY_ID, },
8986	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
8987		PCI_ANY_ID, PCI_ANY_ID, },
8988	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
8989		PCI_ANY_ID, PCI_ANY_ID, },
8990	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
8991		PCI_ANY_ID, PCI_ANY_ID, },
8992	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
8993		PCI_ANY_ID, PCI_ANY_ID, },
8994	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
8995		PCI_ANY_ID, PCI_ANY_ID, },
8996	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
8997		PCI_ANY_ID, PCI_ANY_ID, },
8998	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
8999		PCI_ANY_ID, PCI_ANY_ID, },
9000	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
9001		PCI_ANY_ID, PCI_ANY_ID, },
9002	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
9003		PCI_ANY_ID, PCI_ANY_ID, },
9004	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
9005		PCI_ANY_ID, PCI_ANY_ID, },
9006	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
9007		PCI_ANY_ID, PCI_ANY_ID, },
9008	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
9009		PCI_ANY_ID, PCI_ANY_ID, },
9010	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
9011		PCI_ANY_ID, PCI_ANY_ID, },
9012	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
9013		PCI_ANY_ID, PCI_ANY_ID, },
9014	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
9015		PCI_ANY_ID, PCI_ANY_ID, },
9016	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
9017		PCI_ANY_ID, PCI_ANY_ID, },
9018	{ 0 }
9019};
9020
9021MODULE_DEVICE_TABLE(pci, lpfc_id_table);
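
/*
 * MODULE_DEVICE_TABLE() exports lpfc_id_table so that user-space module
 * tools can map PCI vendor/device IDs to this module for autoloading. As a
 * hedged illustration (kept out of the build), entries that match any
 * subsystem IDs, like those above, can also be written with the
 * PCI_DEVICE() convenience macro from <linux/pci.h>:
 */
#if 0	/* illustration only -- not compiled */
static struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER) },
	{ 0 }	/* terminating entry */
};
#endif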
9022
9023static struct pci_error_handlers lpfc_err_handler = {
9024	.error_detected = lpfc_io_error_detected,
9025	.slot_reset = lpfc_io_slot_reset,
9026	.resume = lpfc_io_resume,
9027};
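
/*
 * During PCI error recovery the PCI core invokes these callbacks in order:
 * .error_detected first (returning CAN_RECOVER, NEED_RESET or DISCONNECT),
 * then .slot_reset once the slot has been reset (when a reset was
 * requested), and finally .resume when traffic may flow again. The
 * framework's optional .mmio_enabled step is not implemented here.
 */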
9028
9029static struct pci_driver lpfc_driver = {
9030	.name		= LPFC_DRIVER_NAME,
9031	.id_table	= lpfc_id_table,
9032	.probe		= lpfc_pci_probe_one,
9033	.remove		= __devexit_p(lpfc_pci_remove_one),
9034	.suspend        = lpfc_pci_suspend_one,
9035	.resume		= lpfc_pci_resume_one,
9036	.err_handler    = &lpfc_err_handler,
9037};
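
/*
 * __devexit_p() resolves to the remove handler when hotplug support is
 * configured into the kernel and to NULL otherwise, matching the __devexit
 * annotation on lpfc_pci_remove_one().
 */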
9038
9039/**
9040 * lpfc_init - lpfc module initialization routine
9041 *
9042 * This routine is to be invoked when the lpfc module is loaded into the
9043 * kernel. The special kernel macro module_init() is used to indicate the
9044 * role of this routine to the kernel as the lpfc module entry point.
9045 *
9046 * Return codes
9047 *   0 - successful
9048 *   -ENOMEM - FC attach transport failed
9049 *   all others - failed
9050 **/
9051static int __init
9052lpfc_init(void)
9053{
9054	int error = 0;
9055
9056	printk(LPFC_MODULE_DESC "\n");
9057	printk(LPFC_COPYRIGHT "\n");
9058
9059	if (lpfc_enable_npiv) {
9060		lpfc_transport_functions.vport_create = lpfc_vport_create;
9061		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
9062	}
9063	lpfc_transport_template =
9064				fc_attach_transport(&lpfc_transport_functions);
9065	if (lpfc_transport_template == NULL)
9066		return -ENOMEM;
9067	if (lpfc_enable_npiv) {
9068		lpfc_vport_transport_template =
9069			fc_attach_transport(&lpfc_vport_transport_functions);
9070		if (lpfc_vport_transport_template == NULL) {
9071			fc_release_transport(lpfc_transport_template);
9072			return -ENOMEM;
9073		}
9074	}
9075	error = pci_register_driver(&lpfc_driver);
9076	if (error) {
9077		fc_release_transport(lpfc_transport_template);
9078		if (lpfc_enable_npiv)
9079			fc_release_transport(lpfc_vport_transport_template);
9080	}
9081
9082	return error;
9083}
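
/*
 * Registration order above matters: the FC transport templates must exist
 * before pci_register_driver() probes adapters that reference them, and
 * the error path releases whatever was attached before the failure.
 */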
9084
9085/**
9086 * lpfc_exit - lpfc module removal routine
9087 *
9088 * This routine is invoked when the lpfc module is removed from the kernel.
9089 * The special kernel macro module_exit() is used to indicate the role of
9090 * this routine to the kernel as the lpfc module exit point.
9091 **/
9092static void __exit
9093lpfc_exit(void)
9094{
9095	pci_unregister_driver(&lpfc_driver);
9096	fc_release_transport(lpfc_transport_template);
9097	if (lpfc_enable_npiv)
9098		fc_release_transport(lpfc_vport_transport_template);
9099	if (_dump_buf_data) {
9100		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
9101				"_dump_buf_data at 0x%p\n",
9102				(1L << _dump_buf_data_order), _dump_buf_data);
9103		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
9104	}
9105
9106	if (_dump_buf_dif) {
9107		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
9108				"_dump_buf_dif at 0x%p\n",
9109				(1L << _dump_buf_dif_order), _dump_buf_dif);
9110		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
9111	}
9112}
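
/*
 * free_pages() must be passed the same order used when the dump buffers
 * were allocated. A hedged sketch (kept out of the build) of the
 * allocate/free pairing; the local names are hypothetical stand-ins for
 * the driver's _dump_buf_* globals:
 */
#if 0	/* illustration only -- not compiled */
static void example_dump_buf_cycle(void)
{
	unsigned long order = 2;	/* 2^2 = 4 contiguous pages */
	char *buf = (char *)__get_free_pages(GFP_KERNEL, order);

	if (buf)
		free_pages((unsigned long)buf, order);
}
#endif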
9113
9114module_init(lpfc_init);
9115module_exit(lpfc_exit);
9116MODULE_LICENSE("GPL");
9117MODULE_DESCRIPTION(LPFC_MODULE_DESC);
9118MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
9119MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
9120