lpfc_init.c revision 3677a3a76e190f801af0778df3b8efa1fe161a6e
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30#include <linux/ctype.h>
31#include <linux/aer.h>
32#include <linux/slab.h>
33
34#include <scsi/scsi.h>
35#include <scsi/scsi_device.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_transport_fc.h>
38
39#include "lpfc_hw4.h"
40#include "lpfc_hw.h"
41#include "lpfc_sli.h"
42#include "lpfc_sli4.h"
43#include "lpfc_nl.h"
44#include "lpfc_disc.h"
45#include "lpfc_scsi.h"
46#include "lpfc.h"
47#include "lpfc_logmsg.h"
48#include "lpfc_crtn.h"
49#include "lpfc_vport.h"
50#include "lpfc_version.h"
51
52char *_dump_buf_data;
53unsigned long _dump_buf_data_order;
54char *_dump_buf_dif;
55unsigned long _dump_buf_dif_order;
56spinlock_t _dump_buf_lock;
57
58static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
59static int lpfc_post_rcv_buf(struct lpfc_hba *);
60static int lpfc_sli4_queue_create(struct lpfc_hba *);
61static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
62static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
63static int lpfc_setup_endian_order(struct lpfc_hba *);
64static int lpfc_sli4_read_config(struct lpfc_hba *);
65static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
66static void lpfc_free_sgl_list(struct lpfc_hba *);
67static int lpfc_init_sgl_list(struct lpfc_hba *);
68static int lpfc_init_active_sgl_array(struct lpfc_hba *);
69static void lpfc_free_active_sgl(struct lpfc_hba *);
70static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
71static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
72static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
73static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
74static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
75
76static struct scsi_transport_template *lpfc_transport_template = NULL;
77static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
78static DEFINE_IDR(lpfc_hba_index);
79
80/**
81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
82 * @phba: pointer to lpfc hba data structure.
83 *
84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
85 * mailbox command. It retrieves the revision information from the HBA and
86 * collects the Vital Product Data (VPD) about the HBA for preparing the
87 * configuration of the HBA.
88 *
89 * Return codes:
90 *   0 - success.
91 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
92 *   Any other value - indicates an error.
93 **/
94int
95lpfc_config_port_prep(struct lpfc_hba *phba)
96{
97	lpfc_vpd_t *vp = &phba->vpd;
98	int i = 0, rc;
99	LPFC_MBOXQ_t *pmb;
100	MAILBOX_t *mb;
101	char *lpfc_vpd_data = NULL;
102	uint16_t offset = 0;
103	static char licensed[56] =
104		    "key unlock for use with gnu public licensed code only\0";
105	static int init_key = 1;
106
107	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
108	if (!pmb) {
109		phba->link_state = LPFC_HBA_ERROR;
110		return -ENOMEM;
111	}
112
113	mb = &pmb->u.mb;
114	phba->link_state = LPFC_INIT_MBX_CMDS;
115
116	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
117		if (init_key) {
118			uint32_t *ptext = (uint32_t *) licensed;
119
120			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
121				*ptext = cpu_to_be32(*ptext);
122			init_key = 0;
123		}
124
125		lpfc_read_nv(phba, pmb);
126		memset((char*)mb->un.varRDnvp.rsvd3, 0,
127			sizeof (mb->un.varRDnvp.rsvd3));
128		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
129			 sizeof (licensed));
130
131		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
132
133		if (rc != MBX_SUCCESS) {
134			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
135					"0324 Config Port initialization "
136					"error, mbxCmd x%x READ_NVPARM, "
137					"mbxStatus x%x\n",
138					mb->mbxCommand, mb->mbxStatus);
139			mempool_free(pmb, phba->mbox_mem_pool);
140			return -ERESTART;
141		}
142		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
143		       sizeof(phba->wwnn));
144		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
145		       sizeof(phba->wwpn));
146	}
147
148	phba->sli3_options = 0x0;
149
150	/* Setup and issue mailbox READ REV command */
151	lpfc_read_rev(phba, pmb);
152	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153	if (rc != MBX_SUCCESS) {
154		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
155				"0439 Adapter failed to init, mbxCmd x%x "
156				"READ_REV, mbxStatus x%x\n",
157				mb->mbxCommand, mb->mbxStatus);
158		mempool_free( pmb, phba->mbox_mem_pool);
159		return -ERESTART;
160	}
161
162
163	/*
164	 * The value of rr must be 1 since the driver set the cv field to 1.
165	 * This setting requires the FW to set all revision fields.
166	 */
167	if (mb->un.varRdRev.rr == 0) {
168		vp->rev.rBit = 0;
169		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
170				"0440 Adapter failed to init, READ_REV has "
171				"missing revision information.\n");
172		mempool_free(pmb, phba->mbox_mem_pool);
173		return -ERESTART;
174	}
175
176	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
177		mempool_free(pmb, phba->mbox_mem_pool);
178		return -EINVAL;
179	}
180
181	/* Save information as VPD data */
182	vp->rev.rBit = 1;
183	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
184	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
185	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
186	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
187	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
188	vp->rev.biuRev = mb->un.varRdRev.biuRev;
189	vp->rev.smRev = mb->un.varRdRev.smRev;
190	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
191	vp->rev.endecRev = mb->un.varRdRev.endecRev;
192	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
193	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
194	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
195	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
196	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
197	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
198
199	/* If the sli feature level is less then 9, we must
200	 * tear down all RPIs and VPIs on link down if NPIV
201	 * is enabled.
202	 */
203	if (vp->rev.feaLevelHigh < 9)
204		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
205
206	if (lpfc_is_LC_HBA(phba->pcidev->device))
207		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
208						sizeof (phba->RandomData));
209
210	/* Get adapter VPD information */
211	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
212	if (!lpfc_vpd_data)
213		goto out_free_mbox;
214
215	do {
216		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
218
219		if (rc != MBX_SUCCESS) {
220			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
221					"0441 VPD not present on adapter, "
222					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
223					mb->mbxCommand, mb->mbxStatus);
224			mb->un.varDmp.word_cnt = 0;
225		}
226		/* dump mem may return a zero when finished or we got a
227		 * mailbox error, either way we are done.
228		 */
229		if (mb->un.varDmp.word_cnt == 0)
230			break;
231		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
232			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
233		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
234				      lpfc_vpd_data + offset,
235				      mb->un.varDmp.word_cnt);
236		offset += mb->un.varDmp.word_cnt;
237	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
238	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
239
240	kfree(lpfc_vpd_data);
241out_free_mbox:
242	mempool_free(pmb, phba->mbox_mem_pool);
243	return 0;
244}
245
246/**
247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
248 * @phba: pointer to lpfc hba data structure.
249 * @pmboxq: pointer to the driver internal queue element for mailbox command.
250 *
251 * This is the completion handler for driver's configuring asynchronous event
252 * mailbox command to the device. If the mailbox command returns successfully,
253 * it will set internal async event support flag to 1; otherwise, it will
254 * set internal async event support flag to 0.
255 **/
256static void
257lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
258{
259	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
260		phba->temp_sensor_support = 1;
261	else
262		phba->temp_sensor_support = 0;
263	mempool_free(pmboxq, phba->mbox_mem_pool);
264	return;
265}
266
267/**
268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
269 * @phba: pointer to lpfc hba data structure.
270 * @pmboxq: pointer to the driver internal queue element for mailbox command.
271 *
272 * This is the completion handler for dump mailbox command for getting
273 * wake up parameters. When this command complete, the response contain
274 * Option rom version of the HBA. This function translate the version number
275 * into a human readable string and store it in OptionROMVersion.
276 **/
277static void
278lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
279{
280	struct prog_id *prg;
281	uint32_t prog_id_word;
282	char dist = ' ';
283	/* character array used for decoding dist type. */
284	char dist_char[] = "nabx";
285
286	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
287		mempool_free(pmboxq, phba->mbox_mem_pool);
288		return;
289	}
290
291	prg = (struct prog_id *) &prog_id_word;
292
293	/* word 7 contain option rom version */
294	prog_id_word = pmboxq->u.mb.un.varWords[7];
295
296	/* Decode the Option rom version word to a readable string */
297	if (prg->dist < 4)
298		dist = dist_char[prg->dist];
299
300	if ((prg->dist == 3) && (prg->num == 0))
301		sprintf(phba->OptionROMVersion, "%d.%d%d",
302			prg->ver, prg->rev, prg->lev);
303	else
304		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305			prg->ver, prg->rev, prg->lev,
306			dist, prg->num);
307	mempool_free(pmboxq, phba->mbox_mem_pool);
308	return;
309}
310
311/**
312 * lpfc_config_port_post - Perform lpfc initialization after config port
313 * @phba: pointer to lpfc hba data structure.
314 *
315 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
316 * command call. It performs all internal resource and state setups on the
317 * port: post IOCB buffers, enable appropriate host interrupt attentions,
318 * ELS ring timers, etc.
319 *
320 * Return codes
321 *   0 - success.
322 *   Any other value - error.
323 **/
324int
325lpfc_config_port_post(struct lpfc_hba *phba)
326{
327	struct lpfc_vport *vport = phba->pport;
328	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
329	LPFC_MBOXQ_t *pmb;
330	MAILBOX_t *mb;
331	struct lpfc_dmabuf *mp;
332	struct lpfc_sli *psli = &phba->sli;
333	uint32_t status, timeout;
334	int i, j;
335	int rc;
336
337	spin_lock_irq(&phba->hbalock);
338	/*
339	 * If the Config port completed correctly the HBA is not
340	 * over heated any more.
341	 */
342	if (phba->over_temp_state == HBA_OVER_TEMP)
343		phba->over_temp_state = HBA_NORMAL_TEMP;
344	spin_unlock_irq(&phba->hbalock);
345
346	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
347	if (!pmb) {
348		phba->link_state = LPFC_HBA_ERROR;
349		return -ENOMEM;
350	}
351	mb = &pmb->u.mb;
352
353	/* Get login parameters for NID.  */
354	rc = lpfc_read_sparam(phba, pmb, 0);
355	if (rc) {
356		mempool_free(pmb, phba->mbox_mem_pool);
357		return -ENOMEM;
358	}
359
360	pmb->vport = vport;
361	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
362		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
363				"0448 Adapter failed init, mbxCmd x%x "
364				"READ_SPARM mbxStatus x%x\n",
365				mb->mbxCommand, mb->mbxStatus);
366		phba->link_state = LPFC_HBA_ERROR;
367		mp = (struct lpfc_dmabuf *) pmb->context1;
368		mempool_free(pmb, phba->mbox_mem_pool);
369		lpfc_mbuf_free(phba, mp->virt, mp->phys);
370		kfree(mp);
371		return -EIO;
372	}
373
374	mp = (struct lpfc_dmabuf *) pmb->context1;
375
376	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
377	lpfc_mbuf_free(phba, mp->virt, mp->phys);
378	kfree(mp);
379	pmb->context1 = NULL;
380
381	if (phba->cfg_soft_wwnn)
382		u64_to_wwn(phba->cfg_soft_wwnn,
383			   vport->fc_sparam.nodeName.u.wwn);
384	if (phba->cfg_soft_wwpn)
385		u64_to_wwn(phba->cfg_soft_wwpn,
386			   vport->fc_sparam.portName.u.wwn);
387	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388	       sizeof (struct lpfc_name));
389	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390	       sizeof (struct lpfc_name));
391
392	/* Update the fc_host data structures with new wwn. */
393	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
394	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
395	fc_host_max_npiv_vports(shost) = phba->max_vpi;
396
397	/* If no serial number in VPD data, use low 6 bytes of WWNN */
398	/* This should be consolidated into parse_vpd ? - mr */
399	if (phba->SerialNumber[0] == 0) {
400		uint8_t *outptr;
401
402		outptr = &vport->fc_nodename.u.s.IEEE[0];
403		for (i = 0; i < 12; i++) {
404			status = *outptr++;
405			j = ((status & 0xf0) >> 4);
406			if (j <= 9)
407				phba->SerialNumber[i] =
408				    (char)((uint8_t) 0x30 + (uint8_t) j);
409			else
410				phba->SerialNumber[i] =
411				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
412			i++;
413			j = (status & 0xf);
414			if (j <= 9)
415				phba->SerialNumber[i] =
416				    (char)((uint8_t) 0x30 + (uint8_t) j);
417			else
418				phba->SerialNumber[i] =
419				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
420		}
421	}
422
423	lpfc_read_config(phba, pmb);
424	pmb->vport = vport;
425	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
426		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
427				"0453 Adapter failed to init, mbxCmd x%x "
428				"READ_CONFIG, mbxStatus x%x\n",
429				mb->mbxCommand, mb->mbxStatus);
430		phba->link_state = LPFC_HBA_ERROR;
431		mempool_free( pmb, phba->mbox_mem_pool);
432		return -EIO;
433	}
434
435	/* Check if the port is disabled */
436	lpfc_sli_read_link_ste(phba);
437
438	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
439	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
440		phba->cfg_hba_queue_depth =
441			(mb->un.varRdConfig.max_xri + 1) -
442					lpfc_sli4_get_els_iocb_cnt(phba);
443
444	phba->lmt = mb->un.varRdConfig.lmt;
445
446	/* Get the default values for Model Name and Description */
447	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
448
449	if ((phba->cfg_link_speed > LINK_SPEED_10G)
450	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
451		&& !(phba->lmt & LMT_1Gb))
452	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
453		&& !(phba->lmt & LMT_2Gb))
454	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
455		&& !(phba->lmt & LMT_4Gb))
456	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
457		&& !(phba->lmt & LMT_8Gb))
458	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
459		&& !(phba->lmt & LMT_10Gb))) {
460		/* Reset link speed to auto */
461		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
462			"1302 Invalid speed for this board: "
463			"Reset link speed to auto: x%x\n",
464			phba->cfg_link_speed);
465			phba->cfg_link_speed = LINK_SPEED_AUTO;
466	}
467
468	phba->link_state = LPFC_LINK_DOWN;
469
470	/* Only process IOCBs on ELS ring till hba_state is READY */
471	if (psli->ring[psli->extra_ring].cmdringaddr)
472		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
473	if (psli->ring[psli->fcp_ring].cmdringaddr)
474		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
475	if (psli->ring[psli->next_ring].cmdringaddr)
476		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
477
478	/* Post receive buffers for desired rings */
479	if (phba->sli_rev != 3)
480		lpfc_post_rcv_buf(phba);
481
482	/*
483	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
484	 */
485	if (phba->intr_type == MSIX) {
486		rc = lpfc_config_msi(phba, pmb);
487		if (rc) {
488			mempool_free(pmb, phba->mbox_mem_pool);
489			return -EIO;
490		}
491		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
492		if (rc != MBX_SUCCESS) {
493			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
494					"0352 Config MSI mailbox command "
495					"failed, mbxCmd x%x, mbxStatus x%x\n",
496					pmb->u.mb.mbxCommand,
497					pmb->u.mb.mbxStatus);
498			mempool_free(pmb, phba->mbox_mem_pool);
499			return -EIO;
500		}
501	}
502
503	spin_lock_irq(&phba->hbalock);
504	/* Initialize ERATT handling flag */
505	phba->hba_flag &= ~HBA_ERATT_HANDLED;
506
507	/* Enable appropriate host interrupts */
508	status = readl(phba->HCregaddr);
509	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
510	if (psli->num_rings > 0)
511		status |= HC_R0INT_ENA;
512	if (psli->num_rings > 1)
513		status |= HC_R1INT_ENA;
514	if (psli->num_rings > 2)
515		status |= HC_R2INT_ENA;
516	if (psli->num_rings > 3)
517		status |= HC_R3INT_ENA;
518
519	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
520	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
521		status &= ~(HC_R0INT_ENA);
522
523	writel(status, phba->HCregaddr);
524	readl(phba->HCregaddr); /* flush */
525	spin_unlock_irq(&phba->hbalock);
526
527	/* Set up ring-0 (ELS) timer */
528	timeout = phba->fc_ratov * 2;
529	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
530	/* Set up heart beat (HB) timer */
531	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
532	phba->hb_outstanding = 0;
533	phba->last_completion_time = jiffies;
534	/* Set up error attention (ERATT) polling timer */
535	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
536
537	if (phba->hba_flag & LINK_DISABLED) {
538		lpfc_printf_log(phba,
539			KERN_ERR, LOG_INIT,
540			"2598 Adapter Link is disabled.\n");
541		lpfc_down_link(phba, pmb);
542		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
543		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
544		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
545			lpfc_printf_log(phba,
546			KERN_ERR, LOG_INIT,
547			"2599 Adapter failed to issue DOWN_LINK"
548			" mbox command rc 0x%x\n", rc);
549
550			mempool_free(pmb, phba->mbox_mem_pool);
551			return -EIO;
552		}
553	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
554		lpfc_init_link(phba, pmb, phba->cfg_topology,
555			phba->cfg_link_speed);
556		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
557		lpfc_set_loopback_flag(phba);
558		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
559		if (rc != MBX_SUCCESS) {
560			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
561				"0454 Adapter failed to init, mbxCmd x%x "
562				"INIT_LINK, mbxStatus x%x\n",
563				mb->mbxCommand, mb->mbxStatus);
564
565			/* Clear all interrupt enable conditions */
566			writel(0, phba->HCregaddr);
567			readl(phba->HCregaddr); /* flush */
568			/* Clear all pending interrupts */
569			writel(0xffffffff, phba->HAregaddr);
570			readl(phba->HAregaddr); /* flush */
571
572			phba->link_state = LPFC_HBA_ERROR;
573			if (rc != MBX_BUSY)
574				mempool_free(pmb, phba->mbox_mem_pool);
575			return -EIO;
576		}
577	}
578	/* MBOX buffer will be freed in mbox compl */
579	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
580	if (!pmb) {
581		phba->link_state = LPFC_HBA_ERROR;
582		return -ENOMEM;
583	}
584
585	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
586	pmb->mbox_cmpl = lpfc_config_async_cmpl;
587	pmb->vport = phba->pport;
588	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
589
590	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
591		lpfc_printf_log(phba,
592				KERN_ERR,
593				LOG_INIT,
594				"0456 Adapter failed to issue "
595				"ASYNCEVT_ENABLE mbox status x%x\n",
596				rc);
597		mempool_free(pmb, phba->mbox_mem_pool);
598	}
599
600	/* Get Option rom version */
601	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
602	if (!pmb) {
603		phba->link_state = LPFC_HBA_ERROR;
604		return -ENOMEM;
605	}
606
607	lpfc_dump_wakeup_param(phba, pmb);
608	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
609	pmb->vport = phba->pport;
610	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
611
612	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
613		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
614				"to get Option ROM version status x%x\n", rc);
615		mempool_free(pmb, phba->mbox_mem_pool);
616	}
617
618	return 0;
619}
620
621/**
622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure.
624 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
625 *
626 * This routine will issue the INIT_LINK mailbox command call.
627 * It is available to other drivers through the lpfc_hba data
628 * structure for use as a delayed link up mechanism with the
629 * module parameter lpfc_suppress_link_up.
630 *
631 * Return code
632 *		0 - success
633 *		Any other value - error
634 **/
635int
636lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
637{
638	struct lpfc_vport *vport = phba->pport;
639	LPFC_MBOXQ_t *pmb;
640	MAILBOX_t *mb;
641	int rc;
642
643	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
644	if (!pmb) {
645		phba->link_state = LPFC_HBA_ERROR;
646		return -ENOMEM;
647	}
648	mb = &pmb->u.mb;
649	pmb->vport = vport;
650
651	lpfc_init_link(phba, pmb, phba->cfg_topology,
652		phba->cfg_link_speed);
653	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
654	lpfc_set_loopback_flag(phba);
655	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
656	if (rc != MBX_SUCCESS) {
657		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
658			"0498 Adapter failed to init, mbxCmd x%x "
659			"INIT_LINK, mbxStatus x%x\n",
660			mb->mbxCommand, mb->mbxStatus);
661		/* Clear all interrupt enable conditions */
662		writel(0, phba->HCregaddr);
663		readl(phba->HCregaddr); /* flush */
664		/* Clear all pending interrupts */
665		writel(0xffffffff, phba->HAregaddr);
666		readl(phba->HAregaddr); /* flush */
667		phba->link_state = LPFC_HBA_ERROR;
668		if (rc != MBX_BUSY || flag == MBX_POLL)
669			mempool_free(pmb, phba->mbox_mem_pool);
670		return -EIO;
671	}
672	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
673	if (flag == MBX_POLL)
674		mempool_free(pmb, phba->mbox_mem_pool);
675
676	return 0;
677}
678
679/**
680 * lpfc_hba_down_link - this routine downs the FC link
681 * @phba: pointer to lpfc hba data structure.
682 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
683 *
684 * This routine will issue the DOWN_LINK mailbox command call.
685 * It is available to other drivers through the lpfc_hba data
686 * structure for use to stop the link.
687 *
688 * Return code
689 *		0 - success
690 *		Any other value - error
691 **/
692int
693lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
694{
695	LPFC_MBOXQ_t *pmb;
696	int rc;
697
698	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
699	if (!pmb) {
700		phba->link_state = LPFC_HBA_ERROR;
701		return -ENOMEM;
702	}
703
704	lpfc_printf_log(phba,
705		KERN_ERR, LOG_INIT,
706		"0491 Adapter Link is disabled.\n");
707	lpfc_down_link(phba, pmb);
708	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
709	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
710	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
711		lpfc_printf_log(phba,
712		KERN_ERR, LOG_INIT,
713		"2522 Adapter failed to issue DOWN_LINK"
714		" mbox command rc 0x%x\n", rc);
715
716		mempool_free(pmb, phba->mbox_mem_pool);
717		return -EIO;
718	}
719	if (flag == MBX_POLL)
720		mempool_free(pmb, phba->mbox_mem_pool);
721
722	return 0;
723}
724
725/**
726 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
727 * @phba: pointer to lpfc HBA data structure.
728 *
729 * This routine will do LPFC uninitialization before the HBA is reset when
730 * bringing down the SLI Layer.
731 *
732 * Return codes
733 *   0 - success.
734 *   Any other value - error.
735 **/
736int
737lpfc_hba_down_prep(struct lpfc_hba *phba)
738{
739	struct lpfc_vport **vports;
740	int i;
741
742	if (phba->sli_rev <= LPFC_SLI_REV3) {
743		/* Disable interrupts */
744		writel(0, phba->HCregaddr);
745		readl(phba->HCregaddr); /* flush */
746	}
747
748	if (phba->pport->load_flag & FC_UNLOADING)
749		lpfc_cleanup_discovery_resources(phba->pport);
750	else {
751		vports = lpfc_create_vport_work_array(phba);
752		if (vports != NULL)
753			for (i = 0; i <= phba->max_vports &&
754				vports[i] != NULL; i++)
755				lpfc_cleanup_discovery_resources(vports[i]);
756		lpfc_destroy_vport_work_array(phba, vports);
757	}
758	return 0;
759}
760
761/**
762 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
763 * @phba: pointer to lpfc HBA data structure.
764 *
765 * This routine will do uninitialization after the HBA is reset when bring
766 * down the SLI Layer.
767 *
768 * Return codes
769 *   0 - success.
770 *   Any other value - error.
771 **/
772static int
773lpfc_hba_down_post_s3(struct lpfc_hba *phba)
774{
775	struct lpfc_sli *psli = &phba->sli;
776	struct lpfc_sli_ring *pring;
777	struct lpfc_dmabuf *mp, *next_mp;
778	LIST_HEAD(completions);
779	int i;
780
781	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
782		lpfc_sli_hbqbuf_free_all(phba);
783	else {
784		/* Cleanup preposted buffers on the ELS ring */
785		pring = &psli->ring[LPFC_ELS_RING];
786		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
787			list_del(&mp->list);
788			pring->postbufq_cnt--;
789			lpfc_mbuf_free(phba, mp->virt, mp->phys);
790			kfree(mp);
791		}
792	}
793
794	spin_lock_irq(&phba->hbalock);
795	for (i = 0; i < psli->num_rings; i++) {
796		pring = &psli->ring[i];
797
798		/* At this point in time the HBA is either reset or DOA. Either
799		 * way, nothing should be on txcmplq as it will NEVER complete.
800		 */
801		list_splice_init(&pring->txcmplq, &completions);
802		pring->txcmplq_cnt = 0;
803		spin_unlock_irq(&phba->hbalock);
804
805		/* Cancel all the IOCBs from the completions list */
806		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
807				      IOERR_SLI_ABORTED);
808
809		lpfc_sli_abort_iocb_ring(phba, pring);
810		spin_lock_irq(&phba->hbalock);
811	}
812	spin_unlock_irq(&phba->hbalock);
813
814	return 0;
815}
816/**
817 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
818 * @phba: pointer to lpfc HBA data structure.
819 *
820 * This routine will do uninitialization after the HBA is reset when bring
821 * down the SLI Layer.
822 *
823 * Return codes
824 *   0 - success.
825 *   Any other value - error.
826 **/
827static int
828lpfc_hba_down_post_s4(struct lpfc_hba *phba)
829{
830	struct lpfc_scsi_buf *psb, *psb_next;
831	LIST_HEAD(aborts);
832	int ret;
833	unsigned long iflag = 0;
834	struct lpfc_sglq *sglq_entry = NULL;
835
836	ret = lpfc_hba_down_post_s3(phba);
837	if (ret)
838		return ret;
839	/* At this point in time the HBA is either reset or DOA. Either
840	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
841	 * on the lpfc_sgl_list so that it can either be freed if the
842	 * driver is unloading or reposted if the driver is restarting
843	 * the port.
844	 */
845	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
846					/* scsl_buf_list */
847	/* abts_sgl_list_lock required because worker thread uses this
848	 * list.
849	 */
850	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
851	list_for_each_entry(sglq_entry,
852		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
853		sglq_entry->state = SGL_FREED;
854
855	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
856			&phba->sli4_hba.lpfc_sgl_list);
857	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
858	/* abts_scsi_buf_list_lock required because worker thread uses this
859	 * list.
860	 */
861	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
862	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
863			&aborts);
864	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
865	spin_unlock_irq(&phba->hbalock);
866
867	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
868		psb->pCmd = NULL;
869		psb->status = IOSTAT_SUCCESS;
870	}
871	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
872	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
873	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
874	return 0;
875}
876
877/**
878 * lpfc_hba_down_post - Wrapper func for hba down post routine
879 * @phba: pointer to lpfc HBA data structure.
880 *
881 * This routine wraps the actual SLI3 or SLI4 routine for performing
882 * uninitialization after the HBA is reset when bring down the SLI Layer.
883 *
884 * Return codes
885 *   0 - success.
886 *   Any other value - error.
887 **/
888int
889lpfc_hba_down_post(struct lpfc_hba *phba)
890{
891	return (*phba->lpfc_hba_down_post)(phba);
892}
893
894/**
895 * lpfc_hb_timeout - The HBA-timer timeout handler
896 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
897 *
898 * This is the HBA-timer timeout handler registered to the lpfc driver. When
899 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
900 * work-port-events bitmap and the worker thread is notified. This timeout
901 * event will be used by the worker thread to invoke the actual timeout
902 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
903 * be performed in the timeout handler and the HBA timeout event bit shall
904 * be cleared by the worker thread after it has taken the event bitmap out.
905 **/
906static void
907lpfc_hb_timeout(unsigned long ptr)
908{
909	struct lpfc_hba *phba;
910	uint32_t tmo_posted;
911	unsigned long iflag;
912
913	phba = (struct lpfc_hba *)ptr;
914
915	/* Check for heart beat timeout conditions */
916	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
917	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
918	if (!tmo_posted)
919		phba->pport->work_port_events |= WORKER_HB_TMO;
920	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
921
922	/* Tell the worker thread there is work to do */
923	if (!tmo_posted)
924		lpfc_worker_wake_up(phba);
925	return;
926}
927
928/**
929 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
930 * @phba: pointer to lpfc hba data structure.
931 * @pmboxq: pointer to the driver internal queue element for mailbox command.
932 *
933 * This is the callback function to the lpfc heart-beat mailbox command.
934 * If configured, the lpfc driver issues the heart-beat mailbox command to
935 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
936 * heart-beat mailbox command is issued, the driver shall set up heart-beat
937 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
938 * heart-beat outstanding state. Once the mailbox command comes back and
939 * no error conditions detected, the heart-beat mailbox command timer is
940 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
941 * state is cleared for the next heart-beat. If the timer expired with the
942 * heart-beat outstanding state set, the driver will put the HBA offline.
943 **/
944static void
945lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
946{
947	unsigned long drvr_flag;
948
949	spin_lock_irqsave(&phba->hbalock, drvr_flag);
950	phba->hb_outstanding = 0;
951	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
952
953	/* Check and reset heart-beat timer is necessary */
954	mempool_free(pmboxq, phba->mbox_mem_pool);
955	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
956		!(phba->link_state == LPFC_HBA_ERROR) &&
957		!(phba->pport->load_flag & FC_UNLOADING))
958		mod_timer(&phba->hb_tmofunc,
959			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
960	return;
961}
962
963/**
964 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
965 * @phba: pointer to lpfc hba data structure.
966 *
967 * This is the actual HBA-timer timeout handler to be invoked by the worker
968 * thread whenever the HBA timer fired and HBA-timeout event posted. This
969 * handler performs any periodic operations needed for the device. If such
970 * periodic event has already been attended to either in the interrupt handler
971 * or by processing slow-ring or fast-ring events within the HBA-timer
972 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
973 * the timer for the next timeout period. If lpfc heart-beat mailbox command
974 * is configured and there is no heart-beat mailbox command outstanding, a
975 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
976 * has been a heart-beat mailbox command outstanding, the HBA shall be put
977 * to offline.
978 **/
979void
980lpfc_hb_timeout_handler(struct lpfc_hba *phba)
981{
982	struct lpfc_vport **vports;
983	LPFC_MBOXQ_t *pmboxq;
984	struct lpfc_dmabuf *buf_ptr;
985	int retval, i;
986	struct lpfc_sli *psli = &phba->sli;
987	LIST_HEAD(completions);
988
989	vports = lpfc_create_vport_work_array(phba);
990	if (vports != NULL)
991		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
992			lpfc_rcv_seq_check_edtov(vports[i]);
993	lpfc_destroy_vport_work_array(phba, vports);
994
995	if ((phba->link_state == LPFC_HBA_ERROR) ||
996		(phba->pport->load_flag & FC_UNLOADING) ||
997		(phba->pport->fc_flag & FC_OFFLINE_MODE))
998		return;
999
1000	spin_lock_irq(&phba->pport->work_port_lock);
1001
1002	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
1003		jiffies)) {
1004		spin_unlock_irq(&phba->pport->work_port_lock);
1005		if (!phba->hb_outstanding)
1006			mod_timer(&phba->hb_tmofunc,
1007				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1008		else
1009			mod_timer(&phba->hb_tmofunc,
1010				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1011		return;
1012	}
1013	spin_unlock_irq(&phba->pport->work_port_lock);
1014
1015	if (phba->elsbuf_cnt &&
1016		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1017		spin_lock_irq(&phba->hbalock);
1018		list_splice_init(&phba->elsbuf, &completions);
1019		phba->elsbuf_cnt = 0;
1020		phba->elsbuf_prev_cnt = 0;
1021		spin_unlock_irq(&phba->hbalock);
1022
1023		while (!list_empty(&completions)) {
1024			list_remove_head(&completions, buf_ptr,
1025				struct lpfc_dmabuf, list);
1026			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1027			kfree(buf_ptr);
1028		}
1029	}
1030	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1031
1032	/* If there is no heart beat outstanding, issue a heartbeat command */
1033	if (phba->cfg_enable_hba_heartbeat) {
1034		if (!phba->hb_outstanding) {
1035			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1036				(list_empty(&psli->mboxq))) {
1037				pmboxq = mempool_alloc(phba->mbox_mem_pool,
1038							GFP_KERNEL);
1039				if (!pmboxq) {
1040					mod_timer(&phba->hb_tmofunc,
1041						 jiffies +
1042						 HZ * LPFC_HB_MBOX_INTERVAL);
1043					return;
1044				}
1045
1046				lpfc_heart_beat(phba, pmboxq);
1047				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1048				pmboxq->vport = phba->pport;
1049				retval = lpfc_sli_issue_mbox(phba, pmboxq,
1050						MBX_NOWAIT);
1051
1052				if (retval != MBX_BUSY &&
1053					retval != MBX_SUCCESS) {
1054					mempool_free(pmboxq,
1055							phba->mbox_mem_pool);
1056					mod_timer(&phba->hb_tmofunc,
1057						jiffies +
1058						HZ * LPFC_HB_MBOX_INTERVAL);
1059					return;
1060				}
1061				phba->skipped_hb = 0;
1062				phba->hb_outstanding = 1;
1063			} else if (time_before_eq(phba->last_completion_time,
1064					phba->skipped_hb)) {
1065				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1066					"2857 Last completion time not "
1067					" updated in %d ms\n",
1068					jiffies_to_msecs(jiffies
1069						 - phba->last_completion_time));
1070			} else
1071				phba->skipped_hb = jiffies;
1072
1073			mod_timer(&phba->hb_tmofunc,
1074				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1075			return;
1076		} else {
1077			/*
1078			* If heart beat timeout called with hb_outstanding set
1079			* we need to give the hb mailbox cmd a chance to
1080			* complete or TMO.
1081			*/
1082			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1083					"0459 Adapter heartbeat still out"
1084					"standing:last compl time was %d ms.\n",
1085					jiffies_to_msecs(jiffies
1086						 - phba->last_completion_time));
1087			mod_timer(&phba->hb_tmofunc,
1088				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1089		}
1090	}
1091}
1092
1093/**
1094 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1095 * @phba: pointer to lpfc hba data structure.
1096 *
1097 * This routine is called to bring the HBA offline when HBA hardware error
1098 * other than Port Error 6 has been detected.
1099 **/
1100static void
1101lpfc_offline_eratt(struct lpfc_hba *phba)
1102{
1103	struct lpfc_sli   *psli = &phba->sli;
1104
1105	spin_lock_irq(&phba->hbalock);
1106	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1107	spin_unlock_irq(&phba->hbalock);
1108	lpfc_offline_prep(phba);
1109
1110	lpfc_offline(phba);
1111	lpfc_reset_barrier(phba);
1112	spin_lock_irq(&phba->hbalock);
1113	lpfc_sli_brdreset(phba);
1114	spin_unlock_irq(&phba->hbalock);
1115	lpfc_hba_down_post(phba);
1116	lpfc_sli_brdready(phba, HS_MBRDY);
1117	lpfc_unblock_mgmt_io(phba);
1118	phba->link_state = LPFC_HBA_ERROR;
1119	return;
1120}
1121
1122/**
1123 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1124 * @phba: pointer to lpfc hba data structure.
1125 *
1126 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1127 * other than Port Error 6 has been detected.
1128 **/
1129static void
1130lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1131{
1132	lpfc_offline_prep(phba);
1133	lpfc_offline(phba);
1134	lpfc_sli4_brdreset(phba);
1135	lpfc_hba_down_post(phba);
1136	lpfc_sli4_post_status_check(phba);
1137	lpfc_unblock_mgmt_io(phba);
1138	phba->link_state = LPFC_HBA_ERROR;
1139}
1140
1141/**
1142 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1143 * @phba: pointer to lpfc hba data structure.
1144 *
1145 * This routine is invoked to handle the deferred HBA hardware error
1146 * conditions. This type of error is indicated by HBA by setting ER1
1147 * and another ER bit in the host status register. The driver will
1148 * wait until the ER1 bit clears before handling the error condition.
1149 **/
1150static void
1151lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1152{
1153	uint32_t old_host_status = phba->work_hs;
1154	struct lpfc_sli_ring  *pring;
1155	struct lpfc_sli *psli = &phba->sli;
1156
1157	/* If the pci channel is offline, ignore possible errors,
1158	 * since we cannot communicate with the pci card anyway.
1159	 */
1160	if (pci_channel_offline(phba->pcidev)) {
1161		spin_lock_irq(&phba->hbalock);
1162		phba->hba_flag &= ~DEFER_ERATT;
1163		spin_unlock_irq(&phba->hbalock);
1164		return;
1165	}
1166
1167	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1168		"0479 Deferred Adapter Hardware Error "
1169		"Data: x%x x%x x%x\n",
1170		phba->work_hs,
1171		phba->work_status[0], phba->work_status[1]);
1172
1173	spin_lock_irq(&phba->hbalock);
1174	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1175	spin_unlock_irq(&phba->hbalock);
1176
1177
1178	/*
1179	 * Firmware stops when it triggred erratt. That could cause the I/Os
1180	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1181	 * SCSI layer retry it after re-establishing link.
1182	 */
1183	pring = &psli->ring[psli->fcp_ring];
1184	lpfc_sli_abort_iocb_ring(phba, pring);
1185
1186	/*
1187	 * There was a firmware error. Take the hba offline and then
1188	 * attempt to restart it.
1189	 */
1190	lpfc_offline_prep(phba);
1191	lpfc_offline(phba);
1192
1193	/* Wait for the ER1 bit to clear.*/
1194	while (phba->work_hs & HS_FFER1) {
1195		msleep(100);
1196		phba->work_hs = readl(phba->HSregaddr);
1197		/* If driver is unloading let the worker thread continue */
1198		if (phba->pport->load_flag & FC_UNLOADING) {
1199			phba->work_hs = 0;
1200			break;
1201		}
1202	}
1203
1204	/*
1205	 * This is to ptrotect against a race condition in which
1206	 * first write to the host attention register clear the
1207	 * host status register.
1208	 */
1209	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1210		phba->work_hs = old_host_status & ~HS_FFER1;
1211
1212	spin_lock_irq(&phba->hbalock);
1213	phba->hba_flag &= ~DEFER_ERATT;
1214	spin_unlock_irq(&phba->hbalock);
1215	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1216	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1217}
1218
1219static void
1220lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1221{
1222	struct lpfc_board_event_header board_event;
1223	struct Scsi_Host *shost;
1224
1225	board_event.event_type = FC_REG_BOARD_EVENT;
1226	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1227	shost = lpfc_shost_from_vport(phba->pport);
1228	fc_host_post_vendor_event(shost, fc_get_event_number(),
1229				  sizeof(board_event),
1230				  (char *) &board_event,
1231				  LPFC_NL_VENDOR_ID);
1232}
1233
1234/**
1235 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1236 * @phba: pointer to lpfc hba data structure.
1237 *
1238 * This routine is invoked to handle the following HBA hardware error
1239 * conditions:
1240 * 1 - HBA error attention interrupt
1241 * 2 - DMA ring index out of range
1242 * 3 - Mailbox command came back as unknown
1243 **/
1244static void
1245lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1246{
1247	struct lpfc_vport *vport = phba->pport;
1248	struct lpfc_sli   *psli = &phba->sli;
1249	struct lpfc_sli_ring  *pring;
1250	uint32_t event_data;
1251	unsigned long temperature;
1252	struct temp_event temp_event_data;
1253	struct Scsi_Host  *shost;
1254
1255	/* If the pci channel is offline, ignore possible errors,
1256	 * since we cannot communicate with the pci card anyway.
1257	 */
1258	if (pci_channel_offline(phba->pcidev)) {
1259		spin_lock_irq(&phba->hbalock);
1260		phba->hba_flag &= ~DEFER_ERATT;
1261		spin_unlock_irq(&phba->hbalock);
1262		return;
1263	}
1264
1265	/* If resets are disabled then leave the HBA alone and return */
1266	if (!phba->cfg_enable_hba_reset)
1267		return;
1268
1269	/* Send an internal error event to mgmt application */
1270	lpfc_board_errevt_to_mgmt(phba);
1271
1272	if (phba->hba_flag & DEFER_ERATT)
1273		lpfc_handle_deferred_eratt(phba);
1274
1275	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1276		if (phba->work_hs & HS_FFER6)
1277			/* Re-establishing Link */
1278			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1279					"1301 Re-establishing Link "
1280					"Data: x%x x%x x%x\n",
1281					phba->work_hs, phba->work_status[0],
1282					phba->work_status[1]);
1283		if (phba->work_hs & HS_FFER8)
1284			/* Device Zeroization */
1285			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1286					"2861 Host Authentication device "
1287					"zeroization Data:x%x x%x x%x\n",
1288					phba->work_hs, phba->work_status[0],
1289					phba->work_status[1]);
1290
1291		spin_lock_irq(&phba->hbalock);
1292		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1293		spin_unlock_irq(&phba->hbalock);
1294
1295		/*
1296		* Firmware stops when it triggled erratt with HS_FFER6.
1297		* That could cause the I/Os dropped by the firmware.
1298		* Error iocb (I/O) on txcmplq and let the SCSI layer
1299		* retry it after re-establishing link.
1300		*/
1301		pring = &psli->ring[psli->fcp_ring];
1302		lpfc_sli_abort_iocb_ring(phba, pring);
1303
1304		/*
1305		 * There was a firmware error.  Take the hba offline and then
1306		 * attempt to restart it.
1307		 */
1308		lpfc_offline_prep(phba);
1309		lpfc_offline(phba);
1310		lpfc_sli_brdrestart(phba);
1311		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1312			lpfc_unblock_mgmt_io(phba);
1313			return;
1314		}
1315		lpfc_unblock_mgmt_io(phba);
1316	} else if (phba->work_hs & HS_CRIT_TEMP) {
1317		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1318		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1319		temp_event_data.event_code = LPFC_CRIT_TEMP;
1320		temp_event_data.data = (uint32_t)temperature;
1321
1322		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1323				"0406 Adapter maximum temperature exceeded "
1324				"(%ld), taking this port offline "
1325				"Data: x%x x%x x%x\n",
1326				temperature, phba->work_hs,
1327				phba->work_status[0], phba->work_status[1]);
1328
1329		shost = lpfc_shost_from_vport(phba->pport);
1330		fc_host_post_vendor_event(shost, fc_get_event_number(),
1331					  sizeof(temp_event_data),
1332					  (char *) &temp_event_data,
1333					  SCSI_NL_VID_TYPE_PCI
1334					  | PCI_VENDOR_ID_EMULEX);
1335
1336		spin_lock_irq(&phba->hbalock);
1337		phba->over_temp_state = HBA_OVER_TEMP;
1338		spin_unlock_irq(&phba->hbalock);
1339		lpfc_offline_eratt(phba);
1340
1341	} else {
1342		/* The if clause above forces this code path when the status
1343		 * failure is a value other than FFER6. Do not call the offline
1344		 * twice. This is the adapter hardware error path.
1345		 */
1346		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1347				"0457 Adapter Hardware Error "
1348				"Data: x%x x%x x%x\n",
1349				phba->work_hs,
1350				phba->work_status[0], phba->work_status[1]);
1351
1352		event_data = FC_REG_DUMP_EVENT;
1353		shost = lpfc_shost_from_vport(vport);
1354		fc_host_post_vendor_event(shost, fc_get_event_number(),
1355				sizeof(event_data), (char *) &event_data,
1356				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1357
1358		lpfc_offline_eratt(phba);
1359	}
1360	return;
1361}
1362
1363/**
1364 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1365 * @phba: pointer to lpfc hba data structure.
1366 *
1367 * This routine is invoked to handle the SLI4 HBA hardware error attention
1368 * conditions.
1369 **/
1370static void
1371lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1372{
1373	struct lpfc_vport *vport = phba->pport;
1374	uint32_t event_data;
1375	struct Scsi_Host *shost;
1376
1377	/* If the pci channel is offline, ignore possible errors, since
1378	 * we cannot communicate with the pci card anyway.
1379	 */
1380	if (pci_channel_offline(phba->pcidev))
1381		return;
1382	/* If resets are disabled then leave the HBA alone and return */
1383	if (!phba->cfg_enable_hba_reset)
1384		return;
1385
1386	/* Send an internal error event to mgmt application */
1387	lpfc_board_errevt_to_mgmt(phba);
1388
1389	/* For now, the actual action for SLI4 device handling is not
1390	 * specified yet, just treated it as adaptor hardware failure
1391	 */
1392	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1393			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1394			phba->work_status[0], phba->work_status[1]);
1395
1396	event_data = FC_REG_DUMP_EVENT;
1397	shost = lpfc_shost_from_vport(vport);
1398	fc_host_post_vendor_event(shost, fc_get_event_number(),
1399				  sizeof(event_data), (char *) &event_data,
1400				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1401
1402	lpfc_sli4_offline_eratt(phba);
1403}
1404
1405/**
1406 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1407 * @phba: pointer to lpfc HBA data structure.
1408 *
1409 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1410 * routine from the API jump table function pointer from the lpfc_hba struct.
1411 *
1412 * Return codes
1413 *   0 - success.
1414 *   Any other value - error.
1415 **/
1416void
1417lpfc_handle_eratt(struct lpfc_hba *phba)
1418{
1419	(*phba->lpfc_handle_eratt)(phba);
1420}
1421
1422/**
1423 * lpfc_handle_latt - The HBA link event handler
1424 * @phba: pointer to lpfc hba data structure.
1425 *
1426 * This routine is invoked from the worker thread to handle a HBA host
1427 * attention link event.
1428 **/
1429void
1430lpfc_handle_latt(struct lpfc_hba *phba)
1431{
1432	struct lpfc_vport *vport = phba->pport;
1433	struct lpfc_sli   *psli = &phba->sli;
1434	LPFC_MBOXQ_t *pmb;
1435	volatile uint32_t control;
1436	struct lpfc_dmabuf *mp;
1437	int rc = 0;
1438
1439	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1440	if (!pmb) {
1441		rc = 1;
1442		goto lpfc_handle_latt_err_exit;
1443	}
1444
1445	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1446	if (!mp) {
1447		rc = 2;
1448		goto lpfc_handle_latt_free_pmb;
1449	}
1450
1451	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1452	if (!mp->virt) {
1453		rc = 3;
1454		goto lpfc_handle_latt_free_mp;
1455	}
1456
1457	/* Cleanup any outstanding ELS commands */
1458	lpfc_els_flush_all_cmd(phba);
1459
1460	psli->slistat.link_event++;
1461	lpfc_read_la(phba, pmb, mp);
1462	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1463	pmb->vport = vport;
1464	/* Block ELS IOCBs until we have processed this mbox command */
1465	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1466	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1467	if (rc == MBX_NOT_FINISHED) {
1468		rc = 4;
1469		goto lpfc_handle_latt_free_mbuf;
1470	}
1471
1472	/* Clear Link Attention in HA REG */
1473	spin_lock_irq(&phba->hbalock);
1474	writel(HA_LATT, phba->HAregaddr);
1475	readl(phba->HAregaddr); /* flush */
1476	spin_unlock_irq(&phba->hbalock);
1477
1478	return;
1479
1480lpfc_handle_latt_free_mbuf:
1481	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1482	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1483lpfc_handle_latt_free_mp:
1484	kfree(mp);
1485lpfc_handle_latt_free_pmb:
1486	mempool_free(pmb, phba->mbox_mem_pool);
1487lpfc_handle_latt_err_exit:
1488	/* Enable Link attention interrupts */
1489	spin_lock_irq(&phba->hbalock);
1490	psli->sli_flag |= LPFC_PROCESS_LA;
1491	control = readl(phba->HCregaddr);
1492	control |= HC_LAINT_ENA;
1493	writel(control, phba->HCregaddr);
1494	readl(phba->HCregaddr); /* flush */
1495
1496	/* Clear Link Attention in HA REG */
1497	writel(HA_LATT, phba->HAregaddr);
1498	readl(phba->HAregaddr); /* flush */
1499	spin_unlock_irq(&phba->hbalock);
1500	lpfc_linkdown(phba);
1501	phba->link_state = LPFC_HBA_ERROR;
1502
1503	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1504		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1505
1506	return;
1507}
1508
1509/**
1510 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1511 * @phba: pointer to lpfc hba data structure.
1512 * @vpd: pointer to the vital product data.
1513 * @len: length of the vital product data in bytes.
1514 *
1515 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1516 * an array of characters. In this routine, the ModelName, ProgramType, and
1517 * ModelDesc, etc. fields of the phba data structure will be populated.
1518 *
1519 * Return codes
1520 *   0 - pointer to the VPD passed in is NULL
1521 *   1 - success
1522 **/
1523int
1524lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1525{
1526	uint8_t lenlo, lenhi;
1527	int Length;
1528	int i, j;
1529	int finished = 0;
1530	int index = 0;
1531
1532	if (!vpd)
1533		return 0;
1534
1535	/* Vital Product */
1536	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1537			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1538			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1539			(uint32_t) vpd[3]);
1540	while (!finished && (index < (len - 4))) {
1541		switch (vpd[index]) {
1542		case 0x82:
1543		case 0x91:
1544			index += 1;
1545			lenlo = vpd[index];
1546			index += 1;
1547			lenhi = vpd[index];
1548			index += 1;
1549			i = ((((unsigned short)lenhi) << 8) + lenlo);
1550			index += i;
1551			break;
1552		case 0x90:
1553			index += 1;
1554			lenlo = vpd[index];
1555			index += 1;
1556			lenhi = vpd[index];
1557			index += 1;
1558			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1559			if (Length > len - index)
1560				Length = len - index;
1561			while (Length > 0) {
1562			/* Look for Serial Number */
1563			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1564				index += 2;
1565				i = vpd[index];
1566				index += 1;
1567				j = 0;
1568				Length -= (3+i);
1569				while(i--) {
1570					phba->SerialNumber[j++] = vpd[index++];
1571					if (j == 31)
1572						break;
1573				}
1574				phba->SerialNumber[j] = 0;
1575				continue;
1576			}
1577			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1578				phba->vpd_flag |= VPD_MODEL_DESC;
1579				index += 2;
1580				i = vpd[index];
1581				index += 1;
1582				j = 0;
1583				Length -= (3+i);
1584				while(i--) {
1585					phba->ModelDesc[j++] = vpd[index++];
1586					if (j == 255)
1587						break;
1588				}
1589				phba->ModelDesc[j] = 0;
1590				continue;
1591			}
1592			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1593				phba->vpd_flag |= VPD_MODEL_NAME;
1594				index += 2;
1595				i = vpd[index];
1596				index += 1;
1597				j = 0;
1598				Length -= (3+i);
1599				while(i--) {
1600					phba->ModelName[j++] = vpd[index++];
1601					if (j == 79)
1602						break;
1603				}
1604				phba->ModelName[j] = 0;
1605				continue;
1606			}
1607			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1608				phba->vpd_flag |= VPD_PROGRAM_TYPE;
1609				index += 2;
1610				i = vpd[index];
1611				index += 1;
1612				j = 0;
1613				Length -= (3+i);
1614				while(i--) {
1615					phba->ProgramType[j++] = vpd[index++];
1616					if (j == 255)
1617						break;
1618				}
1619				phba->ProgramType[j] = 0;
1620				continue;
1621			}
1622			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1623				phba->vpd_flag |= VPD_PORT;
1624				index += 2;
1625				i = vpd[index];
1626				index += 1;
1627				j = 0;
1628				Length -= (3+i);
1629				while(i--) {
1630				phba->Port[j++] = vpd[index++];
1631				if (j == 19)
1632					break;
1633				}
1634				phba->Port[j] = 0;
1635				continue;
1636			}
1637			else {
1638				index += 2;
1639				i = vpd[index];
1640				index += 1;
1641				index += i;
1642				Length -= (3 + i);
1643			}
1644		}
1645		finished = 0;
1646		break;
1647		case 0x78:
1648			finished = 1;
1649			break;
1650		default:
1651			index ++;
1652			break;
1653		}
1654	}
1655
1656	return(1);
1657}
1658
1659/**
1660 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1661 * @phba: pointer to lpfc hba data structure.
1662 * @mdp: pointer to the data structure to hold the derived model name.
1663 * @descp: pointer to the data structure to hold the derived description.
1664 *
1665 * This routine retrieves HBA's description based on its registered PCI device
1666 * ID. The @descp passed into this function points to an array of 256 chars. It
1667 * shall be returned with the model name, maximum speed, and the host bus type.
1668 * The @mdp passed into this function points to an array of 80 chars. When the
1669 * function returns, the @mdp will be filled with the model name.
1670 **/
1671static void
1672lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1673{
1674	lpfc_vpd_t *vp;
1675	uint16_t dev_id = phba->pcidev->device;
1676	int max_speed;
1677	int GE = 0;
1678	int oneConnect = 0; /* default is not a oneConnect */
1679	struct {
1680		char *name;
1681		char *bus;
1682		char *function;
1683	} m = {"<Unknown>", "", ""};
1684
1685	if (mdp && mdp[0] != '\0'
1686		&& descp && descp[0] != '\0')
1687		return;
1688
1689	if (phba->lmt & LMT_10Gb)
1690		max_speed = 10;
1691	else if (phba->lmt & LMT_8Gb)
1692		max_speed = 8;
1693	else if (phba->lmt & LMT_4Gb)
1694		max_speed = 4;
1695	else if (phba->lmt & LMT_2Gb)
1696		max_speed = 2;
1697	else
1698		max_speed = 1;
1699
1700	vp = &phba->vpd;
1701
1702	switch (dev_id) {
1703	case PCI_DEVICE_ID_FIREFLY:
1704		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1705		break;
1706	case PCI_DEVICE_ID_SUPERFLY:
1707		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1708			m = (typeof(m)){"LP7000", "PCI",
1709					"Fibre Channel Adapter"};
1710		else
1711			m = (typeof(m)){"LP7000E", "PCI",
1712					"Fibre Channel Adapter"};
1713		break;
1714	case PCI_DEVICE_ID_DRAGONFLY:
1715		m = (typeof(m)){"LP8000", "PCI",
1716				"Fibre Channel Adapter"};
1717		break;
1718	case PCI_DEVICE_ID_CENTAUR:
1719		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1720			m = (typeof(m)){"LP9002", "PCI",
1721					"Fibre Channel Adapter"};
1722		else
1723			m = (typeof(m)){"LP9000", "PCI",
1724					"Fibre Channel Adapter"};
1725		break;
1726	case PCI_DEVICE_ID_RFLY:
1727		m = (typeof(m)){"LP952", "PCI",
1728				"Fibre Channel Adapter"};
1729		break;
1730	case PCI_DEVICE_ID_PEGASUS:
1731		m = (typeof(m)){"LP9802", "PCI-X",
1732				"Fibre Channel Adapter"};
1733		break;
1734	case PCI_DEVICE_ID_THOR:
1735		m = (typeof(m)){"LP10000", "PCI-X",
1736				"Fibre Channel Adapter"};
1737		break;
1738	case PCI_DEVICE_ID_VIPER:
1739		m = (typeof(m)){"LPX1000",  "PCI-X",
1740				"Fibre Channel Adapter"};
1741		break;
1742	case PCI_DEVICE_ID_PFLY:
1743		m = (typeof(m)){"LP982", "PCI-X",
1744				"Fibre Channel Adapter"};
1745		break;
1746	case PCI_DEVICE_ID_TFLY:
1747		m = (typeof(m)){"LP1050", "PCI-X",
1748				"Fibre Channel Adapter"};
1749		break;
1750	case PCI_DEVICE_ID_HELIOS:
1751		m = (typeof(m)){"LP11000", "PCI-X2",
1752				"Fibre Channel Adapter"};
1753		break;
1754	case PCI_DEVICE_ID_HELIOS_SCSP:
1755		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1756				"Fibre Channel Adapter"};
1757		break;
1758	case PCI_DEVICE_ID_HELIOS_DCSP:
1759		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1760				"Fibre Channel Adapter"};
1761		break;
1762	case PCI_DEVICE_ID_NEPTUNE:
1763		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1764		break;
1765	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1766		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1767		break;
1768	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1769		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1770		break;
1771	case PCI_DEVICE_ID_BMID:
1772		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1773		break;
1774	case PCI_DEVICE_ID_BSMB:
1775		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1776		break;
1777	case PCI_DEVICE_ID_ZEPHYR:
1778		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1779		break;
1780	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1781		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1782		break;
1783	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1784		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1785		GE = 1;
1786		break;
1787	case PCI_DEVICE_ID_ZMID:
1788		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1789		break;
1790	case PCI_DEVICE_ID_ZSMB:
1791		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1792		break;
1793	case PCI_DEVICE_ID_LP101:
1794		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1795		break;
1796	case PCI_DEVICE_ID_LP10000S:
1797		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1798		break;
1799	case PCI_DEVICE_ID_LP11000S:
1800		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1801		break;
1802	case PCI_DEVICE_ID_LPE11000S:
1803		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1804		break;
1805	case PCI_DEVICE_ID_SAT:
1806		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1807		break;
1808	case PCI_DEVICE_ID_SAT_MID:
1809		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1810		break;
1811	case PCI_DEVICE_ID_SAT_SMB:
1812		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1813		break;
1814	case PCI_DEVICE_ID_SAT_DCSP:
1815		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1816		break;
1817	case PCI_DEVICE_ID_SAT_SCSP:
1818		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1819		break;
1820	case PCI_DEVICE_ID_SAT_S:
1821		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1822		break;
1823	case PCI_DEVICE_ID_HORNET:
1824		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1825		GE = 1;
1826		break;
1827	case PCI_DEVICE_ID_PROTEUS_VF:
1828		m = (typeof(m)){"LPev12000", "PCIe IOV",
1829				"Fibre Channel Adapter"};
1830		break;
1831	case PCI_DEVICE_ID_PROTEUS_PF:
1832		m = (typeof(m)){"LPev12000", "PCIe IOV",
1833				"Fibre Channel Adapter"};
1834		break;
1835	case PCI_DEVICE_ID_PROTEUS_S:
1836		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1837				"Fibre Channel Adapter"};
1838		break;
1839	case PCI_DEVICE_ID_TIGERSHARK:
1840		oneConnect = 1;
1841		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1842		break;
1843	case PCI_DEVICE_ID_TOMCAT:
1844		oneConnect = 1;
1845		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1846		break;
1847	case PCI_DEVICE_ID_FALCON:
1848		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1849				"EmulexSecure Fibre"};
1850		break;
1851	case PCI_DEVICE_ID_BALIUS:
1852		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1853				"Fibre Channel Adapter"};
1854		break;
1855	default:
1856		m = (typeof(m)){"Unknown", "", ""};
1857		break;
1858	}
1859
1860	if (mdp && mdp[0] == '\0')
1861		snprintf(mdp, 79,"%s", m.name);
1862	/* oneConnect hba requires special processing, they are all initiators
1863	 * and we put the port number on the end
1864	 */
1865	if (descp && descp[0] == '\0') {
1866		if (oneConnect)
1867			snprintf(descp, 255,
1868				"Emulex OneConnect %s, %s Initiator, Port %s",
1869				m.name, m.function,
1870				phba->Port);
1871		else
1872			snprintf(descp, 255,
1873				"Emulex %s %d%s %s %s",
1874				m.name, max_speed, (GE) ? "GE" : "Gb",
1875				m.bus, m.function);
1876	}
1877}
1878
1879/**
1880 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1881 * @phba: pointer to lpfc hba data structure.
1882 * @pring: pointer to a IOCB ring.
1883 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1884 *
1885 * This routine posts a given number of IOCBs with the associated DMA buffer
1886 * descriptors specified by the cnt argument to the given IOCB ring.
1887 *
1888 * Return codes
1889 *   The number of IOCBs NOT able to be posted to the IOCB ring.
1890 **/
1891int
1892lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1893{
1894	IOCB_t *icmd;
1895	struct lpfc_iocbq *iocb;
1896	struct lpfc_dmabuf *mp1, *mp2;
1897
1898	cnt += pring->missbufcnt;
1899
1900	/* While there are buffers to post */
1901	while (cnt > 0) {
1902		/* Allocate buffer for  command iocb */
1903		iocb = lpfc_sli_get_iocbq(phba);
1904		if (iocb == NULL) {
1905			pring->missbufcnt = cnt;
1906			return cnt;
1907		}
1908		icmd = &iocb->iocb;
1909
1910		/* 2 buffers can be posted per command */
1911		/* Allocate buffer to post */
1912		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1913		if (mp1)
1914		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1915		if (!mp1 || !mp1->virt) {
1916			kfree(mp1);
1917			lpfc_sli_release_iocbq(phba, iocb);
1918			pring->missbufcnt = cnt;
1919			return cnt;
1920		}
1921
1922		INIT_LIST_HEAD(&mp1->list);
1923		/* Allocate buffer to post */
1924		if (cnt > 1) {
1925			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1926			if (mp2)
1927				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1928							    &mp2->phys);
1929			if (!mp2 || !mp2->virt) {
1930				kfree(mp2);
1931				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1932				kfree(mp1);
1933				lpfc_sli_release_iocbq(phba, iocb);
1934				pring->missbufcnt = cnt;
1935				return cnt;
1936			}
1937
1938			INIT_LIST_HEAD(&mp2->list);
1939		} else {
1940			mp2 = NULL;
1941		}
1942
1943		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1944		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1945		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1946		icmd->ulpBdeCount = 1;
1947		cnt--;
1948		if (mp2) {
1949			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1950			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1951			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1952			cnt--;
1953			icmd->ulpBdeCount = 2;
1954		}
1955
1956		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1957		icmd->ulpLe = 1;
1958
1959		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1960		    IOCB_ERROR) {
1961			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1962			kfree(mp1);
1963			cnt++;
1964			if (mp2) {
1965				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1966				kfree(mp2);
1967				cnt++;
1968			}
1969			lpfc_sli_release_iocbq(phba, iocb);
1970			pring->missbufcnt = cnt;
1971			return cnt;
1972		}
1973		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1974		if (mp2)
1975			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1976	}
1977	pring->missbufcnt = 0;
1978	return 0;
1979}
1980
1981/**
1982 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1983 * @phba: pointer to lpfc hba data structure.
1984 *
1985 * This routine posts initial receive IOCB buffers to the ELS ring. The
1986 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1987 * set to 64 IOCBs.
1988 *
1989 * Return codes
1990 *   0 - success (currently always success)
1991 **/
1992static int
1993lpfc_post_rcv_buf(struct lpfc_hba *phba)
1994{
1995	struct lpfc_sli *psli = &phba->sli;
1996
1997	/* Ring 0, ELS / CT buffers */
1998	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1999	/* Ring 2 - FCP no buffers needed */
2000
2001	return 0;
2002}
2003
2004#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2005
2006/**
2007 * lpfc_sha_init - Set up initial array of hash table entries
2008 * @HashResultPointer: pointer to an array as hash table.
2009 *
2010 * This routine sets up the initial values to the array of hash table entries
2011 * for the LC HBAs.
2012 **/
2013static void
2014lpfc_sha_init(uint32_t * HashResultPointer)
2015{
2016	HashResultPointer[0] = 0x67452301;
2017	HashResultPointer[1] = 0xEFCDAB89;
2018	HashResultPointer[2] = 0x98BADCFE;
2019	HashResultPointer[3] = 0x10325476;
2020	HashResultPointer[4] = 0xC3D2E1F0;
2021}
2022
2023/**
2024 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2025 * @HashResultPointer: pointer to an initial/result hash table.
2026 * @HashWorkingPointer: pointer to an working hash table.
2027 *
2028 * This routine iterates an initial hash table pointed by @HashResultPointer
2029 * with the values from the working hash table pointeed by @HashWorkingPointer.
2030 * The results are putting back to the initial hash table, returned through
2031 * the @HashResultPointer as the result hash table.
2032 **/
2033static void
2034lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2035{
2036	int t;
2037	uint32_t TEMP;
2038	uint32_t A, B, C, D, E;
2039	t = 16;
2040	do {
2041		HashWorkingPointer[t] =
2042		    S(1,
2043		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2044								     8] ^
2045		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2046	} while (++t <= 79);
2047	t = 0;
2048	A = HashResultPointer[0];
2049	B = HashResultPointer[1];
2050	C = HashResultPointer[2];
2051	D = HashResultPointer[3];
2052	E = HashResultPointer[4];
2053
2054	do {
2055		if (t < 20) {
2056			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2057		} else if (t < 40) {
2058			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2059		} else if (t < 60) {
2060			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2061		} else {
2062			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2063		}
2064		TEMP += S(5, A) + E + HashWorkingPointer[t];
2065		E = D;
2066		D = C;
2067		C = S(30, B);
2068		B = A;
2069		A = TEMP;
2070	} while (++t <= 79);
2071
2072	HashResultPointer[0] += A;
2073	HashResultPointer[1] += B;
2074	HashResultPointer[2] += C;
2075	HashResultPointer[3] += D;
2076	HashResultPointer[4] += E;
2077
2078}
2079
2080/**
2081 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2082 * @RandomChallenge: pointer to the entry of host challenge random number array.
2083 * @HashWorking: pointer to the entry of the working hash array.
2084 *
2085 * This routine calculates the working hash array referred by @HashWorking
2086 * from the challenge random numbers associated with the host, referred by
2087 * @RandomChallenge. The result is put into the entry of the working hash
2088 * array and returned by reference through @HashWorking.
2089 **/
2090static void
2091lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2092{
2093	*HashWorking = (*RandomChallenge ^ *HashWorking);
2094}
2095
2096/**
2097 * lpfc_hba_init - Perform special handling for LC HBA initialization
2098 * @phba: pointer to lpfc hba data structure.
2099 * @hbainit: pointer to an array of unsigned 32-bit integers.
2100 *
2101 * This routine performs the special handling for LC HBA initialization.
2102 **/
2103void
2104lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2105{
2106	int t;
2107	uint32_t *HashWorking;
2108	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2109
2110	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2111	if (!HashWorking)
2112		return;
2113
2114	HashWorking[0] = HashWorking[78] = *pwwnn++;
2115	HashWorking[1] = HashWorking[79] = *pwwnn;
2116
2117	for (t = 0; t < 7; t++)
2118		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2119
2120	lpfc_sha_init(hbainit);
2121	lpfc_sha_iterate(hbainit, HashWorking);
2122	kfree(HashWorking);
2123}
2124
2125/**
2126 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2127 * @vport: pointer to a virtual N_Port data structure.
2128 *
2129 * This routine performs the necessary cleanups before deleting the @vport.
2130 * It invokes the discovery state machine to perform necessary state
2131 * transitions and to release the ndlps associated with the @vport. Note,
2132 * the physical port is treated as @vport 0.
2133 **/
2134void
2135lpfc_cleanup(struct lpfc_vport *vport)
2136{
2137	struct lpfc_hba   *phba = vport->phba;
2138	struct lpfc_nodelist *ndlp, *next_ndlp;
2139	int i = 0;
2140
2141	if (phba->link_state > LPFC_LINK_DOWN)
2142		lpfc_port_link_failure(vport);
2143
2144	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2145		if (!NLP_CHK_NODE_ACT(ndlp)) {
2146			ndlp = lpfc_enable_node(vport, ndlp,
2147						NLP_STE_UNUSED_NODE);
2148			if (!ndlp)
2149				continue;
2150			spin_lock_irq(&phba->ndlp_lock);
2151			NLP_SET_FREE_REQ(ndlp);
2152			spin_unlock_irq(&phba->ndlp_lock);
2153			/* Trigger the release of the ndlp memory */
2154			lpfc_nlp_put(ndlp);
2155			continue;
2156		}
2157		spin_lock_irq(&phba->ndlp_lock);
2158		if (NLP_CHK_FREE_REQ(ndlp)) {
2159			/* The ndlp should not be in memory free mode already */
2160			spin_unlock_irq(&phba->ndlp_lock);
2161			continue;
2162		} else
2163			/* Indicate request for freeing ndlp memory */
2164			NLP_SET_FREE_REQ(ndlp);
2165		spin_unlock_irq(&phba->ndlp_lock);
2166
2167		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2168		    ndlp->nlp_DID == Fabric_DID) {
2169			/* Just free up ndlp with Fabric_DID for vports */
2170			lpfc_nlp_put(ndlp);
2171			continue;
2172		}
2173
2174		if (ndlp->nlp_type & NLP_FABRIC)
2175			lpfc_disc_state_machine(vport, ndlp, NULL,
2176					NLP_EVT_DEVICE_RECOVERY);
2177
2178		lpfc_disc_state_machine(vport, ndlp, NULL,
2179					     NLP_EVT_DEVICE_RM);
2180
2181	}
2182
2183	/* At this point, ALL ndlp's should be gone
2184	 * because of the previous NLP_EVT_DEVICE_RM.
2185	 * Lets wait for this to happen, if needed.
2186	 */
2187	while (!list_empty(&vport->fc_nodes)) {
2188		if (i++ > 3000) {
2189			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2190				"0233 Nodelist not empty\n");
2191			list_for_each_entry_safe(ndlp, next_ndlp,
2192						&vport->fc_nodes, nlp_listp) {
2193				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2194						LOG_NODE,
2195						"0282 did:x%x ndlp:x%p "
2196						"usgmap:x%x refcnt:%d\n",
2197						ndlp->nlp_DID, (void *)ndlp,
2198						ndlp->nlp_usg_map,
2199						atomic_read(
2200							&ndlp->kref.refcount));
2201			}
2202			break;
2203		}
2204
2205		/* Wait for any activity on ndlps to settle */
2206		msleep(10);
2207	}
2208}
2209
2210/**
2211 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2212 * @vport: pointer to a virtual N_Port data structure.
2213 *
2214 * This routine stops all the timers associated with a @vport. This function
2215 * is invoked before disabling or deleting a @vport. Note that the physical
2216 * port is treated as @vport 0.
2217 **/
2218void
2219lpfc_stop_vport_timers(struct lpfc_vport *vport)
2220{
2221	del_timer_sync(&vport->els_tmofunc);
2222	del_timer_sync(&vport->fc_fdmitmo);
2223	lpfc_can_disctmo(vport);
2224	return;
2225}
2226
2227/**
2228 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2229 * @phba: pointer to lpfc hba data structure.
2230 *
2231 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2232 * caller of this routine should already hold the host lock.
2233 **/
2234void
2235__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2236{
2237	/* Clear pending FCF rediscovery wait and failover in progress flags */
2238	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2239				FCF_DEAD_DISC |
2240				FCF_ACVL_DISC);
2241	/* Now, try to stop the timer */
2242	del_timer(&phba->fcf.redisc_wait);
2243}
2244
2245/**
2246 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2247 * @phba: pointer to lpfc hba data structure.
2248 *
2249 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2250 * checks whether the FCF rediscovery wait timer is pending with the host
2251 * lock held before proceeding with disabling the timer and clearing the
2252 * wait timer pendig flag.
2253 **/
2254void
2255lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2256{
2257	spin_lock_irq(&phba->hbalock);
2258	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2259		/* FCF rediscovery timer already fired or stopped */
2260		spin_unlock_irq(&phba->hbalock);
2261		return;
2262	}
2263	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2264	spin_unlock_irq(&phba->hbalock);
2265}
2266
2267/**
2268 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2269 * @phba: pointer to lpfc hba data structure.
2270 *
2271 * This routine stops all the timers associated with a HBA. This function is
2272 * invoked before either putting a HBA offline or unloading the driver.
2273 **/
2274void
2275lpfc_stop_hba_timers(struct lpfc_hba *phba)
2276{
2277	lpfc_stop_vport_timers(phba->pport);
2278	del_timer_sync(&phba->sli.mbox_tmo);
2279	del_timer_sync(&phba->fabric_block_timer);
2280	del_timer_sync(&phba->eratt_poll);
2281	del_timer_sync(&phba->hb_tmofunc);
2282	phba->hb_outstanding = 0;
2283
2284	switch (phba->pci_dev_grp) {
2285	case LPFC_PCI_DEV_LP:
2286		/* Stop any LightPulse device specific driver timers */
2287		del_timer_sync(&phba->fcp_poll_timer);
2288		break;
2289	case LPFC_PCI_DEV_OC:
2290		/* Stop any OneConnect device sepcific driver timers */
2291		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2292		break;
2293	default:
2294		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2295				"0297 Invalid device group (x%x)\n",
2296				phba->pci_dev_grp);
2297		break;
2298	}
2299	return;
2300}
2301
2302/**
2303 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2304 * @phba: pointer to lpfc hba data structure.
2305 *
2306 * This routine marks a HBA's management interface as blocked. Once the HBA's
2307 * management interface is marked as blocked, all the user space access to
2308 * the HBA, whether they are from sysfs interface or libdfc interface will
2309 * all be blocked. The HBA is set to block the management interface when the
2310 * driver prepares the HBA interface for online or offline.
2311 **/
2312static void
2313lpfc_block_mgmt_io(struct lpfc_hba * phba)
2314{
2315	unsigned long iflag;
2316	uint8_t actcmd = MBX_HEARTBEAT;
2317	unsigned long timeout;
2318
2319
2320	spin_lock_irqsave(&phba->hbalock, iflag);
2321	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2322	if (phba->sli.mbox_active)
2323		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2324	spin_unlock_irqrestore(&phba->hbalock, iflag);
2325	/* Determine how long we might wait for the active mailbox
2326	 * command to be gracefully completed by firmware.
2327	 */
2328	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2329			jiffies;
2330	/* Wait for the outstnading mailbox command to complete */
2331	while (phba->sli.mbox_active) {
2332		/* Check active mailbox complete status every 2ms */
2333		msleep(2);
2334		if (time_after(jiffies, timeout)) {
2335			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2336				"2813 Mgmt IO is Blocked %x "
2337				"- mbox cmd %x still active\n",
2338				phba->sli.sli_flag, actcmd);
2339			break;
2340		}
2341	}
2342}
2343
2344/**
2345 * lpfc_online - Initialize and bring a HBA online
2346 * @phba: pointer to lpfc hba data structure.
2347 *
2348 * This routine initializes the HBA and brings a HBA online. During this
2349 * process, the management interface is blocked to prevent user space access
2350 * to the HBA interfering with the driver initialization.
2351 *
2352 * Return codes
2353 *   0 - successful
2354 *   1 - failed
2355 **/
2356int
2357lpfc_online(struct lpfc_hba *phba)
2358{
2359	struct lpfc_vport *vport;
2360	struct lpfc_vport **vports;
2361	int i;
2362
2363	if (!phba)
2364		return 0;
2365	vport = phba->pport;
2366
2367	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2368		return 0;
2369
2370	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2371			"0458 Bring Adapter online\n");
2372
2373	lpfc_block_mgmt_io(phba);
2374
2375	if (!lpfc_sli_queue_setup(phba)) {
2376		lpfc_unblock_mgmt_io(phba);
2377		return 1;
2378	}
2379
2380	if (phba->sli_rev == LPFC_SLI_REV4) {
2381		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2382			lpfc_unblock_mgmt_io(phba);
2383			return 1;
2384		}
2385	} else {
2386		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2387			lpfc_unblock_mgmt_io(phba);
2388			return 1;
2389		}
2390	}
2391
2392	vports = lpfc_create_vport_work_array(phba);
2393	if (vports != NULL)
2394		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2395			struct Scsi_Host *shost;
2396			shost = lpfc_shost_from_vport(vports[i]);
2397			spin_lock_irq(shost->host_lock);
2398			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2399			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2400				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2401			if (phba->sli_rev == LPFC_SLI_REV4)
2402				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2403			spin_unlock_irq(shost->host_lock);
2404		}
2405		lpfc_destroy_vport_work_array(phba, vports);
2406
2407	lpfc_unblock_mgmt_io(phba);
2408	return 0;
2409}
2410
2411/**
2412 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2413 * @phba: pointer to lpfc hba data structure.
2414 *
2415 * This routine marks a HBA's management interface as not blocked. Once the
2416 * HBA's management interface is marked as not blocked, all the user space
2417 * access to the HBA, whether they are from sysfs interface or libdfc
2418 * interface will be allowed. The HBA is set to block the management interface
2419 * when the driver prepares the HBA interface for online or offline and then
2420 * set to unblock the management interface afterwards.
2421 **/
2422void
2423lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2424{
2425	unsigned long iflag;
2426
2427	spin_lock_irqsave(&phba->hbalock, iflag);
2428	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2429	spin_unlock_irqrestore(&phba->hbalock, iflag);
2430}
2431
2432/**
2433 * lpfc_offline_prep - Prepare a HBA to be brought offline
2434 * @phba: pointer to lpfc hba data structure.
2435 *
2436 * This routine is invoked to prepare a HBA to be brought offline. It performs
2437 * unregistration login to all the nodes on all vports and flushes the mailbox
2438 * queue to make it ready to be brought offline.
2439 **/
2440void
2441lpfc_offline_prep(struct lpfc_hba * phba)
2442{
2443	struct lpfc_vport *vport = phba->pport;
2444	struct lpfc_nodelist  *ndlp, *next_ndlp;
2445	struct lpfc_vport **vports;
2446	struct Scsi_Host *shost;
2447	int i;
2448
2449	if (vport->fc_flag & FC_OFFLINE_MODE)
2450		return;
2451
2452	lpfc_block_mgmt_io(phba);
2453
2454	lpfc_linkdown(phba);
2455
2456	/* Issue an unreg_login to all nodes on all vports */
2457	vports = lpfc_create_vport_work_array(phba);
2458	if (vports != NULL) {
2459		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2460			if (vports[i]->load_flag & FC_UNLOADING)
2461				continue;
2462			shost = lpfc_shost_from_vport(vports[i]);
2463			spin_lock_irq(shost->host_lock);
2464			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2465			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2466			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2467			spin_unlock_irq(shost->host_lock);
2468
2469			shost =	lpfc_shost_from_vport(vports[i]);
2470			list_for_each_entry_safe(ndlp, next_ndlp,
2471						 &vports[i]->fc_nodes,
2472						 nlp_listp) {
2473				if (!NLP_CHK_NODE_ACT(ndlp))
2474					continue;
2475				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2476					continue;
2477				if (ndlp->nlp_type & NLP_FABRIC) {
2478					lpfc_disc_state_machine(vports[i], ndlp,
2479						NULL, NLP_EVT_DEVICE_RECOVERY);
2480					lpfc_disc_state_machine(vports[i], ndlp,
2481						NULL, NLP_EVT_DEVICE_RM);
2482				}
2483				spin_lock_irq(shost->host_lock);
2484				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2485				spin_unlock_irq(shost->host_lock);
2486				lpfc_unreg_rpi(vports[i], ndlp);
2487			}
2488		}
2489	}
2490	lpfc_destroy_vport_work_array(phba, vports);
2491
2492	lpfc_sli_mbox_sys_shutdown(phba);
2493}
2494
2495/**
2496 * lpfc_offline - Bring a HBA offline
2497 * @phba: pointer to lpfc hba data structure.
2498 *
2499 * This routine actually brings a HBA offline. It stops all the timers
2500 * associated with the HBA, brings down the SLI layer, and eventually
2501 * marks the HBA as in offline state for the upper layer protocol.
2502 **/
2503void
2504lpfc_offline(struct lpfc_hba *phba)
2505{
2506	struct Scsi_Host  *shost;
2507	struct lpfc_vport **vports;
2508	int i;
2509
2510	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2511		return;
2512
2513	/* stop port and all timers associated with this hba */
2514	lpfc_stop_port(phba);
2515	vports = lpfc_create_vport_work_array(phba);
2516	if (vports != NULL)
2517		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2518			lpfc_stop_vport_timers(vports[i]);
2519	lpfc_destroy_vport_work_array(phba, vports);
2520	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2521			"0460 Bring Adapter offline\n");
2522	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2523	   now.  */
2524	lpfc_sli_hba_down(phba);
2525	spin_lock_irq(&phba->hbalock);
2526	phba->work_ha = 0;
2527	spin_unlock_irq(&phba->hbalock);
2528	vports = lpfc_create_vport_work_array(phba);
2529	if (vports != NULL)
2530		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2531			shost = lpfc_shost_from_vport(vports[i]);
2532			spin_lock_irq(shost->host_lock);
2533			vports[i]->work_port_events = 0;
2534			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2535			spin_unlock_irq(shost->host_lock);
2536		}
2537	lpfc_destroy_vport_work_array(phba, vports);
2538}
2539
2540/**
2541 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2542 * @phba: pointer to lpfc hba data structure.
2543 *
2544 * This routine is to free all the SCSI buffers and IOCBs from the driver
2545 * list back to kernel. It is called from lpfc_pci_remove_one to free
2546 * the internal resources before the device is removed from the system.
2547 *
2548 * Return codes
2549 *   0 - successful (for now, it always returns 0)
2550 **/
2551static int
2552lpfc_scsi_free(struct lpfc_hba *phba)
2553{
2554	struct lpfc_scsi_buf *sb, *sb_next;
2555	struct lpfc_iocbq *io, *io_next;
2556
2557	spin_lock_irq(&phba->hbalock);
2558	/* Release all the lpfc_scsi_bufs maintained by this host. */
2559	spin_lock(&phba->scsi_buf_list_lock);
2560	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2561		list_del(&sb->list);
2562		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2563			      sb->dma_handle);
2564		kfree(sb);
2565		phba->total_scsi_bufs--;
2566	}
2567	spin_unlock(&phba->scsi_buf_list_lock);
2568
2569	/* Release all the lpfc_iocbq entries maintained by this host. */
2570	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2571		list_del(&io->list);
2572		kfree(io);
2573		phba->total_iocbq_bufs--;
2574	}
2575	spin_unlock_irq(&phba->hbalock);
2576	return 0;
2577}
2578
2579/**
2580 * lpfc_create_port - Create an FC port
2581 * @phba: pointer to lpfc hba data structure.
2582 * @instance: a unique integer ID to this FC port.
2583 * @dev: pointer to the device data structure.
2584 *
2585 * This routine creates a FC port for the upper layer protocol. The FC port
2586 * can be created on top of either a physical port or a virtual port provided
2587 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2588 * and associates the FC port created before adding the shost into the SCSI
2589 * layer.
2590 *
2591 * Return codes
2592 *   @vport - pointer to the virtual N_Port data structure.
2593 *   NULL - port create failed.
2594 **/
2595struct lpfc_vport *
2596lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2597{
2598	struct lpfc_vport *vport;
2599	struct Scsi_Host  *shost;
2600	int error = 0;
2601
2602	if (dev != &phba->pcidev->dev)
2603		shost = scsi_host_alloc(&lpfc_vport_template,
2604					sizeof(struct lpfc_vport));
2605	else
2606		shost = scsi_host_alloc(&lpfc_template,
2607					sizeof(struct lpfc_vport));
2608	if (!shost)
2609		goto out;
2610
2611	vport = (struct lpfc_vport *) shost->hostdata;
2612	vport->phba = phba;
2613	vport->load_flag |= FC_LOADING;
2614	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2615	vport->fc_rscn_flush = 0;
2616
2617	lpfc_get_vport_cfgparam(vport);
2618	shost->unique_id = instance;
2619	shost->max_id = LPFC_MAX_TARGET;
2620	shost->max_lun = vport->cfg_max_luns;
2621	shost->this_id = -1;
2622	shost->max_cmd_len = 16;
2623	if (phba->sli_rev == LPFC_SLI_REV4) {
2624		shost->dma_boundary =
2625			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2626		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2627	}
2628
2629	/*
2630	 * Set initial can_queue value since 0 is no longer supported and
2631	 * scsi_add_host will fail. This will be adjusted later based on the
2632	 * max xri value determined in hba setup.
2633	 */
2634	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2635	if (dev != &phba->pcidev->dev) {
2636		shost->transportt = lpfc_vport_transport_template;
2637		vport->port_type = LPFC_NPIV_PORT;
2638	} else {
2639		shost->transportt = lpfc_transport_template;
2640		vport->port_type = LPFC_PHYSICAL_PORT;
2641	}
2642
2643	/* Initialize all internally managed lists. */
2644	INIT_LIST_HEAD(&vport->fc_nodes);
2645	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2646	spin_lock_init(&vport->work_port_lock);
2647
2648	init_timer(&vport->fc_disctmo);
2649	vport->fc_disctmo.function = lpfc_disc_timeout;
2650	vport->fc_disctmo.data = (unsigned long)vport;
2651
2652	init_timer(&vport->fc_fdmitmo);
2653	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2654	vport->fc_fdmitmo.data = (unsigned long)vport;
2655
2656	init_timer(&vport->els_tmofunc);
2657	vport->els_tmofunc.function = lpfc_els_timeout;
2658	vport->els_tmofunc.data = (unsigned long)vport;
2659	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2660	if (error)
2661		goto out_put_shost;
2662
2663	spin_lock_irq(&phba->hbalock);
2664	list_add_tail(&vport->listentry, &phba->port_list);
2665	spin_unlock_irq(&phba->hbalock);
2666	return vport;
2667
2668out_put_shost:
2669	scsi_host_put(shost);
2670out:
2671	return NULL;
2672}
2673
2674/**
2675 * destroy_port -  destroy an FC port
2676 * @vport: pointer to an lpfc virtual N_Port data structure.
2677 *
2678 * This routine destroys a FC port from the upper layer protocol. All the
2679 * resources associated with the port are released.
2680 **/
2681void
2682destroy_port(struct lpfc_vport *vport)
2683{
2684	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2685	struct lpfc_hba  *phba = vport->phba;
2686
2687	lpfc_debugfs_terminate(vport);
2688	fc_remove_host(shost);
2689	scsi_remove_host(shost);
2690
2691	spin_lock_irq(&phba->hbalock);
2692	list_del_init(&vport->listentry);
2693	spin_unlock_irq(&phba->hbalock);
2694
2695	lpfc_cleanup(vport);
2696	return;
2697}
2698
2699/**
2700 * lpfc_get_instance - Get a unique integer ID
2701 *
2702 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2703 * uses the kernel idr facility to perform the task.
2704 *
2705 * Return codes:
2706 *   instance - a unique integer ID allocated as the new instance.
2707 *   -1 - lpfc get instance failed.
2708 **/
2709int
2710lpfc_get_instance(void)
2711{
2712	int instance = 0;
2713
2714	/* Assign an unused number */
2715	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2716		return -1;
2717	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2718		return -1;
2719	return instance;
2720}
2721
2722/**
2723 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2724 * @shost: pointer to SCSI host data structure.
2725 * @time: elapsed time of the scan in jiffies.
2726 *
2727 * This routine is called by the SCSI layer with a SCSI host to determine
2728 * whether the scan host is finished.
2729 *
2730 * Note: there is no scan_start function as adapter initialization will have
2731 * asynchronously kicked off the link initialization.
2732 *
2733 * Return codes
2734 *   0 - SCSI host scan is not over yet.
2735 *   1 - SCSI host scan is over.
2736 **/
2737int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2738{
2739	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2740	struct lpfc_hba   *phba = vport->phba;
2741	int stat = 0;
2742
2743	spin_lock_irq(shost->host_lock);
2744
2745	if (vport->load_flag & FC_UNLOADING) {
2746		stat = 1;
2747		goto finished;
2748	}
2749	if (time >= 30 * HZ) {
2750		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2751				"0461 Scanning longer than 30 "
2752				"seconds.  Continuing initialization\n");
2753		stat = 1;
2754		goto finished;
2755	}
2756	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2757		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2758				"0465 Link down longer than 15 "
2759				"seconds.  Continuing initialization\n");
2760		stat = 1;
2761		goto finished;
2762	}
2763
2764	if (vport->port_state != LPFC_VPORT_READY)
2765		goto finished;
2766	if (vport->num_disc_nodes || vport->fc_prli_sent)
2767		goto finished;
2768	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2769		goto finished;
2770	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2771		goto finished;
2772
2773	stat = 1;
2774
2775finished:
2776	spin_unlock_irq(shost->host_lock);
2777	return stat;
2778}
2779
2780/**
2781 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2782 * @shost: pointer to SCSI host data structure.
2783 *
2784 * This routine initializes a given SCSI host attributes on a FC port. The
2785 * SCSI host can be either on top of a physical port or a virtual port.
2786 **/
2787void lpfc_host_attrib_init(struct Scsi_Host *shost)
2788{
2789	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2790	struct lpfc_hba   *phba = vport->phba;
2791	/*
2792	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
2793	 */
2794
2795	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2796	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2797	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2798
2799	memset(fc_host_supported_fc4s(shost), 0,
2800	       sizeof(fc_host_supported_fc4s(shost)));
2801	fc_host_supported_fc4s(shost)[2] = 1;
2802	fc_host_supported_fc4s(shost)[7] = 1;
2803
2804	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2805				 sizeof fc_host_symbolic_name(shost));
2806
2807	fc_host_supported_speeds(shost) = 0;
2808	if (phba->lmt & LMT_10Gb)
2809		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2810	if (phba->lmt & LMT_8Gb)
2811		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2812	if (phba->lmt & LMT_4Gb)
2813		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2814	if (phba->lmt & LMT_2Gb)
2815		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2816	if (phba->lmt & LMT_1Gb)
2817		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2818
2819	fc_host_maxframe_size(shost) =
2820		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2821		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2822
2823	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2824
2825	/* This value is also unchanging */
2826	memset(fc_host_active_fc4s(shost), 0,
2827	       sizeof(fc_host_active_fc4s(shost)));
2828	fc_host_active_fc4s(shost)[2] = 1;
2829	fc_host_active_fc4s(shost)[7] = 1;
2830
2831	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2832	spin_lock_irq(shost->host_lock);
2833	vport->load_flag &= ~FC_LOADING;
2834	spin_unlock_irq(shost->host_lock);
2835}
2836
2837/**
2838 * lpfc_stop_port_s3 - Stop SLI3 device port
2839 * @phba: pointer to lpfc hba data structure.
2840 *
2841 * This routine is invoked to stop an SLI3 device port, it stops the device
2842 * from generating interrupts and stops the device driver's timers for the
2843 * device.
2844 **/
2845static void
2846lpfc_stop_port_s3(struct lpfc_hba *phba)
2847{
2848	/* Clear all interrupt enable conditions */
2849	writel(0, phba->HCregaddr);
2850	readl(phba->HCregaddr); /* flush */
2851	/* Clear all pending interrupts */
2852	writel(0xffffffff, phba->HAregaddr);
2853	readl(phba->HAregaddr); /* flush */
2854
2855	/* Reset some HBA SLI setup states */
2856	lpfc_stop_hba_timers(phba);
2857	phba->pport->work_port_events = 0;
2858}
2859
2860/**
2861 * lpfc_stop_port_s4 - Stop SLI4 device port
2862 * @phba: pointer to lpfc hba data structure.
2863 *
2864 * This routine is invoked to stop an SLI4 device port, it stops the device
2865 * from generating interrupts and stops the device driver's timers for the
2866 * device.
2867 **/
2868static void
2869lpfc_stop_port_s4(struct lpfc_hba *phba)
2870{
2871	/* Reset some HBA SLI4 setup states */
2872	lpfc_stop_hba_timers(phba);
2873	phba->pport->work_port_events = 0;
2874	phba->sli4_hba.intr_enable = 0;
2875}
2876
2877/**
2878 * lpfc_stop_port - Wrapper function for stopping hba port
2879 * @phba: Pointer to HBA context object.
2880 *
2881 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2882 * the API jump table function pointer from the lpfc_hba struct.
2883 **/
2884void
2885lpfc_stop_port(struct lpfc_hba *phba)
2886{
2887	phba->lpfc_stop_port(phba);
2888}
2889
2890/**
2891 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2892 * @phba: Pointer to hba for which this call is being executed.
2893 *
2894 * This routine starts the timer waiting for the FCF rediscovery to complete.
2895 **/
2896void
2897lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2898{
2899	unsigned long fcf_redisc_wait_tmo =
2900		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2901	/* Start fcf rediscovery wait period timer */
2902	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2903	spin_lock_irq(&phba->hbalock);
2904	/* Allow action to new fcf asynchronous event */
2905	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2906	/* Mark the FCF rediscovery pending state */
2907	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2908	spin_unlock_irq(&phba->hbalock);
2909}
2910
2911/**
2912 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2913 * @ptr: Map to lpfc_hba data structure pointer.
2914 *
2915 * This routine is invoked when waiting for FCF table rediscover has been
2916 * timed out. If new FCF record(s) has (have) been discovered during the
2917 * wait period, a new FCF event shall be added to the FCOE async event
2918 * list, and then worker thread shall be waked up for processing from the
2919 * worker thread context.
2920 **/
2921void
2922lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2923{
2924	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2925
2926	/* Don't send FCF rediscovery event if timer cancelled */
2927	spin_lock_irq(&phba->hbalock);
2928	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2929		spin_unlock_irq(&phba->hbalock);
2930		return;
2931	}
2932	/* Clear FCF rediscovery timer pending flag */
2933	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2934	/* FCF rediscovery event to worker thread */
2935	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2936	spin_unlock_irq(&phba->hbalock);
2937	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2938			"2776 FCF rediscover wait timer expired, post "
2939			"a worker thread event for FCF table scan\n");
2940	/* wake up worker thread */
2941	lpfc_worker_wake_up(phba);
2942}
2943
2944/**
2945 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2946 * @phba: pointer to lpfc hba data structure.
2947 *
2948 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2949 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2950 * was successful and the firmware supports FCoE. Any other return indicates
2951 * a error. It is assumed that this function will be called before interrupts
2952 * are enabled.
2953 **/
2954static int
2955lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2956{
2957	int rc = 0;
2958	LPFC_MBOXQ_t *mboxq;
2959	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2960	uint32_t length;
2961	uint32_t shdr_status, shdr_add_status;
2962
2963	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2964	if (!mboxq) {
2965		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2966				"2621 Failed to allocate mbox for "
2967				"query firmware config cmd\n");
2968		return -ENOMEM;
2969	}
2970	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2971	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2972		  sizeof(struct lpfc_sli4_cfg_mhdr));
2973	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2974			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2975			 length, LPFC_SLI4_MBX_EMBED);
2976	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2977	/* The IOCTL status is embedded in the mailbox subheader. */
2978	shdr_status = bf_get(lpfc_mbox_hdr_status,
2979			     &query_fw_cfg->header.cfg_shdr.response);
2980	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2981				 &query_fw_cfg->header.cfg_shdr.response);
2982	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2983		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2984				"2622 Query Firmware Config failed "
2985				"mbx status x%x, status x%x add_status x%x\n",
2986				rc, shdr_status, shdr_add_status);
2987		return -EINVAL;
2988	}
2989	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
2990		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2991				"2623 FCoE Function not supported by firmware. "
2992				"Function mode = %08x\n",
2993				query_fw_cfg->function_mode);
2994		return -EINVAL;
2995	}
2996	if (rc != MBX_TIMEOUT)
2997		mempool_free(mboxq, phba->mbox_mem_pool);
2998	return 0;
2999}
3000
3001/**
3002 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3003 * @phba: pointer to lpfc hba data structure.
3004 * @acqe_link: pointer to the async link completion queue entry.
3005 *
3006 * This routine is to parse the SLI4 link-attention link fault code and
3007 * translate it into the base driver's read link attention mailbox command
3008 * status.
3009 *
3010 * Return: Link-attention status in terms of base driver's coding.
3011 **/
3012static uint16_t
3013lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3014			   struct lpfc_acqe_link *acqe_link)
3015{
3016	uint16_t latt_fault;
3017
3018	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3019	case LPFC_ASYNC_LINK_FAULT_NONE:
3020	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3021	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3022		latt_fault = 0;
3023		break;
3024	default:
3025		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3026				"0398 Invalid link fault code: x%x\n",
3027				bf_get(lpfc_acqe_link_fault, acqe_link));
3028		latt_fault = MBXERR_ERROR;
3029		break;
3030	}
3031	return latt_fault;
3032}
3033
3034/**
3035 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3036 * @phba: pointer to lpfc hba data structure.
3037 * @acqe_link: pointer to the async link completion queue entry.
3038 *
3039 * This routine is to parse the SLI4 link attention type and translate it
3040 * into the base driver's link attention type coding.
3041 *
3042 * Return: Link attention type in terms of base driver's coding.
3043 **/
3044static uint8_t
3045lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3046			  struct lpfc_acqe_link *acqe_link)
3047{
3048	uint8_t att_type;
3049
3050	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3051	case LPFC_ASYNC_LINK_STATUS_DOWN:
3052	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3053		att_type = AT_LINK_DOWN;
3054		break;
3055	case LPFC_ASYNC_LINK_STATUS_UP:
3056		/* Ignore physical link up events - wait for logical link up */
3057		att_type = AT_RESERVED;
3058		break;
3059	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3060		att_type = AT_LINK_UP;
3061		break;
3062	default:
3063		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3064				"0399 Invalid link attention type: x%x\n",
3065				bf_get(lpfc_acqe_link_status, acqe_link));
3066		att_type = AT_RESERVED;
3067		break;
3068	}
3069	return att_type;
3070}
3071
3072/**
3073 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3074 * @phba: pointer to lpfc hba data structure.
3075 * @acqe_link: pointer to the async link completion queue entry.
3076 *
3077 * This routine is to parse the SLI4 link-attention link speed and translate
3078 * it into the base driver's link-attention link speed coding.
3079 *
3080 * Return: Link-attention link speed in terms of base driver's coding.
3081 **/
3082static uint8_t
3083lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3084				struct lpfc_acqe_link *acqe_link)
3085{
3086	uint8_t link_speed;
3087
3088	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3089	case LPFC_ASYNC_LINK_SPEED_ZERO:
3090		link_speed = LA_UNKNW_LINK;
3091		break;
3092	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3093		link_speed = LA_UNKNW_LINK;
3094		break;
3095	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3096		link_speed = LA_UNKNW_LINK;
3097		break;
3098	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3099		link_speed = LA_1GHZ_LINK;
3100		break;
3101	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3102		link_speed = LA_10GHZ_LINK;
3103		break;
3104	default:
3105		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3106				"0483 Invalid link-attention link speed: x%x\n",
3107				bf_get(lpfc_acqe_link_speed, acqe_link));
3108		link_speed = LA_UNKNW_LINK;
3109		break;
3110	}
3111	return link_speed;
3112}
3113
3114/**
3115 * lpfc_sli4_async_link_evt - Process the asynchronous link event
3116 * @phba: pointer to lpfc hba data structure.
3117 * @acqe_link: pointer to the async link completion queue entry.
3118 *
3119 * This routine is to handle the SLI4 asynchronous link event.
3120 **/
3121static void
3122lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3123			 struct lpfc_acqe_link *acqe_link)
3124{
3125	struct lpfc_dmabuf *mp;
3126	LPFC_MBOXQ_t *pmb;
3127	MAILBOX_t *mb;
3128	READ_LA_VAR *la;
3129	uint8_t att_type;
3130
3131	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3132	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3133		return;
3134	phba->fcoe_eventtag = acqe_link->event_tag;
3135	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3136	if (!pmb) {
3137		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3138				"0395 The mboxq allocation failed\n");
3139		return;
3140	}
3141	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3142	if (!mp) {
3143		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3144				"0396 The lpfc_dmabuf allocation failed\n");
3145		goto out_free_pmb;
3146	}
3147	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3148	if (!mp->virt) {
3149		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3150				"0397 The mbuf allocation failed\n");
3151		goto out_free_dmabuf;
3152	}
3153
3154	/* Cleanup any outstanding ELS commands */
3155	lpfc_els_flush_all_cmd(phba);
3156
3157	/* Block ELS IOCBs until we have done process link event */
3158	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3159
3160	/* Update link event statistics */
3161	phba->sli.slistat.link_event++;
3162
3163	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3164	lpfc_read_la(phba, pmb, mp);
3165	pmb->vport = phba->pport;
3166
3167	/* Parse and translate status field */
3168	mb = &pmb->u.mb;
3169	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3170
3171	/* Parse and translate link attention fields */
3172	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3173	la->eventTag = acqe_link->event_tag;
3174	la->attType = att_type;
3175	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3176
3177	/* Fake the the following irrelvant fields */
3178	la->topology = TOPOLOGY_PT_PT;
3179	la->granted_AL_PA = 0;
3180	la->il = 0;
3181	la->pb = 0;
3182	la->fa = 0;
3183	la->mm = 0;
3184
3185	/* Keep the link status for extra SLI4 state machine reference */
3186	phba->sli4_hba.link_state.speed =
3187				bf_get(lpfc_acqe_link_speed, acqe_link);
3188	phba->sli4_hba.link_state.duplex =
3189				bf_get(lpfc_acqe_link_duplex, acqe_link);
3190	phba->sli4_hba.link_state.status =
3191				bf_get(lpfc_acqe_link_status, acqe_link);
3192	phba->sli4_hba.link_state.physical =
3193				bf_get(lpfc_acqe_link_physical, acqe_link);
3194	phba->sli4_hba.link_state.fault =
3195				bf_get(lpfc_acqe_link_fault, acqe_link);
3196	phba->sli4_hba.link_state.logical_speed =
3197				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3198
3199	/* Invoke the lpfc_handle_latt mailbox command callback function */
3200	lpfc_mbx_cmpl_read_la(phba, pmb);
3201
3202	return;
3203
3204out_free_dmabuf:
3205	kfree(mp);
3206out_free_pmb:
3207	mempool_free(pmb, phba->mbox_mem_pool);
3208}
3209
3210/**
3211 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3212 * @vport: pointer to vport data structure.
3213 *
3214 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3215 * response to a CVL event.
3216 *
3217 * Return the pointer to the ndlp with the vport if successful, otherwise
3218 * return NULL.
3219 **/
3220static struct lpfc_nodelist *
3221lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3222{
3223	struct lpfc_nodelist *ndlp;
3224	struct Scsi_Host *shost;
3225	struct lpfc_hba *phba;
3226
3227	if (!vport)
3228		return NULL;
3229	phba = vport->phba;
3230	if (!phba)
3231		return NULL;
3232	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3233	if (!ndlp) {
3234		/* Cannot find existing Fabric ndlp, so allocate a new one */
3235		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3236		if (!ndlp)
3237			return 0;
3238		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3239		/* Set the node type */
3240		ndlp->nlp_type |= NLP_FABRIC;
3241		/* Put ndlp onto node list */
3242		lpfc_enqueue_node(vport, ndlp);
3243	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3244		/* re-setup ndlp without removing from node list */
3245		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3246		if (!ndlp)
3247			return 0;
3248	}
3249	if (phba->pport->port_state < LPFC_FLOGI)
3250		return NULL;
3251	/* If virtual link is not yet instantiated ignore CVL */
3252	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC))
3253		return NULL;
3254	shost = lpfc_shost_from_vport(vport);
3255	if (!shost)
3256		return NULL;
3257	lpfc_linkdown_port(vport);
3258	lpfc_cleanup_pending_mbox(vport);
3259	spin_lock_irq(shost->host_lock);
3260	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3261	spin_unlock_irq(shost->host_lock);
3262
3263	return ndlp;
3264}
3265
3266/**
3267 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3268 * @vport: pointer to lpfc hba data structure.
3269 *
3270 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3271 * response to a FCF dead event.
3272 **/
3273static void
3274lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3275{
3276	struct lpfc_vport **vports;
3277	int i;
3278
3279	vports = lpfc_create_vport_work_array(phba);
3280	if (vports)
3281		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3282			lpfc_sli4_perform_vport_cvl(vports[i]);
3283	lpfc_destroy_vport_work_array(phba, vports);
3284}
3285
3286/**
3287 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3288 * @phba: pointer to lpfc hba data structure.
3289 * @acqe_link: pointer to the async fcoe completion queue entry.
3290 *
3291 * This routine is to handle the SLI4 asynchronous fcoe event.
3292 **/
3293static void
3294lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3295			 struct lpfc_acqe_fcoe *acqe_fcoe)
3296{
3297	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3298	int rc;
3299	struct lpfc_vport *vport;
3300	struct lpfc_nodelist *ndlp;
3301	struct Scsi_Host  *shost;
3302	int active_vlink_present;
3303	struct lpfc_vport **vports;
3304	int i;
3305
3306	phba->fc_eventTag = acqe_fcoe->event_tag;
3307	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3308	switch (event_type) {
3309	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3310	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3311		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3312			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3313					LOG_DISCOVERY,
3314					"2546 New FCF found event: "
3315					"evt_tag:x%x, fcf_index:x%x\n",
3316					acqe_fcoe->event_tag,
3317					acqe_fcoe->index);
3318		else
3319			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3320					LOG_DISCOVERY,
3321					"2788 FCF parameter modified event: "
3322					"evt_tag:x%x, fcf_index:x%x\n",
3323					acqe_fcoe->event_tag,
3324					acqe_fcoe->index);
3325		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3326			/*
3327			 * During period of FCF discovery, read the FCF
3328			 * table record indexed by the event to update
3329			 * FCF round robin failover eligible FCF bmask.
3330			 */
3331			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3332					LOG_DISCOVERY,
3333					"2779 Read new FCF record with "
3334					"fcf_index:x%x for updating FCF "
3335					"round robin failover bmask\n",
3336					acqe_fcoe->index);
3337			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3338		}
3339
3340		/* If the FCF discovery is in progress, do nothing. */
3341		spin_lock_irq(&phba->hbalock);
3342		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
3343			spin_unlock_irq(&phba->hbalock);
3344			break;
3345		}
3346		/* If fast FCF failover rescan event is pending, do nothing */
3347		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3348			spin_unlock_irq(&phba->hbalock);
3349			break;
3350		}
3351
3352		/* If the FCF has been in discovered state, do nothing. */
3353		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3354			spin_unlock_irq(&phba->hbalock);
3355			break;
3356		}
3357		spin_unlock_irq(&phba->hbalock);
3358
3359		/* Otherwise, scan the entire FCF table and re-discover SAN */
3360		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3361				"2770 Start FCF table scan due to new FCF "
3362				"event: evt_tag:x%x, fcf_index:x%x\n",
3363				acqe_fcoe->event_tag, acqe_fcoe->index);
3364		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3365						     LPFC_FCOE_FCF_GET_FIRST);
3366		if (rc)
3367			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3368					"2547 Issue FCF scan read FCF mailbox "
3369					"command failed 0x%x\n", rc);
3370		break;
3371
3372	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3373		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3374			"2548 FCF Table full count 0x%x tag 0x%x\n",
3375			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3376			acqe_fcoe->event_tag);
3377		break;
3378
3379	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3380		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3381			"2549 FCF disconnected from network index 0x%x"
3382			" tag 0x%x\n", acqe_fcoe->index,
3383			acqe_fcoe->event_tag);
3384		/*
3385		 * If we are in the middle of FCF failover process, clear
3386		 * the corresponding FCF bit in the roundrobin bitmap.
3387		 */
3388		spin_lock_irq(&phba->hbalock);
3389		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3390			spin_unlock_irq(&phba->hbalock);
3391			/* Update FLOGI FCF failover eligible FCF bmask */
3392			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3393			break;
3394		}
3395		spin_unlock_irq(&phba->hbalock);
3396
3397		/* If the event is not for currently used fcf do nothing */
3398		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3399			break;
3400
3401		/*
3402		 * Otherwise, request the port to rediscover the entire FCF
3403		 * table for a fast recovery from case that the current FCF
3404		 * is no longer valid as we are not in the middle of FCF
3405		 * failover process already.
3406		 */
3407		spin_lock_irq(&phba->hbalock);
3408		/* Mark the fast failover process in progress */
3409		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3410		spin_unlock_irq(&phba->hbalock);
3411
3412		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3413				"2771 Start FCF fast failover process due to "
3414				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3415				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3416		rc = lpfc_sli4_redisc_fcf_table(phba);
3417		if (rc) {
3418			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3419					LOG_DISCOVERY,
3420					"2772 Issue FCF rediscover mabilbox "
3421					"command failed, fail through to FCF "
3422					"dead event\n");
3423			spin_lock_irq(&phba->hbalock);
3424			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3425			spin_unlock_irq(&phba->hbalock);
3426			/*
3427			 * Last resort will fail over by treating this
3428			 * as a link down to FCF registration.
3429			 */
3430			lpfc_sli4_fcf_dead_failthrough(phba);
3431		} else {
3432			/* Reset FCF roundrobin bmask for new discovery */
3433			memset(phba->fcf.fcf_rr_bmask, 0,
3434			       sizeof(*phba->fcf.fcf_rr_bmask));
3435			/*
3436			 * Handling fast FCF failover to a DEAD FCF event is
3437			 * considered equalivant to receiving CVL to all vports.
3438			 */
3439			lpfc_sli4_perform_all_vport_cvl(phba);
3440		}
3441		break;
3442	case LPFC_FCOE_EVENT_TYPE_CVL:
3443		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3444			"2718 Clear Virtual Link Received for VPI 0x%x"
3445			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3446		vport = lpfc_find_vport_by_vpid(phba,
3447				acqe_fcoe->index - phba->vpi_base);
3448		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3449		if (!ndlp)
3450			break;
3451		active_vlink_present = 0;
3452
3453		vports = lpfc_create_vport_work_array(phba);
3454		if (vports) {
3455			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3456					i++) {
3457				if ((!(vports[i]->fc_flag &
3458					FC_VPORT_CVL_RCVD)) &&
3459					(vports[i]->port_state > LPFC_FDISC)) {
3460					active_vlink_present = 1;
3461					break;
3462				}
3463			}
3464			lpfc_destroy_vport_work_array(phba, vports);
3465		}
3466
3467		if (active_vlink_present) {
3468			/*
3469			 * If there are other active VLinks present,
3470			 * re-instantiate the Vlink using FDISC.
3471			 */
3472			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3473			shost = lpfc_shost_from_vport(vport);
3474			spin_lock_irq(shost->host_lock);
3475			ndlp->nlp_flag |= NLP_DELAY_TMO;
3476			spin_unlock_irq(shost->host_lock);
3477			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3478			vport->port_state = LPFC_FDISC;
3479		} else {
3480			/*
3481			 * Otherwise, we request port to rediscover
3482			 * the entire FCF table for a fast recovery
3483			 * from possible case that the current FCF
3484			 * is no longer valid if we are not already
3485			 * in the FCF failover process.
3486			 */
3487			spin_lock_irq(&phba->hbalock);
3488			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3489				spin_unlock_irq(&phba->hbalock);
3490				break;
3491			}
3492			/* Mark the fast failover process in progress */
3493			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3494			spin_unlock_irq(&phba->hbalock);
3495			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3496					LOG_DISCOVERY,
3497					"2773 Start FCF fast failover due "
3498					"to CVL event: evt_tag:x%x\n",
3499					acqe_fcoe->event_tag);
3500			rc = lpfc_sli4_redisc_fcf_table(phba);
3501			if (rc) {
3502				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3503						LOG_DISCOVERY,
3504						"2774 Issue FCF rediscover "
3505						"mabilbox command failed, "
3506						"through to CVL event\n");
3507				spin_lock_irq(&phba->hbalock);
3508				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3509				spin_unlock_irq(&phba->hbalock);
3510				/*
3511				 * Last resort will be re-try on the
3512				 * the current registered FCF entry.
3513				 */
3514				lpfc_retry_pport_discovery(phba);
3515			} else
3516				/*
3517				 * Reset FCF roundrobin bmask for new
3518				 * discovery.
3519				 */
3520				memset(phba->fcf.fcf_rr_bmask, 0,
3521				       sizeof(*phba->fcf.fcf_rr_bmask));
3522		}
3523		break;
3524	default:
3525		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3526			"0288 Unknown FCoE event type 0x%x event tag "
3527			"0x%x\n", event_type, acqe_fcoe->event_tag);
3528		break;
3529	}
3530}
3531
3532/**
3533 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3534 * @phba: pointer to lpfc hba data structure.
3535 * @acqe_link: pointer to the async dcbx completion queue entry.
3536 *
3537 * This routine is to handle the SLI4 asynchronous dcbx event.
3538 **/
3539static void
3540lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3541			 struct lpfc_acqe_dcbx *acqe_dcbx)
3542{
3543	phba->fc_eventTag = acqe_dcbx->event_tag;
3544	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3545			"0290 The SLI4 DCBX asynchronous event is not "
3546			"handled yet\n");
3547}
3548
3549/**
3550 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3551 * @phba: pointer to lpfc hba data structure.
3552 * @acqe_link: pointer to the async grp5 completion queue entry.
3553 *
3554 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3555 * is an asynchronous notified of a logical link speed change.  The Port
3556 * reports the logical link speed in units of 10Mbps.
3557 **/
3558static void
3559lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3560			 struct lpfc_acqe_grp5 *acqe_grp5)
3561{
3562	uint16_t prev_ll_spd;
3563
3564	phba->fc_eventTag = acqe_grp5->event_tag;
3565	phba->fcoe_eventtag = acqe_grp5->event_tag;
3566	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3567	phba->sli4_hba.link_state.logical_speed =
3568		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3569	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3570			"2789 GRP5 Async Event: Updating logical link speed "
3571			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3572			(phba->sli4_hba.link_state.logical_speed*10));
3573}
3574
3575/**
3576 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3577 * @phba: pointer to lpfc hba data structure.
3578 *
3579 * This routine is invoked by the worker thread to process all the pending
3580 * SLI4 asynchronous events.
3581 **/
3582void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3583{
3584	struct lpfc_cq_event *cq_event;
3585
3586	/* First, declare the async event has been handled */
3587	spin_lock_irq(&phba->hbalock);
3588	phba->hba_flag &= ~ASYNC_EVENT;
3589	spin_unlock_irq(&phba->hbalock);
3590	/* Now, handle all the async events */
3591	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3592		/* Get the first event from the head of the event queue */
3593		spin_lock_irq(&phba->hbalock);
3594		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3595				 cq_event, struct lpfc_cq_event, list);
3596		spin_unlock_irq(&phba->hbalock);
3597		/* Process the asynchronous event */
3598		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3599		case LPFC_TRAILER_CODE_LINK:
3600			lpfc_sli4_async_link_evt(phba,
3601						 &cq_event->cqe.acqe_link);
3602			break;
3603		case LPFC_TRAILER_CODE_FCOE:
3604			lpfc_sli4_async_fcoe_evt(phba,
3605						 &cq_event->cqe.acqe_fcoe);
3606			break;
3607		case LPFC_TRAILER_CODE_DCBX:
3608			lpfc_sli4_async_dcbx_evt(phba,
3609						 &cq_event->cqe.acqe_dcbx);
3610			break;
3611		case LPFC_TRAILER_CODE_GRP5:
3612			lpfc_sli4_async_grp5_evt(phba,
3613						 &cq_event->cqe.acqe_grp5);
3614			break;
3615		default:
3616			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3617					"1804 Invalid asynchrous event code: "
3618					"x%x\n", bf_get(lpfc_trailer_code,
3619					&cq_event->cqe.mcqe_cmpl));
3620			break;
3621		}
3622		/* Free the completion event processed to the free pool */
3623		lpfc_sli4_cq_event_release(phba, cq_event);
3624	}
3625}
3626
3627/**
3628 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3629 * @phba: pointer to lpfc hba data structure.
3630 *
3631 * This routine is invoked by the worker thread to process FCF table
3632 * rediscovery pending completion event.
3633 **/
3634void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3635{
3636	int rc;
3637
3638	spin_lock_irq(&phba->hbalock);
3639	/* Clear FCF rediscovery timeout event */
3640	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3641	/* Clear driver fast failover FCF record flag */
3642	phba->fcf.failover_rec.flag = 0;
3643	/* Set state for FCF fast failover */
3644	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3645	spin_unlock_irq(&phba->hbalock);
3646
3647	/* Scan FCF table from the first entry to re-discover SAN */
3648	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3649			"2777 Start FCF table scan after FCF "
3650			"rediscovery quiescent period over\n");
3651	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3652	if (rc)
3653		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3654				"2747 Issue FCF scan read FCF mailbox "
3655				"command failed 0x%x\n", rc);
3656}
3657
3658/**
3659 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3660 * @phba: pointer to lpfc hba data structure.
3661 * @dev_grp: The HBA PCI-Device group number.
3662 *
3663 * This routine is invoked to set up the per HBA PCI-Device group function
3664 * API jump table entries.
3665 *
3666 * Return: 0 if success, otherwise -ENODEV
3667 **/
3668int
3669lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3670{
3671	int rc;
3672
3673	/* Set up lpfc PCI-device group */
3674	phba->pci_dev_grp = dev_grp;
3675
3676	/* The LPFC_PCI_DEV_OC uses SLI4 */
3677	if (dev_grp == LPFC_PCI_DEV_OC)
3678		phba->sli_rev = LPFC_SLI_REV4;
3679
3680	/* Set up device INIT API function jump table */
3681	rc = lpfc_init_api_table_setup(phba, dev_grp);
3682	if (rc)
3683		return -ENODEV;
3684	/* Set up SCSI API function jump table */
3685	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3686	if (rc)
3687		return -ENODEV;
3688	/* Set up SLI API function jump table */
3689	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3690	if (rc)
3691		return -ENODEV;
3692	/* Set up MBOX API function jump table */
3693	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3694	if (rc)
3695		return -ENODEV;
3696
3697	return 0;
3698}
3699
3700/**
3701 * lpfc_log_intr_mode - Log the active interrupt mode
3702 * @phba: pointer to lpfc hba data structure.
3703 * @intr_mode: active interrupt mode adopted.
3704 *
3705 * This routine it invoked to log the currently used active interrupt mode
3706 * to the device.
3707 **/
3708static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3709{
3710	switch (intr_mode) {
3711	case 0:
3712		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3713				"0470 Enable INTx interrupt mode.\n");
3714		break;
3715	case 1:
3716		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3717				"0481 Enabled MSI interrupt mode.\n");
3718		break;
3719	case 2:
3720		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3721				"0480 Enabled MSI-X interrupt mode.\n");
3722		break;
3723	default:
3724		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3725				"0482 Illegal interrupt mode.\n");
3726		break;
3727	}
3728	return;
3729}
3730
3731/**
3732 * lpfc_enable_pci_dev - Enable a generic PCI device.
3733 * @phba: pointer to lpfc hba data structure.
3734 *
3735 * This routine is invoked to enable the PCI device that is common to all
3736 * PCI devices.
3737 *
3738 * Return codes
3739 * 	0 - successful
3740 * 	other values - error
3741 **/
3742static int
3743lpfc_enable_pci_dev(struct lpfc_hba *phba)
3744{
3745	struct pci_dev *pdev;
3746	int bars;
3747
3748	/* Obtain PCI device reference */
3749	if (!phba->pcidev)
3750		goto out_error;
3751	else
3752		pdev = phba->pcidev;
3753	/* Select PCI BARs */
3754	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3755	/* Enable PCI device */
3756	if (pci_enable_device_mem(pdev))
3757		goto out_error;
3758	/* Request PCI resource for the device */
3759	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3760		goto out_disable_device;
3761	/* Set up device as PCI master and save state for EEH */
3762	pci_set_master(pdev);
3763	pci_try_set_mwi(pdev);
3764	pci_save_state(pdev);
3765
3766	return 0;
3767
3768out_disable_device:
3769	pci_disable_device(pdev);
3770out_error:
3771	return -ENODEV;
3772}
3773
3774/**
3775 * lpfc_disable_pci_dev - Disable a generic PCI device.
3776 * @phba: pointer to lpfc hba data structure.
3777 *
3778 * This routine is invoked to disable the PCI device that is common to all
3779 * PCI devices.
3780 **/
3781static void
3782lpfc_disable_pci_dev(struct lpfc_hba *phba)
3783{
3784	struct pci_dev *pdev;
3785	int bars;
3786
3787	/* Obtain PCI device reference */
3788	if (!phba->pcidev)
3789		return;
3790	else
3791		pdev = phba->pcidev;
3792	/* Select PCI BARs */
3793	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3794	/* Release PCI resource and disable PCI device */
3795	pci_release_selected_regions(pdev, bars);
3796	pci_disable_device(pdev);
3797	/* Null out PCI private reference to driver */
3798	pci_set_drvdata(pdev, NULL);
3799
3800	return;
3801}
3802
3803/**
3804 * lpfc_reset_hba - Reset a hba
3805 * @phba: pointer to lpfc hba data structure.
3806 *
3807 * This routine is invoked to reset a hba device. It brings the HBA
3808 * offline, performs a board restart, and then brings the board back
3809 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3810 * on outstanding mailbox commands.
3811 **/
3812void
3813lpfc_reset_hba(struct lpfc_hba *phba)
3814{
3815	/* If resets are disabled then set error state and return. */
3816	if (!phba->cfg_enable_hba_reset) {
3817		phba->link_state = LPFC_HBA_ERROR;
3818		return;
3819	}
3820	lpfc_offline_prep(phba);
3821	lpfc_offline(phba);
3822	lpfc_sli_brdrestart(phba);
3823	lpfc_online(phba);
3824	lpfc_unblock_mgmt_io(phba);
3825}
3826
3827/**
3828 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3829 * @phba: pointer to lpfc hba data structure.
3830 *
3831 * This routine is invoked to set up the driver internal resources specific to
3832 * support the SLI-3 HBA device it attached to.
3833 *
3834 * Return codes
3835 * 	0 - successful
3836 * 	other values - error
3837 **/
3838static int
3839lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3840{
3841	struct lpfc_sli *psli;
3842
3843	/*
3844	 * Initialize timers used by driver
3845	 */
3846
3847	/* Heartbeat timer */
3848	init_timer(&phba->hb_tmofunc);
3849	phba->hb_tmofunc.function = lpfc_hb_timeout;
3850	phba->hb_tmofunc.data = (unsigned long)phba;
3851
3852	psli = &phba->sli;
3853	/* MBOX heartbeat timer */
3854	init_timer(&psli->mbox_tmo);
3855	psli->mbox_tmo.function = lpfc_mbox_timeout;
3856	psli->mbox_tmo.data = (unsigned long) phba;
3857	/* FCP polling mode timer */
3858	init_timer(&phba->fcp_poll_timer);
3859	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3860	phba->fcp_poll_timer.data = (unsigned long) phba;
3861	/* Fabric block timer */
3862	init_timer(&phba->fabric_block_timer);
3863	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3864	phba->fabric_block_timer.data = (unsigned long) phba;
3865	/* EA polling mode timer */
3866	init_timer(&phba->eratt_poll);
3867	phba->eratt_poll.function = lpfc_poll_eratt;
3868	phba->eratt_poll.data = (unsigned long) phba;
3869
3870	/* Host attention work mask setup */
3871	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3872	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3873
3874	/* Get all the module params for configuring this host */
3875	lpfc_get_cfgparam(phba);
3876	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
3877		phba->menlo_flag |= HBA_MENLO_SUPPORT;
3878		/* check for menlo minimum sg count */
3879		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
3880			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
3881	}
3882
3883	/*
3884	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3885	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3886	 * 2 segments are added since the IOCB needs a command and response bde.
3887	 */
3888	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3889		sizeof(struct fcp_rsp) +
3890			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3891
3892	if (phba->cfg_enable_bg) {
3893		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3894		phba->cfg_sg_dma_buf_size +=
3895			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3896	}
3897
3898	/* Also reinitialize the host templates with new values. */
3899	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3900	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3901
3902	phba->max_vpi = LPFC_MAX_VPI;
3903	/* This will be set to correct value after config_port mbox */
3904	phba->max_vports = 0;
3905
3906	/*
3907	 * Initialize the SLI Layer to run with lpfc HBAs.
3908	 */
3909	lpfc_sli_setup(phba);
3910	lpfc_sli_queue_setup(phba);
3911
3912	/* Allocate device driver memory */
3913	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3914		return -ENOMEM;
3915
3916	return 0;
3917}
3918
3919/**
3920 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3921 * @phba: pointer to lpfc hba data structure.
3922 *
3923 * This routine is invoked to unset the driver internal resources set up
3924 * specific for supporting the SLI-3 HBA device it attached to.
3925 **/
3926static void
3927lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3928{
3929	/* Free device driver memory allocated */
3930	lpfc_mem_free_all(phba);
3931
3932	return;
3933}
3934
3935/**
3936 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3937 * @phba: pointer to lpfc hba data structure.
3938 *
3939 * This routine is invoked to set up the driver internal resources specific to
3940 * support the SLI-4 HBA device it attached to.
3941 *
3942 * Return codes
3943 * 	0 - successful
3944 * 	other values - error
3945 **/
3946static int
3947lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3948{
3949	struct lpfc_sli *psli;
3950	LPFC_MBOXQ_t *mboxq;
3951	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3952	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3953	struct lpfc_mqe *mqe;
3954	int longs;
3955
3956	/* Before proceed, wait for POST done and device ready */
3957	rc = lpfc_sli4_post_status_check(phba);
3958	if (rc)
3959		return -ENODEV;
3960
3961	/*
3962	 * Initialize timers used by driver
3963	 */
3964
3965	/* Heartbeat timer */
3966	init_timer(&phba->hb_tmofunc);
3967	phba->hb_tmofunc.function = lpfc_hb_timeout;
3968	phba->hb_tmofunc.data = (unsigned long)phba;
3969
3970	psli = &phba->sli;
3971	/* MBOX heartbeat timer */
3972	init_timer(&psli->mbox_tmo);
3973	psli->mbox_tmo.function = lpfc_mbox_timeout;
3974	psli->mbox_tmo.data = (unsigned long) phba;
3975	/* Fabric block timer */
3976	init_timer(&phba->fabric_block_timer);
3977	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3978	phba->fabric_block_timer.data = (unsigned long) phba;
3979	/* EA polling mode timer */
3980	init_timer(&phba->eratt_poll);
3981	phba->eratt_poll.function = lpfc_poll_eratt;
3982	phba->eratt_poll.data = (unsigned long) phba;
3983	/* FCF rediscover timer */
3984	init_timer(&phba->fcf.redisc_wait);
3985	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3986	phba->fcf.redisc_wait.data = (unsigned long)phba;
3987
3988	/*
3989	 * We need to do a READ_CONFIG mailbox command here before
3990	 * calling lpfc_get_cfgparam. For VFs this will report the
3991	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3992	 * All of the resources allocated
3993	 * for this Port are tied to these values.
3994	 */
3995	/* Get all the module params for configuring this host */
3996	lpfc_get_cfgparam(phba);
3997	phba->max_vpi = LPFC_MAX_VPI;
3998	/* This will be set to correct value after the read_config mbox */
3999	phba->max_vports = 0;
4000
4001	/* Program the default value of vlan_id and fc_map */
4002	phba->valid_vlan = 0;
4003	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4004	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4005	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4006
4007	/*
4008	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4009	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4010	 * 2 segments are added since the IOCB needs a command and response bde.
4011	 * To insure that the scsi sgl does not cross a 4k page boundary only
4012	 * sgl sizes of must be a power of 2.
4013	 */
4014	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4015		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4016	/* Feature Level 1 hardware is limited to 2 pages */
4017	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
4018	     LPFC_SLI_INTF_FEATURELEVEL1_1))
4019		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4020	else
4021		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4022	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4023	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4024	     dma_buf_size = dma_buf_size << 1)
4025		;
4026	if (dma_buf_size == max_buf_size)
4027		phba->cfg_sg_seg_cnt = (dma_buf_size -
4028			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4029			(2 * sizeof(struct sli4_sge))) /
4030				sizeof(struct sli4_sge);
4031	phba->cfg_sg_dma_buf_size = dma_buf_size;
4032
4033	/* Initialize buffer queue management fields */
4034	hbq_count = lpfc_sli_hbq_count();
4035	for (i = 0; i < hbq_count; ++i)
4036		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4037	INIT_LIST_HEAD(&phba->rb_pend_list);
4038	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4039	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4040
4041	/*
4042	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4043	 */
4044	/* Initialize the Abort scsi buffer list used by driver */
4045	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4046	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4047	/* This abort list used by worker thread */
4048	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4049
4050	/*
4051	 * Initialize dirver internal slow-path work queues
4052	 */
4053
4054	/* Driver internel slow-path CQ Event pool */
4055	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4056	/* Response IOCB work queue list */
4057	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4058	/* Asynchronous event CQ Event work queue list */
4059	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4060	/* Fast-path XRI aborted CQ Event work queue list */
4061	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4062	/* Slow-path XRI aborted CQ Event work queue list */
4063	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4064	/* Receive queue CQ Event work queue list */
4065	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4066
4067	/* Initialize the driver internal SLI layer lists. */
4068	lpfc_sli_setup(phba);
4069	lpfc_sli_queue_setup(phba);
4070
4071	/* Allocate device driver memory */
4072	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4073	if (rc)
4074		return -ENOMEM;
4075
4076	/* Create the bootstrap mailbox command */
4077	rc = lpfc_create_bootstrap_mbox(phba);
4078	if (unlikely(rc))
4079		goto out_free_mem;
4080
4081	/* Set up the host's endian order with the device. */
4082	rc = lpfc_setup_endian_order(phba);
4083	if (unlikely(rc))
4084		goto out_free_bsmbx;
4085
4086	rc = lpfc_sli4_fw_cfg_check(phba);
4087	if (unlikely(rc))
4088		goto out_free_bsmbx;
4089
4090	/* Set up the hba's configuration parameters. */
4091	rc = lpfc_sli4_read_config(phba);
4092	if (unlikely(rc))
4093		goto out_free_bsmbx;
4094
4095	/* Perform a function reset */
4096	rc = lpfc_pci_function_reset(phba);
4097	if (unlikely(rc))
4098		goto out_free_bsmbx;
4099
4100	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4101						       GFP_KERNEL);
4102	if (!mboxq) {
4103		rc = -ENOMEM;
4104		goto out_free_bsmbx;
4105	}
4106
4107	/* Get the Supported Pages. It is always available. */
4108	lpfc_supported_pages(mboxq);
4109	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4110	if (unlikely(rc)) {
4111		rc = -EIO;
4112		mempool_free(mboxq, phba->mbox_mem_pool);
4113		goto out_free_bsmbx;
4114	}
4115
4116	mqe = &mboxq->u.mqe;
4117	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4118	       LPFC_MAX_SUPPORTED_PAGES);
4119	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4120		switch (pn_page[i]) {
4121		case LPFC_SLI4_PARAMETERS:
4122			phba->sli4_hba.pc_sli4_params.supported = 1;
4123			break;
4124		default:
4125			break;
4126		}
4127	}
4128
4129	/* Read the port's SLI4 Parameters capabilities if supported. */
4130	if (phba->sli4_hba.pc_sli4_params.supported)
4131		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4132	mempool_free(mboxq, phba->mbox_mem_pool);
4133	if (rc) {
4134		rc = -EIO;
4135		goto out_free_bsmbx;
4136	}
4137	/* Create all the SLI4 queues */
4138	rc = lpfc_sli4_queue_create(phba);
4139	if (rc)
4140		goto out_free_bsmbx;
4141
4142	/* Create driver internal CQE event pool */
4143	rc = lpfc_sli4_cq_event_pool_create(phba);
4144	if (rc)
4145		goto out_destroy_queue;
4146
4147	/* Initialize and populate the iocb list per host */
4148	rc = lpfc_init_sgl_list(phba);
4149	if (rc) {
4150		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4151				"1400 Failed to initialize sgl list.\n");
4152		goto out_destroy_cq_event_pool;
4153	}
4154	rc = lpfc_init_active_sgl_array(phba);
4155	if (rc) {
4156		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4157				"1430 Failed to initialize sgl list.\n");
4158		goto out_free_sgl_list;
4159	}
4160
4161	rc = lpfc_sli4_init_rpi_hdrs(phba);
4162	if (rc) {
4163		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4164				"1432 Failed to initialize rpi headers.\n");
4165		goto out_free_active_sgl;
4166	}
4167
4168	/* Allocate eligible FCF bmask memory for FCF round robin failover */
4169	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4170	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4171					 GFP_KERNEL);
4172	if (!phba->fcf.fcf_rr_bmask) {
4173		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4174				"2759 Failed allocate memory for FCF round "
4175				"robin failover bmask\n");
4176		goto out_remove_rpi_hdrs;
4177	}
4178
4179	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4180				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4181	if (!phba->sli4_hba.fcp_eq_hdl) {
4182		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4183				"2572 Failed allocate memory for fast-path "
4184				"per-EQ handle array\n");
4185		goto out_free_fcf_rr_bmask;
4186	}
4187
4188	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4189				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4190	if (!phba->sli4_hba.msix_entries) {
4191		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4192				"2573 Failed allocate memory for msi-x "
4193				"interrupt vector entries\n");
4194		goto out_free_fcp_eq_hdl;
4195	}
4196
4197	return rc;
4198
4199out_free_fcp_eq_hdl:
4200	kfree(phba->sli4_hba.fcp_eq_hdl);
4201out_free_fcf_rr_bmask:
4202	kfree(phba->fcf.fcf_rr_bmask);
4203out_remove_rpi_hdrs:
4204	lpfc_sli4_remove_rpi_hdrs(phba);
4205out_free_active_sgl:
4206	lpfc_free_active_sgl(phba);
4207out_free_sgl_list:
4208	lpfc_free_sgl_list(phba);
4209out_destroy_cq_event_pool:
4210	lpfc_sli4_cq_event_pool_destroy(phba);
4211out_destroy_queue:
4212	lpfc_sli4_queue_destroy(phba);
4213out_free_bsmbx:
4214	lpfc_destroy_bootstrap_mbox(phba);
4215out_free_mem:
4216	lpfc_mem_free(phba);
4217	return rc;
4218}
4219
4220/**
4221 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4222 * @phba: pointer to lpfc hba data structure.
4223 *
4224 * This routine is invoked to unset the driver internal resources set up
4225 * specific for supporting the SLI-4 HBA device it attached to.
4226 **/
4227static void
4228lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4229{
4230	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4231
4232	/* Free memory allocated for msi-x interrupt vector entries */
4233	kfree(phba->sli4_hba.msix_entries);
4234
4235	/* Free memory allocated for fast-path work queue handles */
4236	kfree(phba->sli4_hba.fcp_eq_hdl);
4237
4238	/* Free the allocated rpi headers. */
4239	lpfc_sli4_remove_rpi_hdrs(phba);
4240	lpfc_sli4_remove_rpis(phba);
4241
4242	/* Free eligible FCF index bmask */
4243	kfree(phba->fcf.fcf_rr_bmask);
4244
4245	/* Free the ELS sgl list */
4246	lpfc_free_active_sgl(phba);
4247	lpfc_free_sgl_list(phba);
4248
4249	/* Free the SCSI sgl management array */
4250	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4251
4252	/* Free the SLI4 queues */
4253	lpfc_sli4_queue_destroy(phba);
4254
4255	/* Free the completion queue EQ event pool */
4256	lpfc_sli4_cq_event_release_all(phba);
4257	lpfc_sli4_cq_event_pool_destroy(phba);
4258
4259	/* Free the bsmbx region. */
4260	lpfc_destroy_bootstrap_mbox(phba);
4261
4262	/* Free the SLI Layer memory with SLI4 HBAs */
4263	lpfc_mem_free_all(phba);
4264
4265	/* Free the current connect table */
4266	list_for_each_entry_safe(conn_entry, next_conn_entry,
4267		&phba->fcf_conn_rec_list, list) {
4268		list_del_init(&conn_entry->list);
4269		kfree(conn_entry);
4270	}
4271
4272	return;
4273}
4274
4275/**
4276 * lpfc_init_api_table_setup - Set up init api fucntion jump table
4277 * @phba: The hba struct for which this call is being executed.
4278 * @dev_grp: The HBA PCI-Device group number.
4279 *
4280 * This routine sets up the device INIT interface API function jump table
4281 * in @phba struct.
4282 *
4283 * Returns: 0 - success, -ENODEV - failure.
4284 **/
4285int
4286lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4287{
4288	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4289	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4290	switch (dev_grp) {
4291	case LPFC_PCI_DEV_LP:
4292		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4293		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4294		phba->lpfc_stop_port = lpfc_stop_port_s3;
4295		break;
4296	case LPFC_PCI_DEV_OC:
4297		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4298		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4299		phba->lpfc_stop_port = lpfc_stop_port_s4;
4300		break;
4301	default:
4302		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4303				"1431 Invalid HBA PCI-device group: 0x%x\n",
4304				dev_grp);
4305		return -ENODEV;
4306		break;
4307	}
4308	return 0;
4309}
4310
4311/**
4312 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
4313 * @phba: pointer to lpfc hba data structure.
4314 *
4315 * This routine is invoked to set up the driver internal resources before the
4316 * device specific resource setup to support the HBA device it attached to.
4317 *
4318 * Return codes
4319 *	0 - successful
4320 *	other values - error
4321 **/
4322static int
4323lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4324{
4325	/*
4326	 * Driver resources common to all SLI revisions
4327	 */
4328	atomic_set(&phba->fast_event_count, 0);
4329	spin_lock_init(&phba->hbalock);
4330
4331	/* Initialize ndlp management spinlock */
4332	spin_lock_init(&phba->ndlp_lock);
4333
4334	INIT_LIST_HEAD(&phba->port_list);
4335	INIT_LIST_HEAD(&phba->work_list);
4336	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4337
4338	/* Initialize the wait queue head for the kernel thread */
4339	init_waitqueue_head(&phba->work_waitq);
4340
4341	/* Initialize the scsi buffer list used by driver for scsi IO */
4342	spin_lock_init(&phba->scsi_buf_list_lock);
4343	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4344
4345	/* Initialize the fabric iocb list */
4346	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4347
4348	/* Initialize list to save ELS buffers */
4349	INIT_LIST_HEAD(&phba->elsbuf);
4350
4351	/* Initialize FCF connection rec list */
4352	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4353
4354	return 0;
4355}
4356
4357/**
4358 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4359 * @phba: pointer to lpfc hba data structure.
4360 *
4361 * This routine is invoked to set up the driver internal resources after the
4362 * device specific resource setup to support the HBA device it attached to.
4363 *
4364 * Return codes
4365 * 	0 - successful
4366 * 	other values - error
4367 **/
4368static int
4369lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4370{
4371	int error;
4372
4373	/* Startup the kernel thread for this host adapter. */
4374	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4375					  "lpfc_worker_%d", phba->brd_no);
4376	if (IS_ERR(phba->worker_thread)) {
4377		error = PTR_ERR(phba->worker_thread);
4378		return error;
4379	}
4380
4381	return 0;
4382}
4383
4384/**
4385 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4386 * @phba: pointer to lpfc hba data structure.
4387 *
4388 * This routine is invoked to unset the driver internal resources set up after
4389 * the device specific resource setup for supporting the HBA device it
4390 * attached to.
4391 **/
4392static void
4393lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4394{
4395	/* Stop kernel worker thread */
4396	kthread_stop(phba->worker_thread);
4397}
4398
4399/**
4400 * lpfc_free_iocb_list - Free iocb list.
4401 * @phba: pointer to lpfc hba data structure.
4402 *
4403 * This routine is invoked to free the driver's IOCB list and memory.
4404 **/
4405static void
4406lpfc_free_iocb_list(struct lpfc_hba *phba)
4407{
4408	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4409
4410	spin_lock_irq(&phba->hbalock);
4411	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4412				 &phba->lpfc_iocb_list, list) {
4413		list_del(&iocbq_entry->list);
4414		kfree(iocbq_entry);
4415		phba->total_iocbq_bufs--;
4416	}
4417	spin_unlock_irq(&phba->hbalock);
4418
4419	return;
4420}
4421
4422/**
4423 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4424 * @phba: pointer to lpfc hba data structure.
4425 *
4426 * This routine is invoked to allocate and initizlize the driver's IOCB
4427 * list and set up the IOCB tag array accordingly.
4428 *
4429 * Return codes
4430 *	0 - successful
4431 *	other values - error
4432 **/
4433static int
4434lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4435{
4436	struct lpfc_iocbq *iocbq_entry = NULL;
4437	uint16_t iotag;
4438	int i;
4439
4440	/* Initialize and populate the iocb list per host.  */
4441	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4442	for (i = 0; i < iocb_count; i++) {
4443		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4444		if (iocbq_entry == NULL) {
4445			printk(KERN_ERR "%s: only allocated %d iocbs of "
4446				"expected %d count. Unloading driver.\n",
4447				__func__, i, LPFC_IOCB_LIST_CNT);
4448			goto out_free_iocbq;
4449		}
4450
4451		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4452		if (iotag == 0) {
4453			kfree(iocbq_entry);
4454			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4455				"Unloading driver.\n", __func__);
4456			goto out_free_iocbq;
4457		}
4458		iocbq_entry->sli4_xritag = NO_XRI;
4459
4460		spin_lock_irq(&phba->hbalock);
4461		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4462		phba->total_iocbq_bufs++;
4463		spin_unlock_irq(&phba->hbalock);
4464	}
4465
4466	return 0;
4467
4468out_free_iocbq:
4469	lpfc_free_iocb_list(phba);
4470
4471	return -ENOMEM;
4472}
4473
4474/**
4475 * lpfc_free_sgl_list - Free sgl list.
4476 * @phba: pointer to lpfc hba data structure.
4477 *
4478 * This routine is invoked to free the driver's sgl list and memory.
4479 **/
4480static void
4481lpfc_free_sgl_list(struct lpfc_hba *phba)
4482{
4483	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4484	LIST_HEAD(sglq_list);
4485
4486	spin_lock_irq(&phba->hbalock);
4487	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4488	spin_unlock_irq(&phba->hbalock);
4489
4490	list_for_each_entry_safe(sglq_entry, sglq_next,
4491				 &sglq_list, list) {
4492		list_del(&sglq_entry->list);
4493		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4494		kfree(sglq_entry);
4495		phba->sli4_hba.total_sglq_bufs--;
4496	}
4497	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4498}
4499
4500/**
4501 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4502 * @phba: pointer to lpfc hba data structure.
4503 *
4504 * This routine is invoked to allocate the driver's active sgl memory.
4505 * This array will hold the sglq_entry's for active IOs.
4506 **/
4507static int
4508lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4509{
4510	int size;
4511	size = sizeof(struct lpfc_sglq *);
4512	size *= phba->sli4_hba.max_cfg_param.max_xri;
4513
4514	phba->sli4_hba.lpfc_sglq_active_list =
4515		kzalloc(size, GFP_KERNEL);
4516	if (!phba->sli4_hba.lpfc_sglq_active_list)
4517		return -ENOMEM;
4518	return 0;
4519}
4520
4521/**
4522 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4523 * @phba: pointer to lpfc hba data structure.
4524 *
4525 * This routine is invoked to walk through the array of active sglq entries
4526 * and free all of the resources.
4527 * This is just a place holder for now.
4528 **/
4529static void
4530lpfc_free_active_sgl(struct lpfc_hba *phba)
4531{
4532	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4533}
4534
4535/**
4536 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4537 * @phba: pointer to lpfc hba data structure.
4538 *
4539 * This routine is invoked to allocate and initizlize the driver's sgl
4540 * list and set up the sgl xritag tag array accordingly.
4541 *
4542 * Return codes
4543 *	0 - successful
4544 *	other values - error
4545 **/
4546static int
4547lpfc_init_sgl_list(struct lpfc_hba *phba)
4548{
4549	struct lpfc_sglq *sglq_entry = NULL;
4550	int i;
4551	int els_xri_cnt;
4552
4553	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4554	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4555				"2400 lpfc_init_sgl_list els %d.\n",
4556				els_xri_cnt);
4557	/* Initialize and populate the sglq list per host/VF. */
4558	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4559	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4560
4561	/* Sanity check on XRI management */
4562	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4563		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4564				"2562 No room left for SCSI XRI allocation: "
4565				"max_xri=%d, els_xri=%d\n",
4566				phba->sli4_hba.max_cfg_param.max_xri,
4567				els_xri_cnt);
4568		return -ENOMEM;
4569	}
4570
4571	/* Allocate memory for the ELS XRI management array */
4572	phba->sli4_hba.lpfc_els_sgl_array =
4573			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4574			GFP_KERNEL);
4575
4576	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4577		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4578				"2401 Failed to allocate memory for ELS "
4579				"XRI management array of size %d.\n",
4580				els_xri_cnt);
4581		return -ENOMEM;
4582	}
4583
4584	/* Keep the SCSI XRI into the XRI management array */
4585	phba->sli4_hba.scsi_xri_max =
4586			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4587	phba->sli4_hba.scsi_xri_cnt = 0;
4588
4589	phba->sli4_hba.lpfc_scsi_psb_array =
4590			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4591			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4592
4593	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4594		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4595				"2563 Failed to allocate memory for SCSI "
4596				"XRI management array of size %d.\n",
4597				phba->sli4_hba.scsi_xri_max);
4598		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4599		return -ENOMEM;
4600	}
4601
4602	for (i = 0; i < els_xri_cnt; i++) {
4603		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4604		if (sglq_entry == NULL) {
4605			printk(KERN_ERR "%s: only allocated %d sgls of "
4606				"expected %d count. Unloading driver.\n",
4607				__func__, i, els_xri_cnt);
4608			goto out_free_mem;
4609		}
4610
4611		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4612		if (sglq_entry->sli4_xritag == NO_XRI) {
4613			kfree(sglq_entry);
4614			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4615				"Unloading driver.\n", __func__);
4616			goto out_free_mem;
4617		}
4618		sglq_entry->buff_type = GEN_BUFF_TYPE;
4619		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4620		if (sglq_entry->virt == NULL) {
4621			kfree(sglq_entry);
4622			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4623				"Unloading driver.\n", __func__);
4624			goto out_free_mem;
4625		}
4626		sglq_entry->sgl = sglq_entry->virt;
4627		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4628
4629		/* The list order is used by later block SGL registraton */
4630		spin_lock_irq(&phba->hbalock);
4631		sglq_entry->state = SGL_FREED;
4632		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4633		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4634		phba->sli4_hba.total_sglq_bufs++;
4635		spin_unlock_irq(&phba->hbalock);
4636	}
4637	return 0;
4638
4639out_free_mem:
4640	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4641	lpfc_free_sgl_list(phba);
4642	return -ENOMEM;
4643}
4644
4645/**
4646 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4647 * @phba: pointer to lpfc hba data structure.
4648 *
4649 * This routine is invoked to post rpi header templates to the
4650 * HBA consistent with the SLI-4 interface spec.  This routine
4651 * posts a PAGE_SIZE memory region to the port to hold up to
4652 * PAGE_SIZE modulo 64 rpi context headers.
4653 * No locks are held here because this is an initialization routine
4654 * called only from probe or lpfc_online when interrupts are not
4655 * enabled and the driver is reinitializing the device.
4656 *
4657 * Return codes
4658 * 	0 - successful
4659 * 	-ENOMEM - No availble memory
4660 *      -EIO - The mailbox failed to complete successfully.
4661 **/
4662int
4663lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4664{
4665	int rc = 0;
4666	int longs;
4667	uint16_t rpi_count;
4668	struct lpfc_rpi_hdr *rpi_hdr;
4669
4670	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4671
4672	/*
4673	 * Provision an rpi bitmask range for discovery. The total count
4674	 * is the difference between max and base + 1.
4675	 */
4676	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4677		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4678
4679	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4680	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4681					   GFP_KERNEL);
4682	if (!phba->sli4_hba.rpi_bmask)
4683		return -ENOMEM;
4684
4685	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4686	if (!rpi_hdr) {
4687		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4688				"0391 Error during rpi post operation\n");
4689		lpfc_sli4_remove_rpis(phba);
4690		rc = -ENODEV;
4691	}
4692
4693	return rc;
4694}
4695
4696/**
4697 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4698 * @phba: pointer to lpfc hba data structure.
4699 *
4700 * This routine is invoked to allocate a single 4KB memory region to
4701 * support rpis and stores them in the phba.  This single region
4702 * provides support for up to 64 rpis.  The region is used globally
4703 * by the device.
4704 *
4705 * Returns:
4706 *   A valid rpi hdr on success.
4707 *   A NULL pointer on any failure.
4708 **/
4709struct lpfc_rpi_hdr *
4710lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4711{
4712	uint16_t rpi_limit, curr_rpi_range;
4713	struct lpfc_dmabuf *dmabuf;
4714	struct lpfc_rpi_hdr *rpi_hdr;
4715
4716	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4717		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4718
4719	spin_lock_irq(&phba->hbalock);
4720	curr_rpi_range = phba->sli4_hba.next_rpi;
4721	spin_unlock_irq(&phba->hbalock);
4722
4723	/*
4724	 * The port has a limited number of rpis. The increment here
4725	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4726	 * and to allow the full max_rpi range per port.
4727	 */
4728	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4729		return NULL;
4730
4731	/*
4732	 * First allocate the protocol header region for the port.  The
4733	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4734	 */
4735	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4736	if (!dmabuf)
4737		return NULL;
4738
4739	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4740					  LPFC_HDR_TEMPLATE_SIZE,
4741					  &dmabuf->phys,
4742					  GFP_KERNEL);
4743	if (!dmabuf->virt) {
4744		rpi_hdr = NULL;
4745		goto err_free_dmabuf;
4746	}
4747
4748	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4749	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4750		rpi_hdr = NULL;
4751		goto err_free_coherent;
4752	}
4753
4754	/* Save the rpi header data for cleanup later. */
4755	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4756	if (!rpi_hdr)
4757		goto err_free_coherent;
4758
4759	rpi_hdr->dmabuf = dmabuf;
4760	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4761	rpi_hdr->page_count = 1;
4762	spin_lock_irq(&phba->hbalock);
4763	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4764	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4765
4766	/*
4767	 * The next_rpi stores the next module-64 rpi value to post
4768	 * in any subsequent rpi memory region postings.
4769	 */
4770	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4771	spin_unlock_irq(&phba->hbalock);
4772	return rpi_hdr;
4773
4774 err_free_coherent:
4775	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4776			  dmabuf->virt, dmabuf->phys);
4777 err_free_dmabuf:
4778	kfree(dmabuf);
4779	return NULL;
4780}
4781
4782/**
4783 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4784 * @phba: pointer to lpfc hba data structure.
4785 *
4786 * This routine is invoked to remove all memory resources allocated
4787 * to support rpis. This routine presumes the caller has released all
4788 * rpis consumed by fabric or port logins and is prepared to have
4789 * the header pages removed.
4790 **/
4791void
4792lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4793{
4794	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4795
4796	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4797				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4798		list_del(&rpi_hdr->list);
4799		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4800				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4801		kfree(rpi_hdr->dmabuf);
4802		kfree(rpi_hdr);
4803	}
4804
4805	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4806	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4807}
4808
4809/**
4810 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4811 * @pdev: pointer to pci device data structure.
4812 *
4813 * This routine is invoked to allocate the driver hba data structure for an
4814 * HBA device. If the allocation is successful, the phba reference to the
4815 * PCI device data structure is set.
4816 *
4817 * Return codes
4818 *      pointer to @phba - successful
4819 *      NULL - error
4820 **/
4821static struct lpfc_hba *
4822lpfc_hba_alloc(struct pci_dev *pdev)
4823{
4824	struct lpfc_hba *phba;
4825
4826	/* Allocate memory for HBA structure */
4827	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4828	if (!phba) {
4829		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4830		return NULL;
4831	}
4832
4833	/* Set reference to PCI device in HBA structure */
4834	phba->pcidev = pdev;
4835
4836	/* Assign an unused board number */
4837	phba->brd_no = lpfc_get_instance();
4838	if (phba->brd_no < 0) {
4839		kfree(phba);
4840		return NULL;
4841	}
4842
4843	spin_lock_init(&phba->ct_ev_lock);
4844	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4845
4846	return phba;
4847}
4848
4849/**
4850 * lpfc_hba_free - Free driver hba data structure with a device.
4851 * @phba: pointer to lpfc hba data structure.
4852 *
4853 * This routine is invoked to free the driver hba data structure with an
4854 * HBA device.
4855 **/
4856static void
4857lpfc_hba_free(struct lpfc_hba *phba)
4858{
4859	/* Release the driver assigned board number */
4860	idr_remove(&lpfc_hba_index, phba->brd_no);
4861
4862	kfree(phba);
4863	return;
4864}
4865
4866/**
4867 * lpfc_create_shost - Create hba physical port with associated scsi host.
4868 * @phba: pointer to lpfc hba data structure.
4869 *
4870 * This routine is invoked to create HBA physical port and associate a SCSI
4871 * host with it.
4872 *
4873 * Return codes
4874 *      0 - successful
4875 *      other values - error
4876 **/
4877static int
4878lpfc_create_shost(struct lpfc_hba *phba)
4879{
4880	struct lpfc_vport *vport;
4881	struct Scsi_Host  *shost;
4882
4883	/* Initialize HBA FC structure */
4884	phba->fc_edtov = FF_DEF_EDTOV;
4885	phba->fc_ratov = FF_DEF_RATOV;
4886	phba->fc_altov = FF_DEF_ALTOV;
4887	phba->fc_arbtov = FF_DEF_ARBTOV;
4888
4889	atomic_set(&phba->sdev_cnt, 0);
4890	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4891	if (!vport)
4892		return -ENODEV;
4893
4894	shost = lpfc_shost_from_vport(vport);
4895	phba->pport = vport;
4896	lpfc_debugfs_initialize(vport);
4897	/* Put reference to SCSI host to driver's device private data */
4898	pci_set_drvdata(phba->pcidev, shost);
4899
4900	return 0;
4901}
4902
4903/**
4904 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4905 * @phba: pointer to lpfc hba data structure.
4906 *
4907 * This routine is invoked to destroy HBA physical port and the associated
4908 * SCSI host.
4909 **/
4910static void
4911lpfc_destroy_shost(struct lpfc_hba *phba)
4912{
4913	struct lpfc_vport *vport = phba->pport;
4914
4915	/* Destroy physical port that associated with the SCSI host */
4916	destroy_port(vport);
4917
4918	return;
4919}
4920
4921/**
4922 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4923 * @phba: pointer to lpfc hba data structure.
4924 * @shost: the shost to be used to detect Block guard settings.
4925 *
4926 * This routine sets up the local Block guard protocol settings for @shost.
4927 * This routine also allocates memory for debugging bg buffers.
4928 **/
4929static void
4930lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4931{
4932	int pagecnt = 10;
4933	if (lpfc_prot_mask && lpfc_prot_guard) {
4934		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4935				"1478 Registering BlockGuard with the "
4936				"SCSI layer\n");
4937		scsi_host_set_prot(shost, lpfc_prot_mask);
4938		scsi_host_set_guard(shost, lpfc_prot_guard);
4939	}
4940	if (!_dump_buf_data) {
4941		while (pagecnt) {
4942			spin_lock_init(&_dump_buf_lock);
4943			_dump_buf_data =
4944				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4945			if (_dump_buf_data) {
4946				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4947					"9043 BLKGRD: allocated %d pages for "
4948				       "_dump_buf_data at 0x%p\n",
4949				       (1 << pagecnt), _dump_buf_data);
4950				_dump_buf_data_order = pagecnt;
4951				memset(_dump_buf_data, 0,
4952				       ((1 << PAGE_SHIFT) << pagecnt));
4953				break;
4954			} else
4955				--pagecnt;
4956		}
4957		if (!_dump_buf_data_order)
4958			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4959				"9044 BLKGRD: ERROR unable to allocate "
4960			       "memory for hexdump\n");
4961	} else
4962		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4963			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4964		       "\n", _dump_buf_data);
4965	if (!_dump_buf_dif) {
4966		while (pagecnt) {
4967			_dump_buf_dif =
4968				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4969			if (_dump_buf_dif) {
4970				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4971					"9046 BLKGRD: allocated %d pages for "
4972				       "_dump_buf_dif at 0x%p\n",
4973				       (1 << pagecnt), _dump_buf_dif);
4974				_dump_buf_dif_order = pagecnt;
4975				memset(_dump_buf_dif, 0,
4976				       ((1 << PAGE_SHIFT) << pagecnt));
4977				break;
4978			} else
4979				--pagecnt;
4980		}
4981		if (!_dump_buf_dif_order)
4982			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4983			"9047 BLKGRD: ERROR unable to allocate "
4984			       "memory for hexdump\n");
4985	} else
4986		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4987			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4988		       _dump_buf_dif);
4989}
4990
4991/**
4992 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4993 * @phba: pointer to lpfc hba data structure.
4994 *
4995 * This routine is invoked to perform all the necessary post initialization
4996 * setup for the device.
4997 **/
4998static void
4999lpfc_post_init_setup(struct lpfc_hba *phba)
5000{
5001	struct Scsi_Host  *shost;
5002	struct lpfc_adapter_event_header adapter_event;
5003
5004	/* Get the default values for Model Name and Description */
5005	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5006
5007	/*
5008	 * hba setup may have changed the hba_queue_depth so we need to
5009	 * adjust the value of can_queue.
5010	 */
5011	shost = pci_get_drvdata(phba->pcidev);
5012	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5013	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5014		lpfc_setup_bg(phba, shost);
5015
5016	lpfc_host_attrib_init(shost);
5017
5018	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5019		spin_lock_irq(shost->host_lock);
5020		lpfc_poll_start_timer(phba);
5021		spin_unlock_irq(shost->host_lock);
5022	}
5023
5024	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5025			"0428 Perform SCSI scan\n");
5026	/* Send board arrival event to upper layer */
5027	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5028	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5029	fc_host_post_vendor_event(shost, fc_get_event_number(),
5030				  sizeof(adapter_event),
5031				  (char *) &adapter_event,
5032				  LPFC_NL_VENDOR_ID);
5033	return;
5034}
5035
5036/**
5037 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5038 * @phba: pointer to lpfc hba data structure.
5039 *
5040 * This routine is invoked to set up the PCI device memory space for device
5041 * with SLI-3 interface spec.
5042 *
5043 * Return codes
5044 * 	0 - successful
5045 * 	other values - error
5046 **/
5047static int
5048lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5049{
5050	struct pci_dev *pdev;
5051	unsigned long bar0map_len, bar2map_len;
5052	int i, hbq_count;
5053	void *ptr;
5054	int error = -ENODEV;
5055
5056	/* Obtain PCI device reference */
5057	if (!phba->pcidev)
5058		return error;
5059	else
5060		pdev = phba->pcidev;
5061
5062	/* Set the device DMA mask size */
5063	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5064	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5065		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5066		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5067			return error;
5068		}
5069	}
5070
5071	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5072	 * required by each mapping.
5073	 */
5074	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5075	bar0map_len = pci_resource_len(pdev, 0);
5076
5077	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5078	bar2map_len = pci_resource_len(pdev, 2);
5079
5080	/* Map HBA SLIM to a kernel virtual address. */
5081	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5082	if (!phba->slim_memmap_p) {
5083		dev_printk(KERN_ERR, &pdev->dev,
5084			   "ioremap failed for SLIM memory.\n");
5085		goto out;
5086	}
5087
5088	/* Map HBA Control Registers to a kernel virtual address. */
5089	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5090	if (!phba->ctrl_regs_memmap_p) {
5091		dev_printk(KERN_ERR, &pdev->dev,
5092			   "ioremap failed for HBA control registers.\n");
5093		goto out_iounmap_slim;
5094	}
5095
5096	/* Allocate memory for SLI-2 structures */
5097	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5098					       SLI2_SLIM_SIZE,
5099					       &phba->slim2p.phys,
5100					       GFP_KERNEL);
5101	if (!phba->slim2p.virt)
5102		goto out_iounmap;
5103
5104	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5105	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5106	phba->mbox_ext = (phba->slim2p.virt +
5107		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5108	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5109	phba->IOCBs = (phba->slim2p.virt +
5110		       offsetof(struct lpfc_sli2_slim, IOCBs));
5111
5112	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5113						 lpfc_sli_hbq_size(),
5114						 &phba->hbqslimp.phys,
5115						 GFP_KERNEL);
5116	if (!phba->hbqslimp.virt)
5117		goto out_free_slim;
5118
5119	hbq_count = lpfc_sli_hbq_count();
5120	ptr = phba->hbqslimp.virt;
5121	for (i = 0; i < hbq_count; ++i) {
5122		phba->hbqs[i].hbq_virt = ptr;
5123		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5124		ptr += (lpfc_hbq_defs[i]->entry_count *
5125			sizeof(struct lpfc_hbq_entry));
5126	}
5127	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5128	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5129
5130	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5131
5132	INIT_LIST_HEAD(&phba->rb_pend_list);
5133
5134	phba->MBslimaddr = phba->slim_memmap_p;
5135	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5136	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5137	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5138	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5139
5140	return 0;
5141
5142out_free_slim:
5143	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5144			  phba->slim2p.virt, phba->slim2p.phys);
5145out_iounmap:
5146	iounmap(phba->ctrl_regs_memmap_p);
5147out_iounmap_slim:
5148	iounmap(phba->slim_memmap_p);
5149out:
5150	return error;
5151}
5152
5153/**
5154 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5155 * @phba: pointer to lpfc hba data structure.
5156 *
5157 * This routine is invoked to unset the PCI device memory space for device
5158 * with SLI-3 interface spec.
5159 **/
5160static void
5161lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5162{
5163	struct pci_dev *pdev;
5164
5165	/* Obtain PCI device reference */
5166	if (!phba->pcidev)
5167		return;
5168	else
5169		pdev = phba->pcidev;
5170
5171	/* Free coherent DMA memory allocated */
5172	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5173			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5174	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5175			  phba->slim2p.virt, phba->slim2p.phys);
5176
5177	/* I/O memory unmap */
5178	iounmap(phba->ctrl_regs_memmap_p);
5179	iounmap(phba->slim_memmap_p);
5180
5181	return;
5182}
5183
5184/**
5185 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5186 * @phba: pointer to lpfc hba data structure.
5187 *
5188 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5189 * done and check status.
5190 *
5191 * Return 0 if successful, otherwise -ENODEV.
5192 **/
5193int
5194lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5195{
5196	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5197	int i, port_error = -ENODEV;
5198
5199	if (!phba->sli4_hba.STAregaddr)
5200		return -ENODEV;
5201
5202	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5203	for (i = 0; i < 3000; i++) {
5204		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5205		/* Encounter fatal POST error, break out */
5206		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5207			port_error = -ENODEV;
5208			break;
5209		}
5210		if (LPFC_POST_STAGE_ARMFW_READY ==
5211		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5212			port_error = 0;
5213			break;
5214		}
5215		msleep(10);
5216	}
5217
5218	if (port_error)
5219		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5220			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5221			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5222			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5223			bf_get(lpfc_hst_state_perr, &sta_reg),
5224			bf_get(lpfc_hst_state_sfi, &sta_reg),
5225			bf_get(lpfc_hst_state_nip, &sta_reg),
5226			bf_get(lpfc_hst_state_ipc, &sta_reg),
5227			bf_get(lpfc_hst_state_xrom, &sta_reg),
5228			bf_get(lpfc_hst_state_dl, &sta_reg),
5229			bf_get(lpfc_hst_state_port_status, &sta_reg));
5230
5231	/* Log device information */
5232	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5233	if (bf_get(lpfc_sli_intf_valid,
5234		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5235		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5236				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5237				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5238				bf_get(lpfc_sli_intf_sli_family,
5239				       &phba->sli4_hba.sli_intf),
5240				bf_get(lpfc_sli_intf_slirev,
5241				       &phba->sli4_hba.sli_intf),
5242				bf_get(lpfc_sli_intf_featurelevel1,
5243				       &phba->sli4_hba.sli_intf),
5244				bf_get(lpfc_sli_intf_featurelevel2,
5245				       &phba->sli4_hba.sli_intf));
5246	}
5247	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5248	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5249	/* With uncoverable error, log the error message and return error */
5250	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5251	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5252	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5253	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5254		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5255				"1422 HBA Unrecoverable error: "
5256				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5257				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5258				uerrlo_reg.word0, uerrhi_reg.word0,
5259				phba->sli4_hba.ue_mask_lo,
5260				phba->sli4_hba.ue_mask_hi);
5261		return -ENODEV;
5262	}
5263
5264	return port_error;
5265}
5266
5267/**
5268 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5269 * @phba: pointer to lpfc hba data structure.
5270 *
5271 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5272 * memory map.
5273 **/
5274static void
5275lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5276{
5277	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5278					LPFC_UERR_STATUS_LO;
5279	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5280					LPFC_UERR_STATUS_HI;
5281	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5282					LPFC_UE_MASK_LO;
5283	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5284					LPFC_UE_MASK_HI;
5285	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5286					LPFC_SLI_INTF;
5287}
5288
5289/**
5290 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5291 * @phba: pointer to lpfc hba data structure.
5292 *
5293 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5294 * memory map.
5295 **/
5296static void
5297lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5298{
5299
5300	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5301				    LPFC_HST_STATE;
5302	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5303				    LPFC_HST_ISR0;
5304	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5305				    LPFC_HST_IMR0;
5306	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5307				     LPFC_HST_ISCR0;
5308	return;
5309}
5310
5311/**
5312 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5313 * @phba: pointer to lpfc hba data structure.
5314 * @vf: virtual function number
5315 *
5316 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5317 * based on the given viftual function number, @vf.
5318 *
5319 * Return 0 if successful, otherwise -ENODEV.
5320 **/
5321static int
5322lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5323{
5324	if (vf > LPFC_VIR_FUNC_MAX)
5325		return -ENODEV;
5326
5327	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5328				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5329	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5330				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5331	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5332				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5333	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5334				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5335	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5336				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5337	return 0;
5338}
5339
5340/**
5341 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5342 * @phba: pointer to lpfc hba data structure.
5343 *
5344 * This routine is invoked to create the bootstrap mailbox
5345 * region consistent with the SLI-4 interface spec.  This
5346 * routine allocates all memory necessary to communicate
5347 * mailbox commands to the port and sets up all alignment
5348 * needs.  No locks are expected to be held when calling
5349 * this routine.
5350 *
5351 * Return codes
5352 * 	0 - successful
5353 * 	-ENOMEM - could not allocated memory.
5354 **/
5355static int
5356lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5357{
5358	uint32_t bmbx_size;
5359	struct lpfc_dmabuf *dmabuf;
5360	struct dma_address *dma_address;
5361	uint32_t pa_addr;
5362	uint64_t phys_addr;
5363
5364	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5365	if (!dmabuf)
5366		return -ENOMEM;
5367
5368	/*
5369	 * The bootstrap mailbox region is comprised of 2 parts
5370	 * plus an alignment restriction of 16 bytes.
5371	 */
5372	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5373	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5374					  bmbx_size,
5375					  &dmabuf->phys,
5376					  GFP_KERNEL);
5377	if (!dmabuf->virt) {
5378		kfree(dmabuf);
5379		return -ENOMEM;
5380	}
5381	memset(dmabuf->virt, 0, bmbx_size);
5382
5383	/*
5384	 * Initialize the bootstrap mailbox pointers now so that the register
5385	 * operations are simple later.  The mailbox dma address is required
5386	 * to be 16-byte aligned.  Also align the virtual memory as each
5387	 * maibox is copied into the bmbx mailbox region before issuing the
5388	 * command to the port.
5389	 */
5390	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5391	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5392
5393	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5394					      LPFC_ALIGN_16_BYTE);
5395	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5396					      LPFC_ALIGN_16_BYTE);
5397
5398	/*
5399	 * Set the high and low physical addresses now.  The SLI4 alignment
5400	 * requirement is 16 bytes and the mailbox is posted to the port
5401	 * as two 30-bit addresses.  The other data is a bit marking whether
5402	 * the 30-bit address is the high or low address.
5403	 * Upcast bmbx aphys to 64bits so shift instruction compiles
5404	 * clean on 32 bit machines.
5405	 */
5406	dma_address = &phba->sli4_hba.bmbx.dma_address;
5407	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5408	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5409	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5410					   LPFC_BMBX_BIT1_ADDR_HI);
5411
5412	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5413	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5414					   LPFC_BMBX_BIT1_ADDR_LO);
5415	return 0;
5416}
5417
5418/**
5419 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5420 * @phba: pointer to lpfc hba data structure.
5421 *
5422 * This routine is invoked to teardown the bootstrap mailbox
5423 * region and release all host resources. This routine requires
5424 * the caller to ensure all mailbox commands recovered, no
5425 * additional mailbox comands are sent, and interrupts are disabled
5426 * before calling this routine.
5427 *
5428 **/
5429static void
5430lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5431{
5432	dma_free_coherent(&phba->pcidev->dev,
5433			  phba->sli4_hba.bmbx.bmbx_size,
5434			  phba->sli4_hba.bmbx.dmabuf->virt,
5435			  phba->sli4_hba.bmbx.dmabuf->phys);
5436
5437	kfree(phba->sli4_hba.bmbx.dmabuf);
5438	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5439}
5440
5441/**
5442 * lpfc_sli4_read_config - Get the config parameters.
5443 * @phba: pointer to lpfc hba data structure.
5444 *
5445 * This routine is invoked to read the configuration parameters from the HBA.
5446 * The configuration parameters are used to set the base and maximum values
5447 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
5448 * allocation for the port.
5449 *
5450 * Return codes
5451 * 	0 - successful
5452 * 	-ENOMEM - No availble memory
5453 *      -EIO - The mailbox failed to complete successfully.
5454 **/
5455static int
5456lpfc_sli4_read_config(struct lpfc_hba *phba)
5457{
5458	LPFC_MBOXQ_t *pmb;
5459	struct lpfc_mbx_read_config *rd_config;
5460	uint32_t rc = 0;
5461
5462	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5463	if (!pmb) {
5464		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5465				"2011 Unable to allocate memory for issuing "
5466				"SLI_CONFIG_SPECIAL mailbox command\n");
5467		return -ENOMEM;
5468	}
5469
5470	lpfc_read_config(phba, pmb);
5471
5472	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5473	if (rc != MBX_SUCCESS) {
5474		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5475			"2012 Mailbox failed , mbxCmd x%x "
5476			"READ_CONFIG, mbxStatus x%x\n",
5477			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5478			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5479		rc = -EIO;
5480	} else {
5481		rd_config = &pmb->u.mqe.un.rd_config;
5482		phba->sli4_hba.max_cfg_param.max_xri =
5483			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5484		phba->sli4_hba.max_cfg_param.xri_base =
5485			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5486		phba->sli4_hba.max_cfg_param.max_vpi =
5487			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5488		phba->sli4_hba.max_cfg_param.vpi_base =
5489			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5490		phba->sli4_hba.max_cfg_param.max_rpi =
5491			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5492		phba->sli4_hba.max_cfg_param.rpi_base =
5493			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5494		phba->sli4_hba.max_cfg_param.max_vfi =
5495			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5496		phba->sli4_hba.max_cfg_param.vfi_base =
5497			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5498		phba->sli4_hba.max_cfg_param.max_fcfi =
5499			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5500		phba->sli4_hba.max_cfg_param.fcfi_base =
5501			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5502		phba->sli4_hba.max_cfg_param.max_eq =
5503			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5504		phba->sli4_hba.max_cfg_param.max_rq =
5505			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5506		phba->sli4_hba.max_cfg_param.max_wq =
5507			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5508		phba->sli4_hba.max_cfg_param.max_cq =
5509			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5510		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5511		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5512		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5513		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5514		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5515		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5516				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5517		phba->max_vports = phba->max_vpi;
5518		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5519				"2003 cfg params XRI(B:%d M:%d), "
5520				"VPI(B:%d M:%d) "
5521				"VFI(B:%d M:%d) "
5522				"RPI(B:%d M:%d) "
5523				"FCFI(B:%d M:%d)\n",
5524				phba->sli4_hba.max_cfg_param.xri_base,
5525				phba->sli4_hba.max_cfg_param.max_xri,
5526				phba->sli4_hba.max_cfg_param.vpi_base,
5527				phba->sli4_hba.max_cfg_param.max_vpi,
5528				phba->sli4_hba.max_cfg_param.vfi_base,
5529				phba->sli4_hba.max_cfg_param.max_vfi,
5530				phba->sli4_hba.max_cfg_param.rpi_base,
5531				phba->sli4_hba.max_cfg_param.max_rpi,
5532				phba->sli4_hba.max_cfg_param.fcfi_base,
5533				phba->sli4_hba.max_cfg_param.max_fcfi);
5534	}
5535	mempool_free(pmb, phba->mbox_mem_pool);
5536
5537	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
5538	if (phba->cfg_hba_queue_depth >
5539		(phba->sli4_hba.max_cfg_param.max_xri -
5540			lpfc_sli4_get_els_iocb_cnt(phba)))
5541		phba->cfg_hba_queue_depth =
5542			phba->sli4_hba.max_cfg_param.max_xri -
5543				lpfc_sli4_get_els_iocb_cnt(phba);
5544	return rc;
5545}
5546
5547/**
5548 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
5549 * @phba: pointer to lpfc hba data structure.
5550 *
5551 * This routine is invoked to setup the host-side endian order to the
5552 * HBA consistent with the SLI-4 interface spec.
5553 *
5554 * Return codes
5555 * 	0 - successful
5556 * 	-ENOMEM - No availble memory
5557 *      -EIO - The mailbox failed to complete successfully.
5558 **/
5559static int
5560lpfc_setup_endian_order(struct lpfc_hba *phba)
5561{
5562	LPFC_MBOXQ_t *mboxq;
5563	uint32_t rc = 0;
5564	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5565				      HOST_ENDIAN_HIGH_WORD1};
5566
5567	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5568	if (!mboxq) {
5569		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5570				"0492 Unable to allocate memory for issuing "
5571				"SLI_CONFIG_SPECIAL mailbox command\n");
5572		return -ENOMEM;
5573	}
5574
5575	/*
5576	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5577	 * words to contain special data values and no other data.
5578	 */
5579	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5580	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5581	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5582	if (rc != MBX_SUCCESS) {
5583		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5584				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
5585				"status x%x\n",
5586				rc);
5587		rc = -EIO;
5588	}
5589
5590	mempool_free(mboxq, phba->mbox_mem_pool);
5591	return rc;
5592}
5593
5594/**
5595 * lpfc_sli4_queue_create - Create all the SLI4 queues
5596 * @phba: pointer to lpfc hba data structure.
5597 *
5598 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5599 * operation. For each SLI4 queue type, the parameters such as queue entry
5600 * count (queue depth) shall be taken from the module parameter. For now,
5601 * we just use some constant number as place holder.
5602 *
5603 * Return codes
5604 *      0 - successful
5605 *      -ENOMEM - No availble memory
5606 *      -EIO - The mailbox failed to complete successfully.
5607 **/
5608static int
5609lpfc_sli4_queue_create(struct lpfc_hba *phba)
5610{
5611	struct lpfc_queue *qdesc;
5612	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5613	int cfg_fcp_wq_count;
5614	int cfg_fcp_eq_count;
5615
5616	/*
5617	 * Sanity check for confiugred queue parameters against the run-time
5618	 * device parameters
5619	 */
5620
5621	/* Sanity check on FCP fast-path WQ parameters */
5622	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5623	if (cfg_fcp_wq_count >
5624	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5625		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5626				   LPFC_SP_WQN_DEF;
5627		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5628			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5629					"2581 Not enough WQs (%d) from "
5630					"the pci function for supporting "
5631					"FCP WQs (%d)\n",
5632					phba->sli4_hba.max_cfg_param.max_wq,
5633					phba->cfg_fcp_wq_count);
5634			goto out_error;
5635		}
5636		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5637				"2582 Not enough WQs (%d) from the pci "
5638				"function for supporting the requested "
5639				"FCP WQs (%d), the actual FCP WQs can "
5640				"be supported: %d\n",
5641				phba->sli4_hba.max_cfg_param.max_wq,
5642				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5643	}
5644	/* The actual number of FCP work queues adopted */
5645	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5646
5647	/* Sanity check on FCP fast-path EQ parameters */
5648	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5649	if (cfg_fcp_eq_count >
5650	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5651		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5652				   LPFC_SP_EQN_DEF;
5653		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5654			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5655					"2574 Not enough EQs (%d) from the "
5656					"pci function for supporting FCP "
5657					"EQs (%d)\n",
5658					phba->sli4_hba.max_cfg_param.max_eq,
5659					phba->cfg_fcp_eq_count);
5660			goto out_error;
5661		}
5662		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5663				"2575 Not enough EQs (%d) from the pci "
5664				"function for supporting the requested "
5665				"FCP EQs (%d), the actual FCP EQs can "
5666				"be supported: %d\n",
5667				phba->sli4_hba.max_cfg_param.max_eq,
5668				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5669	}
5670	/* It does not make sense to have more EQs than WQs */
5671	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5672		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5673				"2593 The FCP EQ count(%d) cannot be greater "
5674				"than the FCP WQ count(%d), limiting the "
5675				"FCP EQ count to %d\n", cfg_fcp_eq_count,
5676				phba->cfg_fcp_wq_count,
5677				phba->cfg_fcp_wq_count);
5678		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5679	}
5680	/* The actual number of FCP event queues adopted */
5681	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5682	/* The overall number of event queues used */
5683	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5684
5685	/*
5686	 * Create Event Queues (EQs)
5687	 */
5688
5689	/* Get EQ depth from module parameter, fake the default for now */
5690	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5691	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5692
5693	/* Create slow path event queue */
5694	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5695				      phba->sli4_hba.eq_ecount);
5696	if (!qdesc) {
5697		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5698				"0496 Failed allocate slow-path EQ\n");
5699		goto out_error;
5700	}
5701	phba->sli4_hba.sp_eq = qdesc;
5702
5703	/* Create fast-path FCP Event Queue(s) */
5704	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5705			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5706	if (!phba->sli4_hba.fp_eq) {
5707		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5708				"2576 Failed allocate memory for fast-path "
5709				"EQ record array\n");
5710		goto out_free_sp_eq;
5711	}
5712	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5713		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5714					      phba->sli4_hba.eq_ecount);
5715		if (!qdesc) {
5716			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5717					"0497 Failed allocate fast-path EQ\n");
5718			goto out_free_fp_eq;
5719		}
5720		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5721	}
5722
5723	/*
5724	 * Create Complete Queues (CQs)
5725	 */
5726
5727	/* Get CQ depth from module parameter, fake the default for now */
5728	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5729	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5730
5731	/* Create slow-path Mailbox Command Complete Queue */
5732	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5733				      phba->sli4_hba.cq_ecount);
5734	if (!qdesc) {
5735		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5736				"0500 Failed allocate slow-path mailbox CQ\n");
5737		goto out_free_fp_eq;
5738	}
5739	phba->sli4_hba.mbx_cq = qdesc;
5740
5741	/* Create slow-path ELS Complete Queue */
5742	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5743				      phba->sli4_hba.cq_ecount);
5744	if (!qdesc) {
5745		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5746				"0501 Failed allocate slow-path ELS CQ\n");
5747		goto out_free_mbx_cq;
5748	}
5749	phba->sli4_hba.els_cq = qdesc;
5750
5751
5752	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5753	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5754				phba->cfg_fcp_eq_count), GFP_KERNEL);
5755	if (!phba->sli4_hba.fcp_cq) {
5756		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5757				"2577 Failed allocate memory for fast-path "
5758				"CQ record array\n");
5759		goto out_free_els_cq;
5760	}
5761	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5762		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5763					      phba->sli4_hba.cq_ecount);
5764		if (!qdesc) {
5765			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5766					"0499 Failed allocate fast-path FCP "
5767					"CQ (%d)\n", fcp_cqidx);
5768			goto out_free_fcp_cq;
5769		}
5770		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5771	}
5772
5773	/* Create Mailbox Command Queue */
5774	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5775	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5776
5777	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5778				      phba->sli4_hba.mq_ecount);
5779	if (!qdesc) {
5780		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5781				"0505 Failed allocate slow-path MQ\n");
5782		goto out_free_fcp_cq;
5783	}
5784	phba->sli4_hba.mbx_wq = qdesc;
5785
5786	/*
5787	 * Create all the Work Queues (WQs)
5788	 */
5789	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5790	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5791
5792	/* Create slow-path ELS Work Queue */
5793	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5794				      phba->sli4_hba.wq_ecount);
5795	if (!qdesc) {
5796		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5797				"0504 Failed allocate slow-path ELS WQ\n");
5798		goto out_free_mbx_wq;
5799	}
5800	phba->sli4_hba.els_wq = qdesc;
5801
5802	/* Create fast-path FCP Work Queue(s) */
5803	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5804				phba->cfg_fcp_wq_count), GFP_KERNEL);
5805	if (!phba->sli4_hba.fcp_wq) {
5806		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5807				"2578 Failed allocate memory for fast-path "
5808				"WQ record array\n");
5809		goto out_free_els_wq;
5810	}
5811	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5812		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5813					      phba->sli4_hba.wq_ecount);
5814		if (!qdesc) {
5815			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5816					"0503 Failed allocate fast-path FCP "
5817					"WQ (%d)\n", fcp_wqidx);
5818			goto out_free_fcp_wq;
5819		}
5820		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5821	}
5822
5823	/*
5824	 * Create Receive Queue (RQ)
5825	 */
5826	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5827	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5828
5829	/* Create Receive Queue for header */
5830	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5831				      phba->sli4_hba.rq_ecount);
5832	if (!qdesc) {
5833		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5834				"0506 Failed allocate receive HRQ\n");
5835		goto out_free_fcp_wq;
5836	}
5837	phba->sli4_hba.hdr_rq = qdesc;
5838
5839	/* Create Receive Queue for data */
5840	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5841				      phba->sli4_hba.rq_ecount);
5842	if (!qdesc) {
5843		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5844				"0507 Failed allocate receive DRQ\n");
5845		goto out_free_hdr_rq;
5846	}
5847	phba->sli4_hba.dat_rq = qdesc;
5848
5849	return 0;
5850
5851out_free_hdr_rq:
5852	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5853	phba->sli4_hba.hdr_rq = NULL;
5854out_free_fcp_wq:
5855	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5856		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5857		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5858	}
5859	kfree(phba->sli4_hba.fcp_wq);
5860out_free_els_wq:
5861	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5862	phba->sli4_hba.els_wq = NULL;
5863out_free_mbx_wq:
5864	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5865	phba->sli4_hba.mbx_wq = NULL;
5866out_free_fcp_cq:
5867	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5868		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5869		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5870	}
5871	kfree(phba->sli4_hba.fcp_cq);
5872out_free_els_cq:
5873	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5874	phba->sli4_hba.els_cq = NULL;
5875out_free_mbx_cq:
5876	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5877	phba->sli4_hba.mbx_cq = NULL;
5878out_free_fp_eq:
5879	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5880		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5881		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5882	}
5883	kfree(phba->sli4_hba.fp_eq);
5884out_free_sp_eq:
5885	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5886	phba->sli4_hba.sp_eq = NULL;
5887out_error:
5888	return -ENOMEM;
5889}
5890
5891/**
5892 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5893 * @phba: pointer to lpfc hba data structure.
5894 *
5895 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5896 * operation.
5897 *
5898 * Return codes
5899 *      0 - successful
5900 *      -ENOMEM - No availble memory
5901 *      -EIO - The mailbox failed to complete successfully.
5902 **/
5903static void
5904lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5905{
5906	int fcp_qidx;
5907
5908	/* Release mailbox command work queue */
5909	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5910	phba->sli4_hba.mbx_wq = NULL;
5911
5912	/* Release ELS work queue */
5913	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5914	phba->sli4_hba.els_wq = NULL;
5915
5916	/* Release FCP work queue */
5917	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5918		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5919	kfree(phba->sli4_hba.fcp_wq);
5920	phba->sli4_hba.fcp_wq = NULL;
5921
5922	/* Release unsolicited receive queue */
5923	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5924	phba->sli4_hba.hdr_rq = NULL;
5925	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5926	phba->sli4_hba.dat_rq = NULL;
5927
5928	/* Release ELS complete queue */
5929	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5930	phba->sli4_hba.els_cq = NULL;
5931
5932	/* Release mailbox command complete queue */
5933	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5934	phba->sli4_hba.mbx_cq = NULL;
5935
5936	/* Release FCP response complete queue */
5937	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5938		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5939	kfree(phba->sli4_hba.fcp_cq);
5940	phba->sli4_hba.fcp_cq = NULL;
5941
5942	/* Release fast-path event queue */
5943	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5944		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5945	kfree(phba->sli4_hba.fp_eq);
5946	phba->sli4_hba.fp_eq = NULL;
5947
5948	/* Release slow-path event queue */
5949	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5950	phba->sli4_hba.sp_eq = NULL;
5951
5952	return;
5953}
5954
5955/**
5956 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5957 * @phba: pointer to lpfc hba data structure.
5958 *
5959 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5960 * operation.
5961 *
5962 * Return codes
5963 *      0 - successful
5964 *      -ENOMEM - No availble memory
5965 *      -EIO - The mailbox failed to complete successfully.
5966 **/
5967int
5968lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5969{
5970	int rc = -ENOMEM;
5971	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5972	int fcp_cq_index = 0;
5973
5974	/*
5975	 * Set up Event Queues (EQs)
5976	 */
5977
5978	/* Set up slow-path event queue */
5979	if (!phba->sli4_hba.sp_eq) {
5980		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5981				"0520 Slow-path EQ not allocated\n");
5982		goto out_error;
5983	}
5984	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5985			    LPFC_SP_DEF_IMAX);
5986	if (rc) {
5987		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5988				"0521 Failed setup of slow-path EQ: "
5989				"rc = 0x%x\n", rc);
5990		goto out_error;
5991	}
5992	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5993			"2583 Slow-path EQ setup: queue-id=%d\n",
5994			phba->sli4_hba.sp_eq->queue_id);
5995
5996	/* Set up fast-path event queue */
5997	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5998		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5999			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6000					"0522 Fast-path EQ (%d) not "
6001					"allocated\n", fcp_eqidx);
6002			goto out_destroy_fp_eq;
6003		}
6004		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6005				    phba->cfg_fcp_imax);
6006		if (rc) {
6007			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6008					"0523 Failed setup of fast-path EQ "
6009					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6010			goto out_destroy_fp_eq;
6011		}
6012		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6013				"2584 Fast-path EQ setup: "
6014				"queue[%d]-id=%d\n", fcp_eqidx,
6015				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6016	}
6017
6018	/*
6019	 * Set up Complete Queues (CQs)
6020	 */
6021
6022	/* Set up slow-path MBOX Complete Queue as the first CQ */
6023	if (!phba->sli4_hba.mbx_cq) {
6024		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6025				"0528 Mailbox CQ not allocated\n");
6026		goto out_destroy_fp_eq;
6027	}
6028	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6029			    LPFC_MCQ, LPFC_MBOX);
6030	if (rc) {
6031		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6032				"0529 Failed setup of slow-path mailbox CQ: "
6033				"rc = 0x%x\n", rc);
6034		goto out_destroy_fp_eq;
6035	}
6036	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6037			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6038			phba->sli4_hba.mbx_cq->queue_id,
6039			phba->sli4_hba.sp_eq->queue_id);
6040
6041	/* Set up slow-path ELS Complete Queue */
6042	if (!phba->sli4_hba.els_cq) {
6043		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6044				"0530 ELS CQ not allocated\n");
6045		goto out_destroy_mbx_cq;
6046	}
6047	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6048			    LPFC_WCQ, LPFC_ELS);
6049	if (rc) {
6050		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6051				"0531 Failed setup of slow-path ELS CQ: "
6052				"rc = 0x%x\n", rc);
6053		goto out_destroy_mbx_cq;
6054	}
6055	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6056			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6057			phba->sli4_hba.els_cq->queue_id,
6058			phba->sli4_hba.sp_eq->queue_id);
6059
6060	/* Set up fast-path FCP Response Complete Queue */
6061	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6062		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6063			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6064					"0526 Fast-path FCP CQ (%d) not "
6065					"allocated\n", fcp_cqidx);
6066			goto out_destroy_fcp_cq;
6067		}
6068		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6069				    phba->sli4_hba.fp_eq[fcp_cqidx],
6070				    LPFC_WCQ, LPFC_FCP);
6071		if (rc) {
6072			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6073					"0527 Failed setup of fast-path FCP "
6074					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6075			goto out_destroy_fcp_cq;
6076		}
6077		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6078				"2588 FCP CQ setup: cq[%d]-id=%d, "
6079				"parent eq[%d]-id=%d\n",
6080				fcp_cqidx,
6081				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6082				fcp_cqidx,
6083				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6084	}
6085
6086	/*
6087	 * Set up all the Work Queues (WQs)
6088	 */
6089
6090	/* Set up Mailbox Command Queue */
6091	if (!phba->sli4_hba.mbx_wq) {
6092		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6093				"0538 Slow-path MQ not allocated\n");
6094		goto out_destroy_fcp_cq;
6095	}
6096	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6097			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6098	if (rc) {
6099		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6100				"0539 Failed setup of slow-path MQ: "
6101				"rc = 0x%x\n", rc);
6102		goto out_destroy_fcp_cq;
6103	}
6104	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6105			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6106			phba->sli4_hba.mbx_wq->queue_id,
6107			phba->sli4_hba.mbx_cq->queue_id);
6108
6109	/* Set up slow-path ELS Work Queue */
6110	if (!phba->sli4_hba.els_wq) {
6111		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6112				"0536 Slow-path ELS WQ not allocated\n");
6113		goto out_destroy_mbx_wq;
6114	}
6115	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6116			    phba->sli4_hba.els_cq, LPFC_ELS);
6117	if (rc) {
6118		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6119				"0537 Failed setup of slow-path ELS WQ: "
6120				"rc = 0x%x\n", rc);
6121		goto out_destroy_mbx_wq;
6122	}
6123	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6124			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6125			phba->sli4_hba.els_wq->queue_id,
6126			phba->sli4_hba.els_cq->queue_id);
6127
6128	/* Set up fast-path FCP Work Queue */
6129	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6130		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6131			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6132					"0534 Fast-path FCP WQ (%d) not "
6133					"allocated\n", fcp_wqidx);
6134			goto out_destroy_fcp_wq;
6135		}
6136		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6137				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6138				    LPFC_FCP);
6139		if (rc) {
6140			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6141					"0535 Failed setup of fast-path FCP "
6142					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6143			goto out_destroy_fcp_wq;
6144		}
6145		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6146				"2591 FCP WQ setup: wq[%d]-id=%d, "
6147				"parent cq[%d]-id=%d\n",
6148				fcp_wqidx,
6149				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6150				fcp_cq_index,
6151				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6152		/* Round robin FCP Work Queue's Completion Queue assignment */
6153		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6154	}
6155
6156	/*
6157	 * Create Receive Queue (RQ)
6158	 */
6159	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6160		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6161				"0540 Receive Queue not allocated\n");
6162		goto out_destroy_fcp_wq;
6163	}
6164	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6165			    phba->sli4_hba.els_cq, LPFC_USOL);
6166	if (rc) {
6167		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6168				"0541 Failed setup of Receive Queue: "
6169				"rc = 0x%x\n", rc);
6170		goto out_destroy_fcp_wq;
6171	}
6172	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6173			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6174			"parent cq-id=%d\n",
6175			phba->sli4_hba.hdr_rq->queue_id,
6176			phba->sli4_hba.dat_rq->queue_id,
6177			phba->sli4_hba.els_cq->queue_id);
6178	return 0;
6179
6180out_destroy_fcp_wq:
6181	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6182		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6183	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6184out_destroy_mbx_wq:
6185	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6186out_destroy_fcp_cq:
6187	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6188		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6189	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6190out_destroy_mbx_cq:
6191	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6192out_destroy_fp_eq:
6193	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6194		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6195	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6196out_error:
6197	return rc;
6198}
6199
6200/**
6201 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6202 * @phba: pointer to lpfc hba data structure.
6203 *
6204 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6205 * operation.
6206 *
6207 * Return codes
6208 *      0 - successful
6209 *      -ENOMEM - No availble memory
6210 *      -EIO - The mailbox failed to complete successfully.
6211 **/
6212void
6213lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6214{
6215	int fcp_qidx;
6216
6217	/* Unset mailbox command work queue */
6218	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6219	/* Unset ELS work queue */
6220	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6221	/* Unset unsolicited receive queue */
6222	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6223	/* Unset FCP work queue */
6224	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6225		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6226	/* Unset mailbox command complete queue */
6227	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6228	/* Unset ELS complete queue */
6229	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6230	/* Unset FCP response complete queue */
6231	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6232		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6233	/* Unset fast-path event queue */
6234	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6235		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6236	/* Unset slow-path event queue */
6237	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6238}
6239
6240/**
6241 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6242 * @phba: pointer to lpfc hba data structure.
6243 *
6244 * This routine is invoked to allocate and set up a pool of completion queue
6245 * events. The body of the completion queue event is a completion queue entry
6246 * CQE. For now, this pool is used for the interrupt service routine to queue
6247 * the following HBA completion queue events for the worker thread to process:
6248 *   - Mailbox asynchronous events
6249 *   - Receive queue completion unsolicited events
6250 * Later, this can be used for all the slow-path events.
6251 *
6252 * Return codes
6253 *      0 - successful
6254 *      -ENOMEM - No availble memory
6255 **/
6256static int
6257lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6258{
6259	struct lpfc_cq_event *cq_event;
6260	int i;
6261
6262	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6263		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6264		if (!cq_event)
6265			goto out_pool_create_fail;
6266		list_add_tail(&cq_event->list,
6267			      &phba->sli4_hba.sp_cqe_event_pool);
6268	}
6269	return 0;
6270
6271out_pool_create_fail:
6272	lpfc_sli4_cq_event_pool_destroy(phba);
6273	return -ENOMEM;
6274}
6275
6276/**
6277 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6278 * @phba: pointer to lpfc hba data structure.
6279 *
6280 * This routine is invoked to free the pool of completion queue events at
6281 * driver unload time. Note that, it is the responsibility of the driver
6282 * cleanup routine to free all the outstanding completion-queue events
6283 * allocated from this pool back into the pool before invoking this routine
6284 * to destroy the pool.
6285 **/
6286static void
6287lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6288{
6289	struct lpfc_cq_event *cq_event, *next_cq_event;
6290
6291	list_for_each_entry_safe(cq_event, next_cq_event,
6292				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6293		list_del(&cq_event->list);
6294		kfree(cq_event);
6295	}
6296}
6297
6298/**
6299 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6300 * @phba: pointer to lpfc hba data structure.
6301 *
6302 * This routine is the lock free version of the API invoked to allocate a
6303 * completion-queue event from the free pool.
6304 *
6305 * Return: Pointer to the newly allocated completion-queue event if successful
6306 *         NULL otherwise.
6307 **/
6308struct lpfc_cq_event *
6309__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6310{
6311	struct lpfc_cq_event *cq_event = NULL;
6312
6313	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6314			 struct lpfc_cq_event, list);
6315	return cq_event;
6316}
6317
6318/**
6319 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6320 * @phba: pointer to lpfc hba data structure.
6321 *
6322 * This routine is the lock version of the API invoked to allocate a
6323 * completion-queue event from the free pool.
6324 *
6325 * Return: Pointer to the newly allocated completion-queue event if successful
6326 *         NULL otherwise.
6327 **/
6328struct lpfc_cq_event *
6329lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6330{
6331	struct lpfc_cq_event *cq_event;
6332	unsigned long iflags;
6333
6334	spin_lock_irqsave(&phba->hbalock, iflags);
6335	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6336	spin_unlock_irqrestore(&phba->hbalock, iflags);
6337	return cq_event;
6338}
6339
6340/**
6341 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6342 * @phba: pointer to lpfc hba data structure.
6343 * @cq_event: pointer to the completion queue event to be freed.
6344 *
6345 * This routine is the lock free version of the API invoked to release a
6346 * completion-queue event back into the free pool.
6347 **/
6348void
6349__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6350			     struct lpfc_cq_event *cq_event)
6351{
6352	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6353}
6354
6355/**
6356 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6357 * @phba: pointer to lpfc hba data structure.
6358 * @cq_event: pointer to the completion queue event to be freed.
6359 *
6360 * This routine is the lock version of the API invoked to release a
6361 * completion-queue event back into the free pool.
6362 **/
6363void
6364lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6365			   struct lpfc_cq_event *cq_event)
6366{
6367	unsigned long iflags;
6368	spin_lock_irqsave(&phba->hbalock, iflags);
6369	__lpfc_sli4_cq_event_release(phba, cq_event);
6370	spin_unlock_irqrestore(&phba->hbalock, iflags);
6371}
6372
6373/**
6374 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6375 * @phba: pointer to lpfc hba data structure.
6376 *
6377 * This routine is to free all the pending completion-queue events to the
6378 * back into the free pool for device reset.
6379 **/
6380static void
6381lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6382{
6383	LIST_HEAD(cqelist);
6384	struct lpfc_cq_event *cqe;
6385	unsigned long iflags;
6386
6387	/* Retrieve all the pending WCQEs from pending WCQE lists */
6388	spin_lock_irqsave(&phba->hbalock, iflags);
6389	/* Pending FCP XRI abort events */
6390	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6391			 &cqelist);
6392	/* Pending ELS XRI abort events */
6393	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6394			 &cqelist);
6395	/* Pending asynnc events */
6396	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6397			 &cqelist);
6398	spin_unlock_irqrestore(&phba->hbalock, iflags);
6399
6400	while (!list_empty(&cqelist)) {
6401		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6402		lpfc_sli4_cq_event_release(phba, cqe);
6403	}
6404}
6405
6406/**
6407 * lpfc_pci_function_reset - Reset pci function.
6408 * @phba: pointer to lpfc hba data structure.
6409 *
6410 * This routine is invoked to request a PCI function reset. It will destroys
6411 * all resources assigned to the PCI function which originates this request.
6412 *
6413 * Return codes
6414 *      0 - successful
6415 *      -ENOMEM - No availble memory
6416 *      -EIO - The mailbox failed to complete successfully.
6417 **/
6418int
6419lpfc_pci_function_reset(struct lpfc_hba *phba)
6420{
6421	LPFC_MBOXQ_t *mboxq;
6422	uint32_t rc = 0;
6423	uint32_t shdr_status, shdr_add_status;
6424	union lpfc_sli4_cfg_shdr *shdr;
6425
6426	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6427	if (!mboxq) {
6428		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6429				"0494 Unable to allocate memory for issuing "
6430				"SLI_FUNCTION_RESET mailbox command\n");
6431		return -ENOMEM;
6432	}
6433
6434	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6435	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6436			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6437			 LPFC_SLI4_MBX_EMBED);
6438	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6439	shdr = (union lpfc_sli4_cfg_shdr *)
6440		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6441	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6442	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6443	if (rc != MBX_TIMEOUT)
6444		mempool_free(mboxq, phba->mbox_mem_pool);
6445	if (shdr_status || shdr_add_status || rc) {
6446		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6447				"0495 SLI_FUNCTION_RESET mailbox failed with "
6448				"status x%x add_status x%x, mbx status x%x\n",
6449				shdr_status, shdr_add_status, rc);
6450		rc = -ENXIO;
6451	}
6452	return rc;
6453}
6454
6455/**
6456 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6457 * @phba: pointer to lpfc hba data structure.
6458 * @cnt: number of nop mailbox commands to send.
6459 *
6460 * This routine is invoked to send a number @cnt of NOP mailbox command and
6461 * wait for each command to complete.
6462 *
6463 * Return: the number of NOP mailbox command completed.
6464 **/
6465static int
6466lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6467{
6468	LPFC_MBOXQ_t *mboxq;
6469	int length, cmdsent;
6470	uint32_t mbox_tmo;
6471	uint32_t rc = 0;
6472	uint32_t shdr_status, shdr_add_status;
6473	union lpfc_sli4_cfg_shdr *shdr;
6474
6475	if (cnt == 0) {
6476		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6477				"2518 Requested to send 0 NOP mailbox cmd\n");
6478		return cnt;
6479	}
6480
6481	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6482	if (!mboxq) {
6483		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6484				"2519 Unable to allocate memory for issuing "
6485				"NOP mailbox command\n");
6486		return 0;
6487	}
6488
6489	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6490	length = (sizeof(struct lpfc_mbx_nop) -
6491		  sizeof(struct lpfc_sli4_cfg_mhdr));
6492	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6493			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6494
6495	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6496	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6497		if (!phba->sli4_hba.intr_enable)
6498			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6499		else
6500			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6501		if (rc == MBX_TIMEOUT)
6502			break;
6503		/* Check return status */
6504		shdr = (union lpfc_sli4_cfg_shdr *)
6505			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6506		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6507		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6508					 &shdr->response);
6509		if (shdr_status || shdr_add_status || rc) {
6510			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6511					"2520 NOP mailbox command failed "
6512					"status x%x add_status x%x mbx "
6513					"status x%x\n", shdr_status,
6514					shdr_add_status, rc);
6515			break;
6516		}
6517	}
6518
6519	if (rc != MBX_TIMEOUT)
6520		mempool_free(mboxq, phba->mbox_mem_pool);
6521
6522	return cmdsent;
6523}
6524
6525/**
6526 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6527 * @phba: pointer to lpfc hba data structure.
6528 *
6529 * This routine is invoked to set up the PCI device memory space for device
6530 * with SLI-4 interface spec.
6531 *
6532 * Return codes
6533 * 	0 - successful
6534 * 	other values - error
6535 **/
6536static int
6537lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6538{
6539	struct pci_dev *pdev;
6540	unsigned long bar0map_len, bar1map_len, bar2map_len;
6541	int error = -ENODEV;
6542
6543	/* Obtain PCI device reference */
6544	if (!phba->pcidev)
6545		return error;
6546	else
6547		pdev = phba->pcidev;
6548
6549	/* Set the device DMA mask size */
6550	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6551	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6552		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6553		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6554			return error;
6555		}
6556	}
6557
6558	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6559	 * number of bytes required by each mapping. They are actually
6560	 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
6561	 */
6562	if (pci_resource_start(pdev, 0)) {
6563		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6564		bar0map_len = pci_resource_len(pdev, 0);
6565	} else {
6566		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6567		bar0map_len = pci_resource_len(pdev, 1);
6568	}
6569	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6570	bar1map_len = pci_resource_len(pdev, 2);
6571
6572	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6573	bar2map_len = pci_resource_len(pdev, 4);
6574
6575	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6576	phba->sli4_hba.conf_regs_memmap_p =
6577				ioremap(phba->pci_bar0_map, bar0map_len);
6578	if (!phba->sli4_hba.conf_regs_memmap_p) {
6579		dev_printk(KERN_ERR, &pdev->dev,
6580			   "ioremap failed for SLI4 PCI config registers.\n");
6581		goto out;
6582	}
6583
6584	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6585	phba->sli4_hba.ctrl_regs_memmap_p =
6586				ioremap(phba->pci_bar1_map, bar1map_len);
6587	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6588		dev_printk(KERN_ERR, &pdev->dev,
6589			   "ioremap failed for SLI4 HBA control registers.\n");
6590		goto out_iounmap_conf;
6591	}
6592
6593	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6594	phba->sli4_hba.drbl_regs_memmap_p =
6595				ioremap(phba->pci_bar2_map, bar2map_len);
6596	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6597		dev_printk(KERN_ERR, &pdev->dev,
6598			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6599		goto out_iounmap_ctrl;
6600	}
6601
6602	/* Set up BAR0 PCI config space register memory map */
6603	lpfc_sli4_bar0_register_memmap(phba);
6604
6605	/* Set up BAR1 register memory map */
6606	lpfc_sli4_bar1_register_memmap(phba);
6607
6608	/* Set up BAR2 register memory map */
6609	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6610	if (error)
6611		goto out_iounmap_all;
6612
6613	return 0;
6614
6615out_iounmap_all:
6616	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6617out_iounmap_ctrl:
6618	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6619out_iounmap_conf:
6620	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6621out:
6622	return error;
6623}
6624
6625/**
6626 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6627 * @phba: pointer to lpfc hba data structure.
6628 *
6629 * This routine is invoked to unset the PCI device memory space for device
6630 * with SLI-4 interface spec.
6631 **/
6632static void
6633lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6634{
6635	struct pci_dev *pdev;
6636
6637	/* Obtain PCI device reference */
6638	if (!phba->pcidev)
6639		return;
6640	else
6641		pdev = phba->pcidev;
6642
6643	/* Free coherent DMA memory allocated */
6644
6645	/* Unmap I/O memory space */
6646	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6647	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6648	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6649
6650	return;
6651}
6652
6653/**
6654 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6655 * @phba: pointer to lpfc hba data structure.
6656 *
6657 * This routine is invoked to enable the MSI-X interrupt vectors to device
6658 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6659 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6660 * invoked, enables either all or nothing, depending on the current
6661 * availability of PCI vector resources. The device driver is responsible
6662 * for calling the individual request_irq() to register each MSI-X vector
6663 * with a interrupt handler, which is done in this function. Note that
6664 * later when device is unloading, the driver should always call free_irq()
6665 * on all MSI-X vectors it has done request_irq() on before calling
6666 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
6667 * will be left with MSI-X enabled and leaks its vectors.
6668 *
6669 * Return codes
6670 *   0 - successful
6671 *   other values - error
6672 **/
6673static int
6674lpfc_sli_enable_msix(struct lpfc_hba *phba)
6675{
6676	int rc, i;
6677	LPFC_MBOXQ_t *pmb;
6678
6679	/* Set up MSI-X multi-message vectors */
6680	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6681		phba->msix_entries[i].entry = i;
6682
6683	/* Configure MSI-X capability structure */
6684	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6685				ARRAY_SIZE(phba->msix_entries));
6686	if (rc) {
6687		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6688				"0420 PCI enable MSI-X failed (%d)\n", rc);
6689		goto msi_fail_out;
6690	}
6691	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6692		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6693				"0477 MSI-X entry[%d]: vector=x%x "
6694				"message=%d\n", i,
6695				phba->msix_entries[i].vector,
6696				phba->msix_entries[i].entry);
6697	/*
6698	 * Assign MSI-X vectors to interrupt handlers
6699	 */
6700
6701	/* vector-0 is associated to slow-path handler */
6702	rc = request_irq(phba->msix_entries[0].vector,
6703			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6704			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6705	if (rc) {
6706		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6707				"0421 MSI-X slow-path request_irq failed "
6708				"(%d)\n", rc);
6709		goto msi_fail_out;
6710	}
6711
6712	/* vector-1 is associated to fast-path handler */
6713	rc = request_irq(phba->msix_entries[1].vector,
6714			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6715			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6716
6717	if (rc) {
6718		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6719				"0429 MSI-X fast-path request_irq failed "
6720				"(%d)\n", rc);
6721		goto irq_fail_out;
6722	}
6723
6724	/*
6725	 * Configure HBA MSI-X attention conditions to messages
6726	 */
6727	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6728
6729	if (!pmb) {
6730		rc = -ENOMEM;
6731		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6732				"0474 Unable to allocate memory for issuing "
6733				"MBOX_CONFIG_MSI command\n");
6734		goto mem_fail_out;
6735	}
6736	rc = lpfc_config_msi(phba, pmb);
6737	if (rc)
6738		goto mbx_fail_out;
6739	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6740	if (rc != MBX_SUCCESS) {
6741		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6742				"0351 Config MSI mailbox command failed, "
6743				"mbxCmd x%x, mbxStatus x%x\n",
6744				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6745		goto mbx_fail_out;
6746	}
6747
6748	/* Free memory allocated for mailbox command */
6749	mempool_free(pmb, phba->mbox_mem_pool);
6750	return rc;
6751
6752mbx_fail_out:
6753	/* Free memory allocated for mailbox command */
6754	mempool_free(pmb, phba->mbox_mem_pool);
6755
6756mem_fail_out:
6757	/* free the irq already requested */
6758	free_irq(phba->msix_entries[1].vector, phba);
6759
6760irq_fail_out:
6761	/* free the irq already requested */
6762	free_irq(phba->msix_entries[0].vector, phba);
6763
6764msi_fail_out:
6765	/* Unconfigure MSI-X capability structure */
6766	pci_disable_msix(phba->pcidev);
6767	return rc;
6768}
6769
6770/**
6771 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6772 * @phba: pointer to lpfc hba data structure.
6773 *
6774 * This routine is invoked to release the MSI-X vectors and then disable the
6775 * MSI-X interrupt mode to device with SLI-3 interface spec.
6776 **/
6777static void
6778lpfc_sli_disable_msix(struct lpfc_hba *phba)
6779{
6780	int i;
6781
6782	/* Free up MSI-X multi-message vectors */
6783	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6784		free_irq(phba->msix_entries[i].vector, phba);
6785	/* Disable MSI-X */
6786	pci_disable_msix(phba->pcidev);
6787
6788	return;
6789}
6790
6791/**
6792 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6793 * @phba: pointer to lpfc hba data structure.
6794 *
6795 * This routine is invoked to enable the MSI interrupt mode to device with
6796 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6797 * enable the MSI vector. The device driver is responsible for calling the
6798 * request_irq() to register MSI vector with a interrupt the handler, which
6799 * is done in this function.
6800 *
6801 * Return codes
6802 * 	0 - successful
6803 * 	other values - error
6804 */
6805static int
6806lpfc_sli_enable_msi(struct lpfc_hba *phba)
6807{
6808	int rc;
6809
6810	rc = pci_enable_msi(phba->pcidev);
6811	if (!rc)
6812		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6813				"0462 PCI enable MSI mode success.\n");
6814	else {
6815		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6816				"0471 PCI enable MSI mode failed (%d)\n", rc);
6817		return rc;
6818	}
6819
6820	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6821			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6822	if (rc) {
6823		pci_disable_msi(phba->pcidev);
6824		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6825				"0478 MSI request_irq failed (%d)\n", rc);
6826	}
6827	return rc;
6828}
6829
6830/**
6831 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6832 * @phba: pointer to lpfc hba data structure.
6833 *
6834 * This routine is invoked to disable the MSI interrupt mode to device with
6835 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6836 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6837 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6838 * its vector.
6839 */
6840static void
6841lpfc_sli_disable_msi(struct lpfc_hba *phba)
6842{
6843	free_irq(phba->pcidev->irq, phba);
6844	pci_disable_msi(phba->pcidev);
6845	return;
6846}
6847
6848/**
6849 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6850 * @phba: pointer to lpfc hba data structure.
6851 *
6852 * This routine is invoked to enable device interrupt and associate driver's
6853 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6854 * spec. Depends on the interrupt mode configured to the driver, the driver
6855 * will try to fallback from the configured interrupt mode to an interrupt
6856 * mode which is supported by the platform, kernel, and device in the order
6857 * of:
6858 * MSI-X -> MSI -> IRQ.
6859 *
6860 * Return codes
6861 *   0 - successful
6862 *   other values - error
6863 **/
6864static uint32_t
6865lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6866{
6867	uint32_t intr_mode = LPFC_INTR_ERROR;
6868	int retval;
6869
6870	if (cfg_mode == 2) {
6871		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6872		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6873		if (!retval) {
6874			/* Now, try to enable MSI-X interrupt mode */
6875			retval = lpfc_sli_enable_msix(phba);
6876			if (!retval) {
6877				/* Indicate initialization to MSI-X mode */
6878				phba->intr_type = MSIX;
6879				intr_mode = 2;
6880			}
6881		}
6882	}
6883
6884	/* Fallback to MSI if MSI-X initialization failed */
6885	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6886		retval = lpfc_sli_enable_msi(phba);
6887		if (!retval) {
6888			/* Indicate initialization to MSI mode */
6889			phba->intr_type = MSI;
6890			intr_mode = 1;
6891		}
6892	}
6893
6894	/* Fallback to INTx if both MSI-X/MSI initalization failed */
6895	if (phba->intr_type == NONE) {
6896		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6897				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6898		if (!retval) {
6899			/* Indicate initialization to INTx mode */
6900			phba->intr_type = INTx;
6901			intr_mode = 0;
6902		}
6903	}
6904	return intr_mode;
6905}
6906
6907/**
6908 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6909 * @phba: pointer to lpfc hba data structure.
6910 *
6911 * This routine is invoked to disable device interrupt and disassociate the
6912 * driver's interrupt handler(s) from interrupt vector(s) to device with
6913 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6914 * release the interrupt vector(s) for the message signaled interrupt.
6915 **/
6916static void
6917lpfc_sli_disable_intr(struct lpfc_hba *phba)
6918{
6919	/* Disable the currently initialized interrupt mode */
6920	if (phba->intr_type == MSIX)
6921		lpfc_sli_disable_msix(phba);
6922	else if (phba->intr_type == MSI)
6923		lpfc_sli_disable_msi(phba);
6924	else if (phba->intr_type == INTx)
6925		free_irq(phba->pcidev->irq, phba);
6926
6927	/* Reset interrupt management states */
6928	phba->intr_type = NONE;
6929	phba->sli.slistat.sli_intr = 0;
6930
6931	return;
6932}
6933
6934/**
6935 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6936 * @phba: pointer to lpfc hba data structure.
6937 *
6938 * This routine is invoked to enable the MSI-X interrupt vectors to device
6939 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6940 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6941 * enables either all or nothing, depending on the current availability of
6942 * PCI vector resources. The device driver is responsible for calling the
6943 * individual request_irq() to register each MSI-X vector with a interrupt
6944 * handler, which is done in this function. Note that later when device is
6945 * unloading, the driver should always call free_irq() on all MSI-X vectors
6946 * it has done request_irq() on before calling pci_disable_msix(). Failure
6947 * to do so results in a BUG_ON() and a device will be left with MSI-X
6948 * enabled and leaks its vectors.
6949 *
6950 * Return codes
6951 * 0 - successful
6952 * other values - error
6953 **/
6954static int
6955lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6956{
6957	int vectors, rc, index;
6958
6959	/* Set up MSI-X multi-message vectors */
6960	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6961		phba->sli4_hba.msix_entries[index].entry = index;
6962
6963	/* Configure MSI-X capability structure */
6964	vectors = phba->sli4_hba.cfg_eqn;
6965enable_msix_vectors:
6966	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6967			     vectors);
6968	if (rc > 1) {
6969		vectors = rc;
6970		goto enable_msix_vectors;
6971	} else if (rc) {
6972		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6973				"0484 PCI enable MSI-X failed (%d)\n", rc);
6974		goto msi_fail_out;
6975	}
6976
6977	/* Log MSI-X vector assignment */
6978	for (index = 0; index < vectors; index++)
6979		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6980				"0489 MSI-X entry[%d]: vector=x%x "
6981				"message=%d\n", index,
6982				phba->sli4_hba.msix_entries[index].vector,
6983				phba->sli4_hba.msix_entries[index].entry);
6984	/*
6985	 * Assign MSI-X vectors to interrupt handlers
6986	 */
6987
6988	/* The first vector must associated to slow-path handler for MQ */
6989	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6990			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6991			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6992	if (rc) {
6993		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6994				"0485 MSI-X slow-path request_irq failed "
6995				"(%d)\n", rc);
6996		goto msi_fail_out;
6997	}
6998
6999	/* The rest of the vector(s) are associated to fast-path handler(s) */
7000	for (index = 1; index < vectors; index++) {
7001		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7002		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7003		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7004				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7005				 LPFC_FP_DRIVER_HANDLER_NAME,
7006				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7007		if (rc) {
7008			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7009					"0486 MSI-X fast-path (%d) "
7010					"request_irq failed (%d)\n", index, rc);
7011			goto cfg_fail_out;
7012		}
7013	}
7014	phba->sli4_hba.msix_vec_nr = vectors;
7015
7016	return rc;
7017
7018cfg_fail_out:
7019	/* free the irq already requested */
7020	for (--index; index >= 1; index--)
7021		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7022			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7023
7024	/* free the irq already requested */
7025	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7026
7027msi_fail_out:
7028	/* Unconfigure MSI-X capability structure */
7029	pci_disable_msix(phba->pcidev);
7030	return rc;
7031}
7032
7033/**
7034 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7035 * @phba: pointer to lpfc hba data structure.
7036 *
7037 * This routine is invoked to release the MSI-X vectors and then disable the
7038 * MSI-X interrupt mode to device with SLI-4 interface spec.
7039 **/
7040static void
7041lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7042{
7043	int index;
7044
7045	/* Free up MSI-X multi-message vectors */
7046	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7047
7048	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7049		free_irq(phba->sli4_hba.msix_entries[index].vector,
7050			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7051
7052	/* Disable MSI-X */
7053	pci_disable_msix(phba->pcidev);
7054
7055	return;
7056}
7057
7058/**
7059 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7060 * @phba: pointer to lpfc hba data structure.
7061 *
7062 * This routine is invoked to enable the MSI interrupt mode to device with
7063 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7064 * to enable the MSI vector. The device driver is responsible for calling
7065 * the request_irq() to register MSI vector with a interrupt the handler,
7066 * which is done in this function.
7067 *
7068 * Return codes
7069 * 	0 - successful
7070 * 	other values - error
7071 **/
7072static int
7073lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7074{
7075	int rc, index;
7076
7077	rc = pci_enable_msi(phba->pcidev);
7078	if (!rc)
7079		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7080				"0487 PCI enable MSI mode success.\n");
7081	else {
7082		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7083				"0488 PCI enable MSI mode failed (%d)\n", rc);
7084		return rc;
7085	}
7086
7087	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7088			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7089	if (rc) {
7090		pci_disable_msi(phba->pcidev);
7091		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7092				"0490 MSI request_irq failed (%d)\n", rc);
7093		return rc;
7094	}
7095
7096	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7097		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7098		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7099	}
7100
7101	return 0;
7102}
7103
7104/**
7105 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7106 * @phba: pointer to lpfc hba data structure.
7107 *
7108 * This routine is invoked to disable the MSI interrupt mode to device with
7109 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
7110 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7111 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
7112 * its vector.
7113 **/
7114static void
7115lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7116{
7117	free_irq(phba->pcidev->irq, phba);
7118	pci_disable_msi(phba->pcidev);
7119	return;
7120}
7121
7122/**
7123 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7124 * @phba: pointer to lpfc hba data structure.
7125 *
7126 * This routine is invoked to enable device interrupt and associate driver's
7127 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7128 * interface spec. Depends on the interrupt mode configured to the driver,
7129 * the driver will try to fallback from the configured interrupt mode to an
7130 * interrupt mode which is supported by the platform, kernel, and device in
7131 * the order of:
7132 * MSI-X -> MSI -> IRQ.
7133 *
7134 * Return codes
7135 * 	0 - successful
7136 * 	other values - error
7137 **/
7138static uint32_t
7139lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7140{
7141	uint32_t intr_mode = LPFC_INTR_ERROR;
7142	int retval, index;
7143
7144	if (cfg_mode == 2) {
7145		/* Preparation before conf_msi mbox cmd */
7146		retval = 0;
7147		if (!retval) {
7148			/* Now, try to enable MSI-X interrupt mode */
7149			retval = lpfc_sli4_enable_msix(phba);
7150			if (!retval) {
7151				/* Indicate initialization to MSI-X mode */
7152				phba->intr_type = MSIX;
7153				intr_mode = 2;
7154			}
7155		}
7156	}
7157
7158	/* Fallback to MSI if MSI-X initialization failed */
7159	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7160		retval = lpfc_sli4_enable_msi(phba);
7161		if (!retval) {
7162			/* Indicate initialization to MSI mode */
7163			phba->intr_type = MSI;
7164			intr_mode = 1;
7165		}
7166	}
7167
7168	/* Fallback to INTx if both MSI-X/MSI initalization failed */
7169	if (phba->intr_type == NONE) {
7170		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7171				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7172		if (!retval) {
7173			/* Indicate initialization to INTx mode */
7174			phba->intr_type = INTx;
7175			intr_mode = 0;
7176			for (index = 0; index < phba->cfg_fcp_eq_count;
7177			     index++) {
7178				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7179				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7180			}
7181		}
7182	}
7183	return intr_mode;
7184}
7185
7186/**
7187 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7188 * @phba: pointer to lpfc hba data structure.
7189 *
7190 * This routine is invoked to disable device interrupt and disassociate
7191 * the driver's interrupt handler(s) from interrupt vector(s) to device
7192 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7193 * will release the interrupt vector(s) for the message signaled interrupt.
7194 **/
7195static void
7196lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7197{
7198	/* Disable the currently initialized interrupt mode */
7199	if (phba->intr_type == MSIX)
7200		lpfc_sli4_disable_msix(phba);
7201	else if (phba->intr_type == MSI)
7202		lpfc_sli4_disable_msi(phba);
7203	else if (phba->intr_type == INTx)
7204		free_irq(phba->pcidev->irq, phba);
7205
7206	/* Reset interrupt management states */
7207	phba->intr_type = NONE;
7208	phba->sli.slistat.sli_intr = 0;
7209
7210	return;
7211}
7212
7213/**
7214 * lpfc_unset_hba - Unset SLI3 hba device initialization
7215 * @phba: pointer to lpfc hba data structure.
7216 *
7217 * This routine is invoked to unset the HBA device initialization steps to
7218 * a device with SLI-3 interface spec.
7219 **/
7220static void
7221lpfc_unset_hba(struct lpfc_hba *phba)
7222{
7223	struct lpfc_vport *vport = phba->pport;
7224	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7225
7226	spin_lock_irq(shost->host_lock);
7227	vport->load_flag |= FC_UNLOADING;
7228	spin_unlock_irq(shost->host_lock);
7229
7230	lpfc_stop_hba_timers(phba);
7231
7232	phba->pport->work_port_events = 0;
7233
7234	lpfc_sli_hba_down(phba);
7235
7236	lpfc_sli_brdrestart(phba);
7237
7238	lpfc_sli_disable_intr(phba);
7239
7240	return;
7241}
7242
7243/**
7244 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7245 * @phba: pointer to lpfc hba data structure.
7246 *
7247 * This routine is invoked to unset the HBA device initialization steps to
7248 * a device with SLI-4 interface spec.
7249 **/
7250static void
7251lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7252{
7253	struct lpfc_vport *vport = phba->pport;
7254	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7255
7256	spin_lock_irq(shost->host_lock);
7257	vport->load_flag |= FC_UNLOADING;
7258	spin_unlock_irq(shost->host_lock);
7259
7260	phba->pport->work_port_events = 0;
7261
7262	/* Stop the SLI4 device port */
7263	lpfc_stop_port(phba);
7264
7265	lpfc_sli4_disable_intr(phba);
7266
7267	/* Reset SLI4 HBA FCoE function */
7268	lpfc_pci_function_reset(phba);
7269
7270	return;
7271}
7272
7273/**
7274 * lpfc_sli4_hba_unset - Unset the fcoe hba
7275 * @phba: Pointer to HBA context object.
7276 *
7277 * This function is called in the SLI4 code path to reset the HBA's FCoE
7278 * function. The caller is not required to hold any lock. This routine
7279 * issues PCI function reset mailbox command to reset the FCoE function.
7280 * At the end of the function, it calls lpfc_hba_down_post function to
7281 * free any pending commands.
7282 **/
7283static void
7284lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7285{
7286	int wait_cnt = 0;
7287	LPFC_MBOXQ_t *mboxq;
7288
7289	lpfc_stop_hba_timers(phba);
7290	phba->sli4_hba.intr_enable = 0;
7291
7292	/*
7293	 * Gracefully wait out the potential current outstanding asynchronous
7294	 * mailbox command.
7295	 */
7296
7297	/* First, block any pending async mailbox command from posted */
7298	spin_lock_irq(&phba->hbalock);
7299	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7300	spin_unlock_irq(&phba->hbalock);
7301	/* Now, trying to wait it out if we can */
7302	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7303		msleep(10);
7304		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7305			break;
7306	}
7307	/* Forcefully release the outstanding mailbox command if timed out */
7308	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7309		spin_lock_irq(&phba->hbalock);
7310		mboxq = phba->sli.mbox_active;
7311		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7312		__lpfc_mbox_cmpl_put(phba, mboxq);
7313		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7314		phba->sli.mbox_active = NULL;
7315		spin_unlock_irq(&phba->hbalock);
7316	}
7317
7318	/* Disable PCI subsystem interrupt */
7319	lpfc_sli4_disable_intr(phba);
7320
7321	/* Stop kthread signal shall trigger work_done one more time */
7322	kthread_stop(phba->worker_thread);
7323
7324	/* Reset SLI4 HBA FCoE function */
7325	lpfc_pci_function_reset(phba);
7326
7327	/* Stop the SLI4 device port */
7328	phba->pport->work_port_events = 0;
7329}
7330
7331 /**
7332 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7333 * @phba: Pointer to HBA context object.
7334 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7335 *
7336 * This function is called in the SLI4 code path to read the port's
7337 * sli4 capabilities.
7338 *
7339 * This function may be be called from any context that can block-wait
7340 * for the completion.  The expectation is that this routine is called
7341 * typically from probe_one or from the online routine.
7342 **/
7343int
7344lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7345{
7346	int rc;
7347	struct lpfc_mqe *mqe;
7348	struct lpfc_pc_sli4_params *sli4_params;
7349	uint32_t mbox_tmo;
7350
7351	rc = 0;
7352	mqe = &mboxq->u.mqe;
7353
7354	/* Read the port's SLI4 Parameters port capabilities */
7355	lpfc_sli4_params(mboxq);
7356	if (!phba->sli4_hba.intr_enable)
7357		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7358	else {
7359		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7360		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7361	}
7362
7363	if (unlikely(rc))
7364		return 1;
7365
7366	sli4_params = &phba->sli4_hba.pc_sli4_params;
7367	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7368	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7369	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7370	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7371					     &mqe->un.sli4_params);
7372	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7373					     &mqe->un.sli4_params);
7374	sli4_params->proto_types = mqe->un.sli4_params.word3;
7375	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7376	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7377	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7378	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7379	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7380	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7381	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7382	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7383	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7384	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7385	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7386	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7387	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7388	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7389	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7390	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7391	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7392	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7393	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7394	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7395	return rc;
7396}
7397
7398/**
7399 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7400 * @pdev: pointer to PCI device
7401 * @pid: pointer to PCI device identifier
7402 *
7403 * This routine is to be called to attach a device with SLI-3 interface spec
7404 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7405 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7406 * information of the device and driver to see if the driver state that it can
7407 * support this kind of device. If the match is successful, the driver core
7408 * invokes this routine. If this routine determines it can claim the HBA, it
7409 * does all the initialization that it needs to do to handle the HBA properly.
7410 *
7411 * Return code
7412 * 	0 - driver can claim the device
7413 * 	negative value - driver can not claim the device
7414 **/
7415static int __devinit
7416lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7417{
7418	struct lpfc_hba   *phba;
7419	struct lpfc_vport *vport = NULL;
7420	struct Scsi_Host  *shost = NULL;
7421	int error;
7422	uint32_t cfg_mode, intr_mode;
7423
7424	/* Allocate memory for HBA structure */
7425	phba = lpfc_hba_alloc(pdev);
7426	if (!phba)
7427		return -ENOMEM;
7428
7429	/* Perform generic PCI device enabling operation */
7430	error = lpfc_enable_pci_dev(phba);
7431	if (error) {
7432		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7433				"1401 Failed to enable pci device.\n");
7434		goto out_free_phba;
7435	}
7436
7437	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
7438	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7439	if (error)
7440		goto out_disable_pci_dev;
7441
7442	/* Set up SLI-3 specific device PCI memory space */
7443	error = lpfc_sli_pci_mem_setup(phba);
7444	if (error) {
7445		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7446				"1402 Failed to set up pci memory space.\n");
7447		goto out_disable_pci_dev;
7448	}
7449
7450	/* Set up phase-1 common device driver resources */
7451	error = lpfc_setup_driver_resource_phase1(phba);
7452	if (error) {
7453		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7454				"1403 Failed to set up driver resource.\n");
7455		goto out_unset_pci_mem_s3;
7456	}
7457
7458	/* Set up SLI-3 specific device driver resources */
7459	error = lpfc_sli_driver_resource_setup(phba);
7460	if (error) {
7461		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7462				"1404 Failed to set up driver resource.\n");
7463		goto out_unset_pci_mem_s3;
7464	}
7465
7466	/* Initialize and populate the iocb list per host */
7467	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7468	if (error) {
7469		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7470				"1405 Failed to initialize iocb list.\n");
7471		goto out_unset_driver_resource_s3;
7472	}
7473
7474	/* Set up common device driver resources */
7475	error = lpfc_setup_driver_resource_phase2(phba);
7476	if (error) {
7477		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7478				"1406 Failed to set up driver resource.\n");
7479		goto out_free_iocb_list;
7480	}
7481
7482	/* Create SCSI host to the physical port */
7483	error = lpfc_create_shost(phba);
7484	if (error) {
7485		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7486				"1407 Failed to create scsi host.\n");
7487		goto out_unset_driver_resource;
7488	}
7489
7490	/* Configure sysfs attributes */
7491	vport = phba->pport;
7492	error = lpfc_alloc_sysfs_attr(vport);
7493	if (error) {
7494		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7495				"1476 Failed to allocate sysfs attr\n");
7496		goto out_destroy_shost;
7497	}
7498
7499	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7500	/* Now, trying to enable interrupt and bring up the device */
7501	cfg_mode = phba->cfg_use_msi;
7502	while (true) {
7503		/* Put device to a known state before enabling interrupt */
7504		lpfc_stop_port(phba);
7505		/* Configure and enable interrupt */
7506		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7507		if (intr_mode == LPFC_INTR_ERROR) {
7508			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7509					"0431 Failed to enable interrupt.\n");
7510			error = -ENODEV;
7511			goto out_free_sysfs_attr;
7512		}
7513		/* SLI-3 HBA setup */
7514		if (lpfc_sli_hba_setup(phba)) {
7515			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7516					"1477 Failed to set up hba\n");
7517			error = -ENODEV;
7518			goto out_remove_device;
7519		}
7520
7521		/* Wait 50ms for the interrupts of previous mailbox commands */
7522		msleep(50);
7523		/* Check active interrupts on message signaled interrupts */
7524		if (intr_mode == 0 ||
7525		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7526			/* Log the current active interrupt mode */
7527			phba->intr_mode = intr_mode;
7528			lpfc_log_intr_mode(phba, intr_mode);
7529			break;
7530		} else {
7531			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7532					"0447 Configure interrupt mode (%d) "
7533					"failed active interrupt test.\n",
7534					intr_mode);
7535			/* Disable the current interrupt mode */
7536			lpfc_sli_disable_intr(phba);
7537			/* Try next level of interrupt mode */
7538			cfg_mode = --intr_mode;
7539		}
7540	}
7541
7542	/* Perform post initialization setup */
7543	lpfc_post_init_setup(phba);
7544
7545	/* Check if there are static vports to be created. */
7546	lpfc_create_static_vport(phba);
7547
7548	return 0;
7549
7550out_remove_device:
7551	lpfc_unset_hba(phba);
7552out_free_sysfs_attr:
7553	lpfc_free_sysfs_attr(vport);
7554out_destroy_shost:
7555	lpfc_destroy_shost(phba);
7556out_unset_driver_resource:
7557	lpfc_unset_driver_resource_phase2(phba);
7558out_free_iocb_list:
7559	lpfc_free_iocb_list(phba);
7560out_unset_driver_resource_s3:
7561	lpfc_sli_driver_resource_unset(phba);
7562out_unset_pci_mem_s3:
7563	lpfc_sli_pci_mem_unset(phba);
7564out_disable_pci_dev:
7565	lpfc_disable_pci_dev(phba);
7566	if (shost)
7567		scsi_host_put(shost);
7568out_free_phba:
7569	lpfc_hba_free(phba);
7570	return error;
7571}
7572
7573/**
7574 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7575 * @pdev: pointer to PCI device
7576 *
7577 * This routine is to be called to disattach a device with SLI-3 interface
7578 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7579 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7580 * device to be removed from the PCI subsystem properly.
7581 **/
7582static void __devexit
7583lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7584{
7585	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
7586	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7587	struct lpfc_vport **vports;
7588	struct lpfc_hba   *phba = vport->phba;
7589	int i;
7590	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7591
7592	spin_lock_irq(&phba->hbalock);
7593	vport->load_flag |= FC_UNLOADING;
7594	spin_unlock_irq(&phba->hbalock);
7595
7596	lpfc_free_sysfs_attr(vport);
7597
7598	/* Release all the vports against this physical port */
7599	vports = lpfc_create_vport_work_array(phba);
7600	if (vports != NULL)
7601		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7602			fc_vport_terminate(vports[i]->fc_vport);
7603	lpfc_destroy_vport_work_array(phba, vports);
7604
7605	/* Remove FC host and then SCSI host with the physical port */
7606	fc_remove_host(shost);
7607	scsi_remove_host(shost);
7608	lpfc_cleanup(vport);
7609
7610	/*
7611	 * Bring down the SLI Layer. This step disable all interrupts,
7612	 * clears the rings, discards all mailbox commands, and resets
7613	 * the HBA.
7614	 */
7615
7616	/* HBA interrupt will be diabled after this call */
7617	lpfc_sli_hba_down(phba);
7618	/* Stop kthread signal shall trigger work_done one more time */
7619	kthread_stop(phba->worker_thread);
7620	/* Final cleanup of txcmplq and reset the HBA */
7621	lpfc_sli_brdrestart(phba);
7622
7623	lpfc_stop_hba_timers(phba);
7624	spin_lock_irq(&phba->hbalock);
7625	list_del_init(&vport->listentry);
7626	spin_unlock_irq(&phba->hbalock);
7627
7628	lpfc_debugfs_terminate(vport);
7629
7630	/* Disable interrupt */
7631	lpfc_sli_disable_intr(phba);
7632
7633	pci_set_drvdata(pdev, NULL);
7634	scsi_host_put(shost);
7635
7636	/*
7637	 * Call scsi_free before mem_free since scsi bufs are released to their
7638	 * corresponding pools here.
7639	 */
7640	lpfc_scsi_free(phba);
7641	lpfc_mem_free_all(phba);
7642
7643	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7644			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7645
7646	/* Free resources associated with SLI2 interface */
7647	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7648			  phba->slim2p.virt, phba->slim2p.phys);
7649
7650	/* unmap adapter SLIM and Control Registers */
7651	iounmap(phba->ctrl_regs_memmap_p);
7652	iounmap(phba->slim_memmap_p);
7653
7654	lpfc_hba_free(phba);
7655
7656	pci_release_selected_regions(pdev, bars);
7657	pci_disable_device(pdev);
7658}
7659
7660/**
7661 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7662 * @pdev: pointer to PCI device
7663 * @msg: power management message
7664 *
7665 * This routine is to be called from the kernel's PCI subsystem to support
7666 * system Power Management (PM) to device with SLI-3 interface spec. When
7667 * PM invokes this method, it quiesces the device by stopping the driver's
7668 * worker thread for the device, turning off device's interrupt and DMA,
7669 * and bring the device offline. Note that as the driver implements the
7670 * minimum PM requirements to a power-aware driver's PM support for the
7671 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7672 * to the suspend() method call will be treated as SUSPEND and the driver will
7673 * fully reinitialize its device during resume() method call, the driver will
7674 * set device to PCI_D3hot state in PCI config space instead of setting it
7675 * according to the @msg provided by the PM.
7676 *
7677 * Return code
7678 * 	0 - driver suspended the device
7679 * 	Error otherwise
7680 **/
7681static int
7682lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7683{
7684	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7685	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7686
7687	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7688			"0473 PCI device Power Management suspend.\n");
7689
7690	/* Bring down the device */
7691	lpfc_offline_prep(phba);
7692	lpfc_offline(phba);
7693	kthread_stop(phba->worker_thread);
7694
7695	/* Disable interrupt from device */
7696	lpfc_sli_disable_intr(phba);
7697
7698	/* Save device state to PCI config space */
7699	pci_save_state(pdev);
7700	pci_set_power_state(pdev, PCI_D3hot);
7701
7702	return 0;
7703}
7704
7705/**
7706 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7707 * @pdev: pointer to PCI device
7708 *
7709 * This routine is to be called from the kernel's PCI subsystem to support
7710 * system Power Management (PM) to device with SLI-3 interface spec. When PM
7711 * invokes this method, it restores the device's PCI config space state and
7712 * fully reinitializes the device and brings it online. Note that as the
7713 * driver implements the minimum PM requirements to a power-aware driver's
7714 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
7715 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
7716 * driver will fully reinitialize its device during resume() method call,
7717 * the device will be set to PCI_D0 directly in PCI config space before
7718 * restoring the state.
7719 *
7720 * Return code
7721 * 	0 - driver suspended the device
7722 * 	Error otherwise
7723 **/
7724static int
7725lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7726{
7727	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7728	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7729	uint32_t intr_mode;
7730	int error;
7731
7732	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7733			"0452 PCI device Power Management resume.\n");
7734
7735	/* Restore device state from PCI config space */
7736	pci_set_power_state(pdev, PCI_D0);
7737	pci_restore_state(pdev);
7738
7739	/*
7740	 * As the new kernel behavior of pci_restore_state() API call clears
7741	 * device saved_state flag, need to save the restored state again.
7742	 */
7743	pci_save_state(pdev);
7744
7745	if (pdev->is_busmaster)
7746		pci_set_master(pdev);
7747
7748	/* Startup the kernel thread for this host adapter. */
7749	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7750					"lpfc_worker_%d", phba->brd_no);
7751	if (IS_ERR(phba->worker_thread)) {
7752		error = PTR_ERR(phba->worker_thread);
7753		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7754				"0434 PM resume failed to start worker "
7755				"thread: error=x%x.\n", error);
7756		return error;
7757	}
7758
7759	/* Configure and enable interrupt */
7760	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7761	if (intr_mode == LPFC_INTR_ERROR) {
7762		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7763				"0430 PM resume Failed to enable interrupt\n");
7764		return -EIO;
7765	} else
7766		phba->intr_mode = intr_mode;
7767
7768	/* Restart HBA and bring it online */
7769	lpfc_sli_brdrestart(phba);
7770	lpfc_online(phba);
7771
7772	/* Log the current active interrupt mode */
7773	lpfc_log_intr_mode(phba, phba->intr_mode);
7774
7775	return 0;
7776}
7777
7778/**
7779 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7780 * @phba: pointer to lpfc hba data structure.
7781 *
7782 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7783 * aborts all the outstanding SCSI I/Os to the pci device.
7784 **/
7785static void
7786lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7787{
7788	struct lpfc_sli *psli = &phba->sli;
7789	struct lpfc_sli_ring  *pring;
7790
7791	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7792			"2723 PCI channel I/O abort preparing for recovery\n");
7793
7794	/*
7795	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
7796	 * and let the SCSI mid-layer to retry them to recover.
7797	 */
7798	pring = &psli->ring[psli->fcp_ring];
7799	lpfc_sli_abort_iocb_ring(phba, pring);
7800}
7801
7802/**
7803 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7804 * @phba: pointer to lpfc hba data structure.
7805 *
7806 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7807 * disables the device interrupt and pci device, and aborts the internal FCP
7808 * pending I/Os.
7809 **/
7810static void
7811lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7812{
7813	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7814			"2710 PCI channel disable preparing for reset\n");
7815
7816	/* Block any management I/Os to the device */
7817	lpfc_block_mgmt_io(phba);
7818
7819	/* Block all SCSI devices' I/Os on the host */
7820	lpfc_scsi_dev_block(phba);
7821
7822	/* stop all timers */
7823	lpfc_stop_hba_timers(phba);
7824
7825	/* Disable interrupt and pci device */
7826	lpfc_sli_disable_intr(phba);
7827	pci_disable_device(phba->pcidev);
7828
7829	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
7830	lpfc_sli_flush_fcp_rings(phba);
7831}
7832
7833/**
7834 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7835 * @phba: pointer to lpfc hba data structure.
7836 *
7837 * This routine is called to prepare the SLI3 device for PCI slot permanently
7838 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7839 * pending I/Os.
7840 **/
7841static void
7842lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7843{
7844	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7845			"2711 PCI channel permanent disable for failure\n");
7846	/* Block all SCSI devices' I/Os on the host */
7847	lpfc_scsi_dev_block(phba);
7848
7849	/* stop all timers */
7850	lpfc_stop_hba_timers(phba);
7851
7852	/* Clean up all driver's outstanding SCSI I/Os */
7853	lpfc_sli_flush_fcp_rings(phba);
7854}
7855
7856/**
7857 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7858 * @pdev: pointer to PCI device.
7859 * @state: the current PCI connection state.
7860 *
7861 * This routine is called from the PCI subsystem for I/O error handling to
7862 * device with SLI-3 interface spec. This function is called by the PCI
7863 * subsystem after a PCI bus error affecting this device has been detected.
7864 * When this function is invoked, it will need to stop all the I/Os and
7865 * interrupt(s) to the device. Once that is done, it will return
7866 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7867 * as desired.
7868 *
7869 * Return codes
7870 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7871 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7872 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7873 **/
7874static pci_ers_result_t
7875lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7876{
7877	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7878	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7879
7880	switch (state) {
7881	case pci_channel_io_normal:
7882		/* Non-fatal error, prepare for recovery */
7883		lpfc_sli_prep_dev_for_recover(phba);
7884		return PCI_ERS_RESULT_CAN_RECOVER;
7885	case pci_channel_io_frozen:
7886		/* Fatal error, prepare for slot reset */
7887		lpfc_sli_prep_dev_for_reset(phba);
7888		return PCI_ERS_RESULT_NEED_RESET;
7889	case pci_channel_io_perm_failure:
7890		/* Permanent failure, prepare for device down */
7891		lpfc_sli_prep_dev_for_perm_failure(phba);
7892		return PCI_ERS_RESULT_DISCONNECT;
7893	default:
7894		/* Unknown state, prepare and request slot reset */
7895		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7896				"0472 Unknown PCI error state: x%x\n", state);
7897		lpfc_sli_prep_dev_for_reset(phba);
7898		return PCI_ERS_RESULT_NEED_RESET;
7899	}
7900}
7901
7902/**
7903 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7904 * @pdev: pointer to PCI device.
7905 *
7906 * This routine is called from the PCI subsystem for error handling to
7907 * device with SLI-3 interface spec. This is called after PCI bus has been
7908 * reset to restart the PCI card from scratch, as if from a cold-boot.
7909 * During the PCI subsystem error recovery, after driver returns
7910 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7911 * recovery and then call this routine before calling the .resume method
7912 * to recover the device. This function will initialize the HBA device,
7913 * enable the interrupt, but it will just put the HBA to offline state
7914 * without passing any I/O traffic.
7915 *
7916 * Return codes
7917 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7918 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7919 */
7920static pci_ers_result_t
7921lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7922{
7923	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7924	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7925	struct lpfc_sli *psli = &phba->sli;
7926	uint32_t intr_mode;
7927
7928	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7929	if (pci_enable_device_mem(pdev)) {
7930		printk(KERN_ERR "lpfc: Cannot re-enable "
7931			"PCI device after reset.\n");
7932		return PCI_ERS_RESULT_DISCONNECT;
7933	}
7934
7935	pci_restore_state(pdev);
7936
7937	/*
7938	 * As the new kernel behavior of pci_restore_state() API call clears
7939	 * device saved_state flag, need to save the restored state again.
7940	 */
7941	pci_save_state(pdev);
7942
7943	if (pdev->is_busmaster)
7944		pci_set_master(pdev);
7945
7946	spin_lock_irq(&phba->hbalock);
7947	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7948	spin_unlock_irq(&phba->hbalock);
7949
7950	/* Configure and enable interrupt */
7951	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7952	if (intr_mode == LPFC_INTR_ERROR) {
7953		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7954				"0427 Cannot re-enable interrupt after "
7955				"slot reset.\n");
7956		return PCI_ERS_RESULT_DISCONNECT;
7957	} else
7958		phba->intr_mode = intr_mode;
7959
7960	/* Take device offline, it will perform cleanup */
7961	lpfc_offline_prep(phba);
7962	lpfc_offline(phba);
7963	lpfc_sli_brdrestart(phba);
7964
7965	/* Log the current active interrupt mode */
7966	lpfc_log_intr_mode(phba, phba->intr_mode);
7967
7968	return PCI_ERS_RESULT_RECOVERED;
7969}
7970
7971/**
7972 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7973 * @pdev: pointer to PCI device
7974 *
7975 * This routine is called from the PCI subsystem for error handling to device
7976 * with SLI-3 interface spec. It is called when kernel error recovery tells
7977 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7978 * error recovery. After this call, traffic can start to flow from this device
7979 * again.
7980 */
7981static void
7982lpfc_io_resume_s3(struct pci_dev *pdev)
7983{
7984	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7985	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7986
7987	/* Bring device online, it will be no-op for non-fatal error resume */
7988	lpfc_online(phba);
7989
7990	/* Clean up Advanced Error Reporting (AER) if needed */
7991	if (phba->hba_flag & HBA_AER_ENABLED)
7992		pci_cleanup_aer_uncorrect_error_status(pdev);
7993}
7994
7995/**
7996 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7997 * @phba: pointer to lpfc hba data structure.
7998 *
7999 * returns the number of ELS/CT IOCBs to reserve
8000 **/
8001int
8002lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8003{
8004	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8005
8006	if (phba->sli_rev == LPFC_SLI_REV4) {
8007		if (max_xri <= 100)
8008			return 10;
8009		else if (max_xri <= 256)
8010			return 25;
8011		else if (max_xri <= 512)
8012			return 50;
8013		else if (max_xri <= 1024)
8014			return 100;
8015		else
8016			return 150;
8017	} else
8018		return 0;
8019}
8020
8021/**
8022 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8023 * @pdev: pointer to PCI device
8024 * @pid: pointer to PCI device identifier
8025 *
8026 * This routine is called from the kernel's PCI subsystem to device with
8027 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8028 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8029 * information of the device and driver to see if the driver state that it
8030 * can support this kind of device. If the match is successful, the driver
8031 * core invokes this routine. If this routine determines it can claim the HBA,
8032 * it does all the initialization that it needs to do to handle the HBA
8033 * properly.
8034 *
8035 * Return code
8036 * 	0 - driver can claim the device
8037 * 	negative value - driver can not claim the device
8038 **/
8039static int __devinit
8040lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8041{
8042	struct lpfc_hba   *phba;
8043	struct lpfc_vport *vport = NULL;
8044	struct Scsi_Host  *shost = NULL;
8045	int error;
8046	uint32_t cfg_mode, intr_mode;
8047	int mcnt;
8048
8049	/* Allocate memory for HBA structure */
8050	phba = lpfc_hba_alloc(pdev);
8051	if (!phba)
8052		return -ENOMEM;
8053
8054	/* Perform generic PCI device enabling operation */
8055	error = lpfc_enable_pci_dev(phba);
8056	if (error) {
8057		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8058				"1409 Failed to enable pci device.\n");
8059		goto out_free_phba;
8060	}
8061
8062	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
8063	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8064	if (error)
8065		goto out_disable_pci_dev;
8066
8067	/* Set up SLI-4 specific device PCI memory space */
8068	error = lpfc_sli4_pci_mem_setup(phba);
8069	if (error) {
8070		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8071				"1410 Failed to set up pci memory space.\n");
8072		goto out_disable_pci_dev;
8073	}
8074
8075	/* Set up phase-1 common device driver resources */
8076	error = lpfc_setup_driver_resource_phase1(phba);
8077	if (error) {
8078		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8079				"1411 Failed to set up driver resource.\n");
8080		goto out_unset_pci_mem_s4;
8081	}
8082
8083	/* Set up SLI-4 Specific device driver resources */
8084	error = lpfc_sli4_driver_resource_setup(phba);
8085	if (error) {
8086		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8087				"1412 Failed to set up driver resource.\n");
8088		goto out_unset_pci_mem_s4;
8089	}
8090
8091	/* Initialize and populate the iocb list per host */
8092
8093	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8094			"2821 initialize iocb list %d.\n",
8095			phba->cfg_iocb_cnt*1024);
8096	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8097
8098	if (error) {
8099		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8100				"1413 Failed to initialize iocb list.\n");
8101		goto out_unset_driver_resource_s4;
8102	}
8103
8104	/* Set up common device driver resources */
8105	error = lpfc_setup_driver_resource_phase2(phba);
8106	if (error) {
8107		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8108				"1414 Failed to set up driver resource.\n");
8109		goto out_free_iocb_list;
8110	}
8111
8112	/* Create SCSI host to the physical port */
8113	error = lpfc_create_shost(phba);
8114	if (error) {
8115		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8116				"1415 Failed to create scsi host.\n");
8117		goto out_unset_driver_resource;
8118	}
8119
8120	/* Configure sysfs attributes */
8121	vport = phba->pport;
8122	error = lpfc_alloc_sysfs_attr(vport);
8123	if (error) {
8124		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8125				"1416 Failed to allocate sysfs attr\n");
8126		goto out_destroy_shost;
8127	}
8128
8129	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8130	/* Now, trying to enable interrupt and bring up the device */
8131	cfg_mode = phba->cfg_use_msi;
8132	while (true) {
8133		/* Put device to a known state before enabling interrupt */
8134		lpfc_stop_port(phba);
8135		/* Configure and enable interrupt */
8136		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8137		if (intr_mode == LPFC_INTR_ERROR) {
8138			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8139					"0426 Failed to enable interrupt.\n");
8140			error = -ENODEV;
8141			goto out_free_sysfs_attr;
8142		}
8143		/* Default to single FCP EQ for non-MSI-X */
8144		if (phba->intr_type != MSIX)
8145			phba->cfg_fcp_eq_count = 1;
8146		else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
8147			phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
8148		/* Set up SLI-4 HBA */
8149		if (lpfc_sli4_hba_setup(phba)) {
8150			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8151					"1421 Failed to set up hba\n");
8152			error = -ENODEV;
8153			goto out_disable_intr;
8154		}
8155
8156		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
8157		if (intr_mode != 0)
8158			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8159							    LPFC_ACT_INTR_CNT);
8160
8161		/* Check active interrupts received only for MSI/MSI-X */
8162		if (intr_mode == 0 ||
8163		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8164			/* Log the current active interrupt mode */
8165			phba->intr_mode = intr_mode;
8166			lpfc_log_intr_mode(phba, intr_mode);
8167			break;
8168		}
8169		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8170				"0451 Configure interrupt mode (%d) "
8171				"failed active interrupt test.\n",
8172				intr_mode);
8173		/* Unset the preivous SLI-4 HBA setup */
8174		lpfc_sli4_unset_hba(phba);
8175		/* Try next level of interrupt mode */
8176		cfg_mode = --intr_mode;
8177	}
8178
8179	/* Perform post initialization setup */
8180	lpfc_post_init_setup(phba);
8181
8182	/* Check if there are static vports to be created. */
8183	lpfc_create_static_vport(phba);
8184
8185	return 0;
8186
8187out_disable_intr:
8188	lpfc_sli4_disable_intr(phba);
8189out_free_sysfs_attr:
8190	lpfc_free_sysfs_attr(vport);
8191out_destroy_shost:
8192	lpfc_destroy_shost(phba);
8193out_unset_driver_resource:
8194	lpfc_unset_driver_resource_phase2(phba);
8195out_free_iocb_list:
8196	lpfc_free_iocb_list(phba);
8197out_unset_driver_resource_s4:
8198	lpfc_sli4_driver_resource_unset(phba);
8199out_unset_pci_mem_s4:
8200	lpfc_sli4_pci_mem_unset(phba);
8201out_disable_pci_dev:
8202	lpfc_disable_pci_dev(phba);
8203	if (shost)
8204		scsi_host_put(shost);
8205out_free_phba:
8206	lpfc_hba_free(phba);
8207	return error;
8208}
8209
8210/**
8211 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8212 * @pdev: pointer to PCI device
8213 *
8214 * This routine is called from the kernel's PCI subsystem to device with
8215 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8216 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8217 * device to be removed from the PCI subsystem properly.
8218 **/
8219static void __devexit
8220lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8221{
8222	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8223	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8224	struct lpfc_vport **vports;
8225	struct lpfc_hba *phba = vport->phba;
8226	int i;
8227
8228	/* Mark the device unloading flag */
8229	spin_lock_irq(&phba->hbalock);
8230	vport->load_flag |= FC_UNLOADING;
8231	spin_unlock_irq(&phba->hbalock);
8232
8233	/* Free the HBA sysfs attributes */
8234	lpfc_free_sysfs_attr(vport);
8235
8236	/* Release all the vports against this physical port */
8237	vports = lpfc_create_vport_work_array(phba);
8238	if (vports != NULL)
8239		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8240			fc_vport_terminate(vports[i]->fc_vport);
8241	lpfc_destroy_vport_work_array(phba, vports);
8242
8243	/* Remove FC host and then SCSI host with the physical port */
8244	fc_remove_host(shost);
8245	scsi_remove_host(shost);
8246
8247	/* Perform cleanup on the physical port */
8248	lpfc_cleanup(vport);
8249
8250	/*
8251	 * Bring down the SLI Layer. This step disables all interrupts,
8252	 * clears the rings, discards all mailbox commands, and resets
8253	 * the HBA FCoE function.
8254	 */
8255	lpfc_debugfs_terminate(vport);
8256	lpfc_sli4_hba_unset(phba);
8257
8258	spin_lock_irq(&phba->hbalock);
8259	list_del_init(&vport->listentry);
8260	spin_unlock_irq(&phba->hbalock);
8261
8262	/* Perform scsi free before driver resource_unset since scsi
8263	 * buffers are released to their corresponding pools here.
8264	 */
8265	lpfc_scsi_free(phba);
8266	lpfc_sli4_driver_resource_unset(phba);
8267
8268	/* Unmap adapter Control and Doorbell registers */
8269	lpfc_sli4_pci_mem_unset(phba);
8270
8271	/* Release PCI resources and disable device's PCI function */
8272	scsi_host_put(shost);
8273	lpfc_disable_pci_dev(phba);
8274
8275	/* Finally, free the driver's device data structure */
8276	lpfc_hba_free(phba);
8277
8278	return;
8279}
8280
8281/**
8282 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8283 * @pdev: pointer to PCI device
8284 * @msg: power management message
8285 *
8286 * This routine is called from the kernel's PCI subsystem to support system
8287 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8288 * this method, it quiesces the device by stopping the driver's worker
8289 * thread for the device, turning off device's interrupt and DMA, and bring
8290 * the device offline. Note that as the driver implements the minimum PM
8291 * requirements to a power-aware driver's PM support for suspend/resume -- all
8292 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
8293 * method call will be treated as SUSPEND and the driver will fully
8294 * reinitialize its device during resume() method call, the driver will set
8295 * device to PCI_D3hot state in PCI config space instead of setting it
8296 * according to the @msg provided by the PM.
8297 *
8298 * Return code
8299 * 	0 - driver suspended the device
8300 * 	Error otherwise
8301 **/
8302static int
8303lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8304{
8305	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8306	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8307
8308	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8309			"2843 PCI device Power Management suspend.\n");
8310
8311	/* Bring down the device */
8312	lpfc_offline_prep(phba);
8313	lpfc_offline(phba);
8314	kthread_stop(phba->worker_thread);
8315
8316	/* Disable interrupt from device */
8317	lpfc_sli4_disable_intr(phba);
8318
8319	/* Save device state to PCI config space */
8320	pci_save_state(pdev);
8321	pci_set_power_state(pdev, PCI_D3hot);
8322
8323	return 0;
8324}
8325
8326/**
8327 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8328 * @pdev: pointer to PCI device
8329 *
8330 * This routine is called from the kernel's PCI subsystem to support system
8331 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
8332 * this method, it restores the device's PCI config space state and fully
8333 * reinitializes the device and brings it online. Note that as the driver
8334 * implements the minimum PM requirements to a power-aware driver's PM for
8335 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8336 * to the suspend() method call will be treated as SUSPEND and the driver
8337 * will fully reinitialize its device during resume() method call, the device
8338 * will be set to PCI_D0 directly in PCI config space before restoring the
8339 * state.
8340 *
8341 * Return code
8342 * 	0 - driver suspended the device
8343 * 	Error otherwise
8344 **/
8345static int
8346lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8347{
8348	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8349	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8350	uint32_t intr_mode;
8351	int error;
8352
8353	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8354			"0292 PCI device Power Management resume.\n");
8355
8356	/* Restore device state from PCI config space */
8357	pci_set_power_state(pdev, PCI_D0);
8358	pci_restore_state(pdev);
8359
8360	/*
8361	 * As the new kernel behavior of pci_restore_state() API call clears
8362	 * device saved_state flag, need to save the restored state again.
8363	 */
8364	pci_save_state(pdev);
8365
8366	if (pdev->is_busmaster)
8367		pci_set_master(pdev);
8368
8369	 /* Startup the kernel thread for this host adapter. */
8370	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8371					"lpfc_worker_%d", phba->brd_no);
8372	if (IS_ERR(phba->worker_thread)) {
8373		error = PTR_ERR(phba->worker_thread);
8374		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8375				"0293 PM resume failed to start worker "
8376				"thread: error=x%x.\n", error);
8377		return error;
8378	}
8379
8380	/* Configure and enable interrupt */
8381	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8382	if (intr_mode == LPFC_INTR_ERROR) {
8383		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8384				"0294 PM resume Failed to enable interrupt\n");
8385		return -EIO;
8386	} else
8387		phba->intr_mode = intr_mode;
8388
8389	/* Restart HBA and bring it online */
8390	lpfc_sli_brdrestart(phba);
8391	lpfc_online(phba);
8392
8393	/* Log the current active interrupt mode */
8394	lpfc_log_intr_mode(phba, phba->intr_mode);
8395
8396	return 0;
8397}
8398
8399/**
8400 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
8401 * @phba: pointer to lpfc hba data structure.
8402 *
8403 * This routine is called to prepare the SLI4 device for PCI slot recover. It
8404 * aborts all the outstanding SCSI I/Os to the pci device.
8405 **/
8406static void
8407lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
8408{
8409	struct lpfc_sli *psli = &phba->sli;
8410	struct lpfc_sli_ring  *pring;
8411
8412	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8413			"2828 PCI channel I/O abort preparing for recovery\n");
8414	/*
8415	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
8416	 * and let the SCSI mid-layer to retry them to recover.
8417	 */
8418	pring = &psli->ring[psli->fcp_ring];
8419	lpfc_sli_abort_iocb_ring(phba, pring);
8420}
8421
8422/**
8423 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
8424 * @phba: pointer to lpfc hba data structure.
8425 *
8426 * This routine is called to prepare the SLI4 device for PCI slot reset. It
8427 * disables the device interrupt and pci device, and aborts the internal FCP
8428 * pending I/Os.
8429 **/
8430static void
8431lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
8432{
8433	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8434			"2826 PCI channel disable preparing for reset\n");
8435
8436	/* Block any management I/Os to the device */
8437	lpfc_block_mgmt_io(phba);
8438
8439	/* Block all SCSI devices' I/Os on the host */
8440	lpfc_scsi_dev_block(phba);
8441
8442	/* stop all timers */
8443	lpfc_stop_hba_timers(phba);
8444
8445	/* Disable interrupt and pci device */
8446	lpfc_sli4_disable_intr(phba);
8447	pci_disable_device(phba->pcidev);
8448
8449	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
8450	lpfc_sli_flush_fcp_rings(phba);
8451}
8452
8453/**
8454 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
8455 * @phba: pointer to lpfc hba data structure.
8456 *
8457 * This routine is called to prepare the SLI4 device for PCI slot permanently
8458 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8459 * pending I/Os.
8460 **/
8461static void
8462lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8463{
8464	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8465			"2827 PCI channel permanent disable for failure\n");
8466
8467	/* Block all SCSI devices' I/Os on the host */
8468	lpfc_scsi_dev_block(phba);
8469
8470	/* stop all timers */
8471	lpfc_stop_hba_timers(phba);
8472
8473	/* Clean up all driver's outstanding SCSI I/Os */
8474	lpfc_sli_flush_fcp_rings(phba);
8475}
8476
8477/**
8478 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8479 * @pdev: pointer to PCI device.
8480 * @state: the current PCI connection state.
8481 *
8482 * This routine is called from the PCI subsystem for error handling to device
8483 * with SLI-4 interface spec. This function is called by the PCI subsystem
8484 * after a PCI bus error affecting this device has been detected. When this
8485 * function is invoked, it will need to stop all the I/Os and interrupt(s)
8486 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
8487 * for the PCI subsystem to perform proper recovery as desired.
8488 *
8489 * Return codes
8490 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8491 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8492 **/
8493static pci_ers_result_t
8494lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8495{
8496	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8497	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8498
8499	switch (state) {
8500	case pci_channel_io_normal:
8501		/* Non-fatal error, prepare for recovery */
8502		lpfc_sli4_prep_dev_for_recover(phba);
8503		return PCI_ERS_RESULT_CAN_RECOVER;
8504	case pci_channel_io_frozen:
8505		/* Fatal error, prepare for slot reset */
8506		lpfc_sli4_prep_dev_for_reset(phba);
8507		return PCI_ERS_RESULT_NEED_RESET;
8508	case pci_channel_io_perm_failure:
8509		/* Permanent failure, prepare for device down */
8510		lpfc_sli4_prep_dev_for_perm_failure(phba);
8511		return PCI_ERS_RESULT_DISCONNECT;
8512	default:
8513		/* Unknown state, prepare and request slot reset */
8514		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8515				"2825 Unknown PCI error state: x%x\n", state);
8516		lpfc_sli4_prep_dev_for_reset(phba);
8517		return PCI_ERS_RESULT_NEED_RESET;
8518	}
8519}
8520
8521/**
8522 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
8523 * @pdev: pointer to PCI device.
8524 *
8525 * This routine is called from the PCI subsystem for error handling to device
8526 * with SLI-4 interface spec. It is called after PCI bus has been reset to
8527 * restart the PCI card from scratch, as if from a cold-boot. During the
8528 * PCI subsystem error recovery, after the driver returns
8529 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8530 * recovery and then call this routine before calling the .resume method to
8531 * recover the device. This function will initialize the HBA device, enable
8532 * the interrupt, but it will just put the HBA to offline state without
8533 * passing any I/O traffic.
8534 *
8535 * Return codes
8536 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8537 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8538 */
8539static pci_ers_result_t
8540lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8541{
8542	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8543	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8544	struct lpfc_sli *psli = &phba->sli;
8545	uint32_t intr_mode;
8546
8547	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8548	if (pci_enable_device_mem(pdev)) {
8549		printk(KERN_ERR "lpfc: Cannot re-enable "
8550			"PCI device after reset.\n");
8551		return PCI_ERS_RESULT_DISCONNECT;
8552	}
8553
8554	pci_restore_state(pdev);
8555	if (pdev->is_busmaster)
8556		pci_set_master(pdev);
8557
8558	spin_lock_irq(&phba->hbalock);
8559	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8560	spin_unlock_irq(&phba->hbalock);
8561
8562	/* Configure and enable interrupt */
8563	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8564	if (intr_mode == LPFC_INTR_ERROR) {
8565		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8566				"2824 Cannot re-enable interrupt after "
8567				"slot reset.\n");
8568		return PCI_ERS_RESULT_DISCONNECT;
8569	} else
8570		phba->intr_mode = intr_mode;
8571
8572	/* Log the current active interrupt mode */
8573	lpfc_log_intr_mode(phba, phba->intr_mode);
8574
8575	return PCI_ERS_RESULT_RECOVERED;
8576}
8577
8578/**
8579 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
8580 * @pdev: pointer to PCI device
8581 *
8582 * This routine is called from the PCI subsystem for error handling to device
8583 * with SLI-4 interface spec. It is called when kernel error recovery tells
8584 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8585 * error recovery. After this call, traffic can start to flow from this device
8586 * again.
8587 **/
8588static void
8589lpfc_io_resume_s4(struct pci_dev *pdev)
8590{
8591	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8592	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8593
8594	/*
8595	 * In case of slot reset, as function reset is performed through
8596	 * mailbox command which needs DMA to be enabled, this operation
8597	 * has to be moved to the io resume phase. Taking device offline
8598	 * will perform the necessary cleanup.
8599	 */
8600	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
8601		/* Perform device reset */
8602		lpfc_offline_prep(phba);
8603		lpfc_offline(phba);
8604		lpfc_sli_brdrestart(phba);
8605		/* Bring the device back online */
8606		lpfc_online(phba);
8607	}
8608
8609	/* Clean up Advanced Error Reporting (AER) if needed */
8610	if (phba->hba_flag & HBA_AER_ENABLED)
8611		pci_cleanup_aer_uncorrect_error_status(pdev);
8612}
8613
8614/**
8615 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
8616 * @pdev: pointer to PCI device
8617 * @pid: pointer to PCI device identifier
8618 *
8619 * This routine is to be registered to the kernel's PCI subsystem. When an
8620 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
8621 * at PCI device-specific information of the device and driver to see if the
8622 * driver state that it can support this kind of device. If the match is
8623 * successful, the driver core invokes this routine. This routine dispatches
8624 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
8625 * do all the initialization that it needs to do to handle the HBA device
8626 * properly.
8627 *
8628 * Return code
8629 * 	0 - driver can claim the device
8630 * 	negative value - driver can not claim the device
8631 **/
8632static int __devinit
8633lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
8634{
8635	int rc;
8636	struct lpfc_sli_intf intf;
8637
8638	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
8639		return -ENODEV;
8640
8641	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
8642	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
8643		rc = lpfc_pci_probe_one_s4(pdev, pid);
8644	else
8645		rc = lpfc_pci_probe_one_s3(pdev, pid);
8646
8647	return rc;
8648}
8649
8650/**
8651 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
8652 * @pdev: pointer to PCI device
8653 *
8654 * This routine is to be registered to the kernel's PCI subsystem. When an
8655 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
8656 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
8657 * remove routine, which will perform all the necessary cleanup for the
8658 * device to be removed from the PCI subsystem properly.
8659 **/
8660static void __devexit
8661lpfc_pci_remove_one(struct pci_dev *pdev)
8662{
8663	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8664	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8665
8666	switch (phba->pci_dev_grp) {
8667	case LPFC_PCI_DEV_LP:
8668		lpfc_pci_remove_one_s3(pdev);
8669		break;
8670	case LPFC_PCI_DEV_OC:
8671		lpfc_pci_remove_one_s4(pdev);
8672		break;
8673	default:
8674		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8675				"1424 Invalid PCI device group: 0x%x\n",
8676				phba->pci_dev_grp);
8677		break;
8678	}
8679	return;
8680}
8681
8682/**
8683 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
8684 * @pdev: pointer to PCI device
8685 * @msg: power management message
8686 *
8687 * This routine is to be registered to the kernel's PCI subsystem to support
8688 * system Power Management (PM). When PM invokes this method, it dispatches
8689 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
8690 * suspend the device.
8691 *
8692 * Return code
8693 * 	0 - driver suspended the device
8694 * 	Error otherwise
8695 **/
8696static int
8697lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
8698{
8699	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8700	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8701	int rc = -ENODEV;
8702
8703	switch (phba->pci_dev_grp) {
8704	case LPFC_PCI_DEV_LP:
8705		rc = lpfc_pci_suspend_one_s3(pdev, msg);
8706		break;
8707	case LPFC_PCI_DEV_OC:
8708		rc = lpfc_pci_suspend_one_s4(pdev, msg);
8709		break;
8710	default:
8711		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8712				"1425 Invalid PCI device group: 0x%x\n",
8713				phba->pci_dev_grp);
8714		break;
8715	}
8716	return rc;
8717}
8718
8719/**
8720 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
8721 * @pdev: pointer to PCI device
8722 *
8723 * This routine is to be registered to the kernel's PCI subsystem to support
8724 * system Power Management (PM). When PM invokes this method, it dispatches
8725 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
8726 * resume the device.
8727 *
8728 * Return code
8729 * 	0 - driver suspended the device
8730 * 	Error otherwise
8731 **/
8732static int
8733lpfc_pci_resume_one(struct pci_dev *pdev)
8734{
8735	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8736	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8737	int rc = -ENODEV;
8738
8739	switch (phba->pci_dev_grp) {
8740	case LPFC_PCI_DEV_LP:
8741		rc = lpfc_pci_resume_one_s3(pdev);
8742		break;
8743	case LPFC_PCI_DEV_OC:
8744		rc = lpfc_pci_resume_one_s4(pdev);
8745		break;
8746	default:
8747		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8748				"1426 Invalid PCI device group: 0x%x\n",
8749				phba->pci_dev_grp);
8750		break;
8751	}
8752	return rc;
8753}
8754
8755/**
8756 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
8757 * @pdev: pointer to PCI device.
8758 * @state: the current PCI connection state.
8759 *
8760 * This routine is registered to the PCI subsystem for error handling. This
8761 * function is called by the PCI subsystem after a PCI bus error affecting
8762 * this device has been detected. When this routine is invoked, it dispatches
8763 * the action to the proper SLI-3 or SLI-4 device error detected handling
8764 * routine, which will perform the proper error detected operation.
8765 *
8766 * Return codes
8767 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8768 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8769 **/
8770static pci_ers_result_t
8771lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8772{
8773	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8774	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8775	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8776
8777	switch (phba->pci_dev_grp) {
8778	case LPFC_PCI_DEV_LP:
8779		rc = lpfc_io_error_detected_s3(pdev, state);
8780		break;
8781	case LPFC_PCI_DEV_OC:
8782		rc = lpfc_io_error_detected_s4(pdev, state);
8783		break;
8784	default:
8785		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8786				"1427 Invalid PCI device group: 0x%x\n",
8787				phba->pci_dev_grp);
8788		break;
8789	}
8790	return rc;
8791}
8792
8793/**
8794 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
8795 * @pdev: pointer to PCI device.
8796 *
8797 * This routine is registered to the PCI subsystem for error handling. This
8798 * function is called after PCI bus has been reset to restart the PCI card
8799 * from scratch, as if from a cold-boot. When this routine is invoked, it
8800 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
8801 * routine, which will perform the proper device reset.
8802 *
8803 * Return codes
8804 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8805 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8806 **/
8807static pci_ers_result_t
8808lpfc_io_slot_reset(struct pci_dev *pdev)
8809{
8810	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8811	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8812	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8813
8814	switch (phba->pci_dev_grp) {
8815	case LPFC_PCI_DEV_LP:
8816		rc = lpfc_io_slot_reset_s3(pdev);
8817		break;
8818	case LPFC_PCI_DEV_OC:
8819		rc = lpfc_io_slot_reset_s4(pdev);
8820		break;
8821	default:
8822		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8823				"1428 Invalid PCI device group: 0x%x\n",
8824				phba->pci_dev_grp);
8825		break;
8826	}
8827	return rc;
8828}
8829
8830/**
8831 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
8832 * @pdev: pointer to PCI device
8833 *
8834 * This routine is registered to the PCI subsystem for error handling. It
8835 * is called when kernel error recovery tells the lpfc driver that it is
8836 * OK to resume normal PCI operation after PCI bus error recovery. When
8837 * this routine is invoked, it dispatches the action to the proper SLI-3
8838 * or SLI-4 device io_resume routine, which will resume the device operation.
8839 **/
8840static void
8841lpfc_io_resume(struct pci_dev *pdev)
8842{
8843	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8844	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8845
8846	switch (phba->pci_dev_grp) {
8847	case LPFC_PCI_DEV_LP:
8848		lpfc_io_resume_s3(pdev);
8849		break;
8850	case LPFC_PCI_DEV_OC:
8851		lpfc_io_resume_s4(pdev);
8852		break;
8853	default:
8854		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8855				"1429 Invalid PCI device group: 0x%x\n",
8856				phba->pci_dev_grp);
8857		break;
8858	}
8859	return;
8860}
8861
8862static struct pci_device_id lpfc_id_table[] = {
8863	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
8864		PCI_ANY_ID, PCI_ANY_ID, },
8865	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
8866		PCI_ANY_ID, PCI_ANY_ID, },
8867	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
8868		PCI_ANY_ID, PCI_ANY_ID, },
8869	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
8870		PCI_ANY_ID, PCI_ANY_ID, },
8871	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
8872		PCI_ANY_ID, PCI_ANY_ID, },
8873	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
8874		PCI_ANY_ID, PCI_ANY_ID, },
8875	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
8876		PCI_ANY_ID, PCI_ANY_ID, },
8877	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
8878		PCI_ANY_ID, PCI_ANY_ID, },
8879	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
8880		PCI_ANY_ID, PCI_ANY_ID, },
8881	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
8882		PCI_ANY_ID, PCI_ANY_ID, },
8883	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
8884		PCI_ANY_ID, PCI_ANY_ID, },
8885	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
8886		PCI_ANY_ID, PCI_ANY_ID, },
8887	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
8888		PCI_ANY_ID, PCI_ANY_ID, },
8889	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
8890		PCI_ANY_ID, PCI_ANY_ID, },
8891	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
8892		PCI_ANY_ID, PCI_ANY_ID, },
8893	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
8894		PCI_ANY_ID, PCI_ANY_ID, },
8895	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
8896		PCI_ANY_ID, PCI_ANY_ID, },
8897	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
8898		PCI_ANY_ID, PCI_ANY_ID, },
8899	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
8900		PCI_ANY_ID, PCI_ANY_ID, },
8901	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
8902		PCI_ANY_ID, PCI_ANY_ID, },
8903	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
8904		PCI_ANY_ID, PCI_ANY_ID, },
8905	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
8906		PCI_ANY_ID, PCI_ANY_ID, },
8907	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
8908		PCI_ANY_ID, PCI_ANY_ID, },
8909	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
8910		PCI_ANY_ID, PCI_ANY_ID, },
8911	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
8912		PCI_ANY_ID, PCI_ANY_ID, },
8913	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
8914		PCI_ANY_ID, PCI_ANY_ID, },
8915	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
8916		PCI_ANY_ID, PCI_ANY_ID, },
8917	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
8918		PCI_ANY_ID, PCI_ANY_ID, },
8919	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
8920		PCI_ANY_ID, PCI_ANY_ID, },
8921	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
8922		PCI_ANY_ID, PCI_ANY_ID, },
8923	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
8924		PCI_ANY_ID, PCI_ANY_ID, },
8925	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
8926		PCI_ANY_ID, PCI_ANY_ID, },
8927	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
8928		PCI_ANY_ID, PCI_ANY_ID, },
8929	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
8930		PCI_ANY_ID, PCI_ANY_ID, },
8931	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
8932		PCI_ANY_ID, PCI_ANY_ID, },
8933	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
8934		PCI_ANY_ID, PCI_ANY_ID, },
8935	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
8936		PCI_ANY_ID, PCI_ANY_ID, },
8937	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
8938		PCI_ANY_ID, PCI_ANY_ID, },
8939	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8940		PCI_ANY_ID, PCI_ANY_ID, },
8941	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8942		PCI_ANY_ID, PCI_ANY_ID, },
8943	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
8944		PCI_ANY_ID, PCI_ANY_ID, },
8945	{ 0 }
8946};
8947
8948MODULE_DEVICE_TABLE(pci, lpfc_id_table);
8949
8950static struct pci_error_handlers lpfc_err_handler = {
8951	.error_detected = lpfc_io_error_detected,
8952	.slot_reset = lpfc_io_slot_reset,
8953	.resume = lpfc_io_resume,
8954};
8955
8956static struct pci_driver lpfc_driver = {
8957	.name		= LPFC_DRIVER_NAME,
8958	.id_table	= lpfc_id_table,
8959	.probe		= lpfc_pci_probe_one,
8960	.remove		= __devexit_p(lpfc_pci_remove_one),
8961	.suspend        = lpfc_pci_suspend_one,
8962	.resume		= lpfc_pci_resume_one,
8963	.err_handler    = &lpfc_err_handler,
8964};
8965
8966/**
8967 * lpfc_init - lpfc module initialization routine
8968 *
8969 * This routine is to be invoked when the lpfc module is loaded into the
8970 * kernel. The special kernel macro module_init() is used to indicate the
8971 * role of this routine to the kernel as lpfc module entry point.
8972 *
8973 * Return codes
8974 *   0 - successful
8975 *   -ENOMEM - FC attach transport failed
8976 *   all others - failed
8977 */
8978static int __init
8979lpfc_init(void)
8980{
8981	int error = 0;
8982
8983	printk(LPFC_MODULE_DESC "\n");
8984	printk(LPFC_COPYRIGHT "\n");
8985
8986	if (lpfc_enable_npiv) {
8987		lpfc_transport_functions.vport_create = lpfc_vport_create;
8988		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
8989	}
8990	lpfc_transport_template =
8991				fc_attach_transport(&lpfc_transport_functions);
8992	if (lpfc_transport_template == NULL)
8993		return -ENOMEM;
8994	if (lpfc_enable_npiv) {
8995		lpfc_vport_transport_template =
8996			fc_attach_transport(&lpfc_vport_transport_functions);
8997		if (lpfc_vport_transport_template == NULL) {
8998			fc_release_transport(lpfc_transport_template);
8999			return -ENOMEM;
9000		}
9001	}
9002	error = pci_register_driver(&lpfc_driver);
9003	if (error) {
9004		fc_release_transport(lpfc_transport_template);
9005		if (lpfc_enable_npiv)
9006			fc_release_transport(lpfc_vport_transport_template);
9007	}
9008
9009	return error;
9010}
9011
9012/**
9013 * lpfc_exit - lpfc module removal routine
9014 *
9015 * This routine is invoked when the lpfc module is removed from the kernel.
9016 * The special kernel macro module_exit() is used to indicate the role of
9017 * this routine to the kernel as lpfc module exit point.
9018 */
9019static void __exit
9020lpfc_exit(void)
9021{
9022	pci_unregister_driver(&lpfc_driver);
9023	fc_release_transport(lpfc_transport_template);
9024	if (lpfc_enable_npiv)
9025		fc_release_transport(lpfc_vport_transport_template);
9026	if (_dump_buf_data) {
9027		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
9028				"_dump_buf_data at 0x%p\n",
9029				(1L << _dump_buf_data_order), _dump_buf_data);
9030		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
9031	}
9032
9033	if (_dump_buf_dif) {
9034		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
9035				"_dump_buf_dif at 0x%p\n",
9036				(1L << _dump_buf_dif_order), _dump_buf_dif);
9037		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
9038	}
9039}
9040
9041module_init(lpfc_init);
9042module_exit(lpfc_exit);
9043MODULE_LICENSE("GPL");
9044MODULE_DESCRIPTION(LPFC_MODULE_DESC);
9045MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
9046MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
9047