lpfc_init.c revision 63e801ce685d151c5faca8f491adc2ad2e732259
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30#include <linux/ctype.h>
31#include <linux/aer.h>
32#include <linux/slab.h>
33
34#include <scsi/scsi.h>
35#include <scsi/scsi_device.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_transport_fc.h>
38
39#include "lpfc_hw4.h"
40#include "lpfc_hw.h"
41#include "lpfc_sli.h"
42#include "lpfc_sli4.h"
43#include "lpfc_nl.h"
44#include "lpfc_disc.h"
45#include "lpfc_scsi.h"
46#include "lpfc.h"
47#include "lpfc_logmsg.h"
48#include "lpfc_crtn.h"
49#include "lpfc_vport.h"
50#include "lpfc_version.h"
51
52char *_dump_buf_data;
53unsigned long _dump_buf_data_order;
54char *_dump_buf_dif;
55unsigned long _dump_buf_dif_order;
56spinlock_t _dump_buf_lock;
57
58static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
59static int lpfc_post_rcv_buf(struct lpfc_hba *);
60static int lpfc_sli4_queue_create(struct lpfc_hba *);
61static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
62static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
63static int lpfc_setup_endian_order(struct lpfc_hba *);
64static int lpfc_sli4_read_config(struct lpfc_hba *);
65static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
66static void lpfc_free_sgl_list(struct lpfc_hba *);
67static int lpfc_init_sgl_list(struct lpfc_hba *);
68static int lpfc_init_active_sgl_array(struct lpfc_hba *);
69static void lpfc_free_active_sgl(struct lpfc_hba *);
70static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
71static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
72static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
73static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
74static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
75
76static struct scsi_transport_template *lpfc_transport_template = NULL;
77static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
78static DEFINE_IDR(lpfc_hba_index);
79
80/**
81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
82 * @phba: pointer to lpfc hba data structure.
83 *
84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
85 * mailbox command. It retrieves the revision information from the HBA and
86 * collects the Vital Product Data (VPD) about the HBA in preparation for
87 * configuring the HBA.
88 *
89 * Return codes:
90 *   0 - success.
91 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
92 *   Any other value - indicates an error.
93 **/
94int
95lpfc_config_port_prep(struct lpfc_hba *phba)
96{
97	lpfc_vpd_t *vp = &phba->vpd;
98	int i = 0, rc;
99	LPFC_MBOXQ_t *pmb;
100	MAILBOX_t *mb;
101	char *lpfc_vpd_data = NULL;
102	uint16_t offset = 0;
103	static char licensed[56] =
104		    "key unlock for use with gnu public licensed code only\0";
105	static int init_key = 1;
106
107	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
108	if (!pmb) {
109		phba->link_state = LPFC_HBA_ERROR;
110		return -ENOMEM;
111	}
112
113	mb = &pmb->u.mb;
114	phba->link_state = LPFC_INIT_MBX_CMDS;
115
116	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
117		if (init_key) {
118			uint32_t *ptext = (uint32_t *) licensed;
119
120			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
121				*ptext = cpu_to_be32(*ptext);
122			init_key = 0;
123		}
124
125		lpfc_read_nv(phba, pmb);
126		memset((char*)mb->un.varRDnvp.rsvd3, 0,
127			sizeof (mb->un.varRDnvp.rsvd3));
128		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
129			 sizeof (licensed));
130
131		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
132
133		if (rc != MBX_SUCCESS) {
134			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
135					"0324 Config Port initialization "
136					"error, mbxCmd x%x READ_NVPARM, "
137					"mbxStatus x%x\n",
138					mb->mbxCommand, mb->mbxStatus);
139			mempool_free(pmb, phba->mbox_mem_pool);
140			return -ERESTART;
141		}
142		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
143		       sizeof(phba->wwnn));
144		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
145		       sizeof(phba->wwpn));
146	}
147
148	phba->sli3_options = 0x0;
149
150	/* Setup and issue mailbox READ REV command */
151	lpfc_read_rev(phba, pmb);
152	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153	if (rc != MBX_SUCCESS) {
154		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
155				"0439 Adapter failed to init, mbxCmd x%x "
156				"READ_REV, mbxStatus x%x\n",
157				mb->mbxCommand, mb->mbxStatus);
158		mempool_free(pmb, phba->mbox_mem_pool);
159		return -ERESTART;
160	}
161
162
163	/*
164	 * The value of rr must be 1 since the driver set the cv field to 1.
165	 * This setting requires the FW to set all revision fields.
166	 */
167	if (mb->un.varRdRev.rr == 0) {
168		vp->rev.rBit = 0;
169		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
170				"0440 Adapter failed to init, READ_REV has "
171				"missing revision information.\n");
172		mempool_free(pmb, phba->mbox_mem_pool);
173		return -ERESTART;
174	}
175
176	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
177		mempool_free(pmb, phba->mbox_mem_pool);
178		return -EINVAL;
179	}
180
181	/* Save information as VPD data */
182	vp->rev.rBit = 1;
183	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
184	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
185	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
186	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
187	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
188	vp->rev.biuRev = mb->un.varRdRev.biuRev;
189	vp->rev.smRev = mb->un.varRdRev.smRev;
190	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
191	vp->rev.endecRev = mb->un.varRdRev.endecRev;
192	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
193	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
194	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
195	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
196	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
197	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
198
199	/* If the sli feature level is less than 9, we must
200	 * tear down all RPIs and VPIs on link down if NPIV
201	 * is enabled.
202	 */
203	if (vp->rev.feaLevelHigh < 9)
204		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
205
206	if (lpfc_is_LC_HBA(phba->pcidev->device))
207		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
208						sizeof (phba->RandomData));
209
210	/* Get adapter VPD information */
211	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
212	if (!lpfc_vpd_data)
213		goto out_free_mbox;
214
215	do {
216		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
218
219		if (rc != MBX_SUCCESS) {
220			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
221					"0441 VPD not present on adapter, "
222					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
223					mb->mbxCommand, mb->mbxStatus);
224			mb->un.varDmp.word_cnt = 0;
225		}
226		/* Dump mem may return a zero word count when finished, or we
227		 * got a mailbox error; either way we are done.
228		 */
229		if (mb->un.varDmp.word_cnt == 0)
230			break;
231		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
232			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
233		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
234				      lpfc_vpd_data + offset,
235				      mb->un.varDmp.word_cnt);
236		offset += mb->un.varDmp.word_cnt;
237	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
238	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
239
240	kfree(lpfc_vpd_data);
241out_free_mbox:
242	mempool_free(pmb, phba->mbox_mem_pool);
243	return 0;
244}
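
/*
 * Illustrative sketch (not part of the driver): the chunked-fetch pattern
 * the routine above uses to pull the VPD region. The helper name
 * fetch_chunk and its signature are hypothetical; the driver's real
 * primitives are lpfc_dump_mem() plus lpfc_sli_issue_mbox(), with the
 * returned word_cnt acting as the per-iteration chunk size.
 */
#if 0
static int example_fetch_region(uint8_t *buf, int size,
				int (*fetch_chunk)(uint8_t *dst, int off,
						   int max))
{
	int offset = 0, cnt;

	do {
		/* returns the number of bytes written, 0 when done */
		cnt = fetch_chunk(buf + offset, offset, size - offset);
		offset += cnt;
	} while (cnt && offset < size);
	return offset;	/* total bytes collected */
}
#endif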
245
246/**
247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
248 * @phba: pointer to lpfc hba data structure.
249 * @pmboxq: pointer to the driver internal queue element for mailbox command.
250 *
251 * This is the completion handler for the driver's mailbox command that
252 * configures asynchronous event support on the device. If the mailbox
253 * command returns successfully, the internal async event support flag is
254 * set to 1; otherwise, it is set to 0.
255 **/
256static void
257lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
258{
259	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
260		phba->temp_sensor_support = 1;
261	else
262		phba->temp_sensor_support = 0;
263	mempool_free(pmboxq, phba->mbox_mem_pool);
264	return;
265}
266
267/**
268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
269 * @phba: pointer to lpfc hba data structure.
270 * @pmboxq: pointer to the driver internal queue element for mailbox command.
271 *
272 * This is the completion handler for the dump mailbox command used to get
273 * wake up parameters. When this command completes, the response contains
274 * the option ROM version of the HBA. This function translates the version
275 * number into a human readable string and stores it in OptionROMVersion.
276 **/
277static void
278lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
279{
280	struct prog_id *prg;
281	uint32_t prog_id_word;
282	char dist = ' ';
283	/* character array used for decoding dist type. */
284	char dist_char[] = "nabx";
285
286	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
287		mempool_free(pmboxq, phba->mbox_mem_pool);
288		return;
289	}
290
291	prg = (struct prog_id *) &prog_id_word;
292
293	/* word 7 contains the option ROM version */
294	prog_id_word = pmboxq->u.mb.un.varWords[7];
295
296	/* Decode the Option rom version word to a readable string */
297	if (prg->dist < 4)
298		dist = dist_char[prg->dist];
299
300	if ((prg->dist == 3) && (prg->num == 0))
301		sprintf(phba->OptionROMVersion, "%d.%d%d",
302			prg->ver, prg->rev, prg->lev);
303	else
304		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305			prg->ver, prg->rev, prg->lev,
306			dist, prg->num);
307	mempool_free(pmboxq, phba->mbox_mem_pool);
308	return;
309}
310
311/**
312 * lpfc_config_port_post - Perform lpfc initialization after config port
313 * @phba: pointer to lpfc hba data structure.
314 *
315 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
316 * command call. It performs all internal resource and state setups on the
317 * port: post IOCB buffers, enable appropriate host interrupt attentions,
318 * ELS ring timers, etc.
319 *
320 * Return codes
321 *   0 - success.
322 *   Any other value - error.
323 **/
324int
325lpfc_config_port_post(struct lpfc_hba *phba)
326{
327	struct lpfc_vport *vport = phba->pport;
328	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
329	LPFC_MBOXQ_t *pmb;
330	MAILBOX_t *mb;
331	struct lpfc_dmabuf *mp;
332	struct lpfc_sli *psli = &phba->sli;
333	uint32_t status, timeout;
334	int i, j;
335	int rc;
336
337	spin_lock_irq(&phba->hbalock);
338	/*
339	 * If the Config port completed correctly, the HBA is no
340	 * longer overheated.
341	 */
342	if (phba->over_temp_state == HBA_OVER_TEMP)
343		phba->over_temp_state = HBA_NORMAL_TEMP;
344	spin_unlock_irq(&phba->hbalock);
345
346	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
347	if (!pmb) {
348		phba->link_state = LPFC_HBA_ERROR;
349		return -ENOMEM;
350	}
351	mb = &pmb->u.mb;
352
353	/* Get login parameters for NID.  */
354	rc = lpfc_read_sparam(phba, pmb, 0);
355	if (rc) {
356		mempool_free(pmb, phba->mbox_mem_pool);
357		return -ENOMEM;
358	}
359
360	pmb->vport = vport;
361	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
362		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
363				"0448 Adapter failed init, mbxCmd x%x "
364				"READ_SPARM mbxStatus x%x\n",
365				mb->mbxCommand, mb->mbxStatus);
366		phba->link_state = LPFC_HBA_ERROR;
367		mp = (struct lpfc_dmabuf *) pmb->context1;
368		mempool_free(pmb, phba->mbox_mem_pool);
369		lpfc_mbuf_free(phba, mp->virt, mp->phys);
370		kfree(mp);
371		return -EIO;
372	}
373
374	mp = (struct lpfc_dmabuf *) pmb->context1;
375
376	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
377	lpfc_mbuf_free(phba, mp->virt, mp->phys);
378	kfree(mp);
379	pmb->context1 = NULL;
380
381	if (phba->cfg_soft_wwnn)
382		u64_to_wwn(phba->cfg_soft_wwnn,
383			   vport->fc_sparam.nodeName.u.wwn);
384	if (phba->cfg_soft_wwpn)
385		u64_to_wwn(phba->cfg_soft_wwpn,
386			   vport->fc_sparam.portName.u.wwn);
387	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388	       sizeof (struct lpfc_name));
389	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390	       sizeof (struct lpfc_name));
391
392	/* Update the fc_host data structures with new wwn. */
393	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
394	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
395	fc_host_max_npiv_vports(shost) = phba->max_vpi;
396
397	/* If no serial number in VPD data, use low 6 bytes of WWNN */
398	/* This should be consolidated into parse_vpd ? - mr */
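	/* Each of the low 6 WWNN bytes expands to two characters below:
	 * '0'-'9' for nibble values 0-9, 'a'-'f' for values 10-15.
	 */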
399	if (phba->SerialNumber[0] == 0) {
400		uint8_t *outptr;
401
402		outptr = &vport->fc_nodename.u.s.IEEE[0];
403		for (i = 0; i < 12; i++) {
404			status = *outptr++;
405			j = ((status & 0xf0) >> 4);
406			if (j <= 9)
407				phba->SerialNumber[i] =
408				    (char)((uint8_t) 0x30 + (uint8_t) j);
409			else
410				phba->SerialNumber[i] =
411				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
412			i++;
413			j = (status & 0xf);
414			if (j <= 9)
415				phba->SerialNumber[i] =
416				    (char)((uint8_t) 0x30 + (uint8_t) j);
417			else
418				phba->SerialNumber[i] =
419				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
420		}
421	}
422
423	lpfc_read_config(phba, pmb);
424	pmb->vport = vport;
425	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
426		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
427				"0453 Adapter failed to init, mbxCmd x%x "
428				"READ_CONFIG, mbxStatus x%x\n",
429				mb->mbxCommand, mb->mbxStatus);
430		phba->link_state = LPFC_HBA_ERROR;
431		mempool_free(pmb, phba->mbox_mem_pool);
432		return -EIO;
433	}
434
435	/* Check if the port is disabled */
436	lpfc_sli_read_link_ste(phba);
437
438	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
439	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
440		phba->cfg_hba_queue_depth =
441			(mb->un.varRdConfig.max_xri + 1) -
442					lpfc_sli4_get_els_iocb_cnt(phba);
443
444	phba->lmt = mb->un.varRdConfig.lmt;
445
446	/* Get the default values for Model Name and Description */
447	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
448
449	if ((phba->cfg_link_speed > LINK_SPEED_10G)
450	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
451		&& !(phba->lmt & LMT_1Gb))
452	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
453		&& !(phba->lmt & LMT_2Gb))
454	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
455		&& !(phba->lmt & LMT_4Gb))
456	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
457		&& !(phba->lmt & LMT_8Gb))
458	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
459		&& !(phba->lmt & LMT_10Gb))) {
460		/* Reset link speed to auto */
461		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
462			"1302 Invalid speed for this board: "
463			"Reset link speed to auto: x%x\n",
464			phba->cfg_link_speed);
465		phba->cfg_link_speed = LINK_SPEED_AUTO;
466	}
467
468	phba->link_state = LPFC_LINK_DOWN;
469
470	/* Only process IOCBs on ELS ring till hba_state is READY */
471	if (psli->ring[psli->extra_ring].cmdringaddr)
472		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
473	if (psli->ring[psli->fcp_ring].cmdringaddr)
474		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
475	if (psli->ring[psli->next_ring].cmdringaddr)
476		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
477
478	/* Post receive buffers for desired rings */
479	if (phba->sli_rev != 3)
480		lpfc_post_rcv_buf(phba);
481
482	/*
483	 * Configure HBA MSI-X attention conditions to messages when in MSI-X mode
484	 */
485	if (phba->intr_type == MSIX) {
486		rc = lpfc_config_msi(phba, pmb);
487		if (rc) {
488			mempool_free(pmb, phba->mbox_mem_pool);
489			return -EIO;
490		}
491		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
492		if (rc != MBX_SUCCESS) {
493			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
494					"0352 Config MSI mailbox command "
495					"failed, mbxCmd x%x, mbxStatus x%x\n",
496					pmb->u.mb.mbxCommand,
497					pmb->u.mb.mbxStatus);
498			mempool_free(pmb, phba->mbox_mem_pool);
499			return -EIO;
500		}
501	}
502
503	spin_lock_irq(&phba->hbalock);
504	/* Initialize ERATT handling flag */
505	phba->hba_flag &= ~HBA_ERATT_HANDLED;
506
507	/* Enable appropriate host interrupts */
508	status = readl(phba->HCregaddr);
509	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
510	if (psli->num_rings > 0)
511		status |= HC_R0INT_ENA;
512	if (psli->num_rings > 1)
513		status |= HC_R1INT_ENA;
514	if (psli->num_rings > 2)
515		status |= HC_R2INT_ENA;
516	if (psli->num_rings > 3)
517		status |= HC_R3INT_ENA;
518
519	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
520	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
521		status &= ~(HC_R0INT_ENA);
522
523	writel(status, phba->HCregaddr);
524	readl(phba->HCregaddr); /* flush */
525	spin_unlock_irq(&phba->hbalock);
526
527	/* Set up ring-0 (ELS) timer */
528	timeout = phba->fc_ratov * 2;
529	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
530	/* Set up heart beat (HB) timer */
531	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
532	phba->hb_outstanding = 0;
533	phba->last_completion_time = jiffies;
534	/* Set up error attention (ERATT) polling timer */
535	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
536
537	if (phba->hba_flag & LINK_DISABLED) {
538		lpfc_printf_log(phba,
539			KERN_ERR, LOG_INIT,
540			"2598 Adapter Link is disabled.\n");
541		lpfc_down_link(phba, pmb);
542		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
543		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
544		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
545			lpfc_printf_log(phba,
546			KERN_ERR, LOG_INIT,
547			"2599 Adapter failed to issue DOWN_LINK"
548			" mbox command rc 0x%x\n", rc);
549
550			mempool_free(pmb, phba->mbox_mem_pool);
551			return -EIO;
552		}
553	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
554		lpfc_init_link(phba, pmb, phba->cfg_topology,
555			phba->cfg_link_speed);
556		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
557		lpfc_set_loopback_flag(phba);
558		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
559		if (rc != MBX_SUCCESS) {
560			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
561				"0454 Adapter failed to init, mbxCmd x%x "
562				"INIT_LINK, mbxStatus x%x\n",
563				mb->mbxCommand, mb->mbxStatus);
564
565			/* Clear all interrupt enable conditions */
566			writel(0, phba->HCregaddr);
567			readl(phba->HCregaddr); /* flush */
568			/* Clear all pending interrupts */
569			writel(0xffffffff, phba->HAregaddr);
570			readl(phba->HAregaddr); /* flush */
571
572			phba->link_state = LPFC_HBA_ERROR;
573			if (rc != MBX_BUSY)
574				mempool_free(pmb, phba->mbox_mem_pool);
575			return -EIO;
576		}
577	}
578	/* MBOX buffer will be freed in mbox compl */
579	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
580	if (!pmb) {
581		phba->link_state = LPFC_HBA_ERROR;
582		return -ENOMEM;
583	}
584
585	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
586	pmb->mbox_cmpl = lpfc_config_async_cmpl;
587	pmb->vport = phba->pport;
588	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
589
590	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
591		lpfc_printf_log(phba,
592				KERN_ERR,
593				LOG_INIT,
594				"0456 Adapter failed to issue "
595				"ASYNCEVT_ENABLE mbox status x%x\n",
596				rc);
597		mempool_free(pmb, phba->mbox_mem_pool);
598	}
599
600	/* Get Option rom version */
601	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
602	if (!pmb) {
603		phba->link_state = LPFC_HBA_ERROR;
604		return -ENOMEM;
605	}
606
607	lpfc_dump_wakeup_param(phba, pmb);
608	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
609	pmb->vport = phba->pport;
610	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
611
612	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
613		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
614				"to get Option ROM version status x%x\n", rc);
615		mempool_free(pmb, phba->mbox_mem_pool);
616	}
617
618	return 0;
619}
620
621/**
622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure.
624 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
625 *
626 * This routine will issue the INIT_LINK mailbox command.
627 * It is available to other drivers through the lpfc_hba data
628 * structure for use as a delayed link up mechanism with the
629 * module parameter lpfc_suppress_link_up.
630 *
631 * Return code
632 *		0 - success
633 *		Any other value - error
634 **/
635int
636lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
637{
638	struct lpfc_vport *vport = phba->pport;
639	LPFC_MBOXQ_t *pmb;
640	MAILBOX_t *mb;
641	int rc;
642
643	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
644	if (!pmb) {
645		phba->link_state = LPFC_HBA_ERROR;
646		return -ENOMEM;
647	}
648	mb = &pmb->u.mb;
649	pmb->vport = vport;
650
651	lpfc_init_link(phba, pmb, phba->cfg_topology,
652		phba->cfg_link_speed);
653	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
654	lpfc_set_loopback_flag(phba);
655	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
656	if (rc != MBX_SUCCESS) {
657		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
658			"0498 Adapter failed to init, mbxCmd x%x "
659			"INIT_LINK, mbxStatus x%x\n",
660			mb->mbxCommand, mb->mbxStatus);
661		/* Clear all interrupt enable conditions */
662		writel(0, phba->HCregaddr);
663		readl(phba->HCregaddr); /* flush */
664		/* Clear all pending interrupts */
665		writel(0xffffffff, phba->HAregaddr);
666		readl(phba->HAregaddr); /* flush */
667		phba->link_state = LPFC_HBA_ERROR;
668		if (rc != MBX_BUSY || flag == MBX_POLL)
669			mempool_free(pmb, phba->mbox_mem_pool);
670		return -EIO;
671	}
672	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
673	if (flag == MBX_POLL)
674		mempool_free(pmb, phba->mbox_mem_pool);
675
676	return 0;
677}
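
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * using lpfc_hba_init_link() as the delayed link-up half of the
 * lpfc_suppress_link_up module parameter described above.
 */
#if 0
static int example_delayed_link_up(struct lpfc_hba *phba)
{
	/* With MBX_NOWAIT the mailbox is freed by the completion handler;
	 * with MBX_POLL lpfc_hba_init_link() frees it before returning.
	 */
	return lpfc_hba_init_link(phba, MBX_NOWAIT);
}
#endif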
678
679/**
680 * lpfc_hba_down_link - Bring down the FC link
681 * @phba: pointer to lpfc hba data structure.
682 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
683 *
684 * This routine will issue the DOWN_LINK mailbox command call.
685 * It is available to other drivers through the lpfc_hba data
686 * structure for use to stop the link.
687 *
688 * Return code
689 *		0 - success
690 *		Any other value - error
691 **/
692int
693lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
694{
695	LPFC_MBOXQ_t *pmb;
696	int rc;
697
698	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
699	if (!pmb) {
700		phba->link_state = LPFC_HBA_ERROR;
701		return -ENOMEM;
702	}
703
704	lpfc_printf_log(phba,
705		KERN_ERR, LOG_INIT,
706		"0491 Adapter Link is disabled.\n");
707	lpfc_down_link(phba, pmb);
708	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
709	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
710	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
711		lpfc_printf_log(phba,
712		KERN_ERR, LOG_INIT,
713		"2522 Adapter failed to issue DOWN_LINK"
714		" mbox command rc 0x%x\n", rc);
715
716		mempool_free(pmb, phba->mbox_mem_pool);
717		return -EIO;
718	}
719	if (flag == MBX_POLL)
720		mempool_free(pmb, phba->mbox_mem_pool);
721
722	return 0;
723}
724
725/**
726 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
727 * @phba: pointer to lpfc HBA data structure.
728 *
729 * This routine will do LPFC uninitialization before the HBA is reset when
730 * bringing down the SLI Layer.
731 *
732 * Return codes
733 *   0 - success.
734 *   Any other value - error.
735 **/
736int
737lpfc_hba_down_prep(struct lpfc_hba *phba)
738{
739	struct lpfc_vport **vports;
740	int i;
741
742	if (phba->sli_rev <= LPFC_SLI_REV3) {
743		/* Disable interrupts */
744		writel(0, phba->HCregaddr);
745		readl(phba->HCregaddr); /* flush */
746	}
747
748	if (phba->pport->load_flag & FC_UNLOADING)
749		lpfc_cleanup_discovery_resources(phba->pport);
750	else {
751		vports = lpfc_create_vport_work_array(phba);
752		if (vports != NULL)
753			for (i = 0; i <= phba->max_vports &&
754				vports[i] != NULL; i++)
755				lpfc_cleanup_discovery_resources(vports[i]);
756		lpfc_destroy_vport_work_array(phba, vports);
757	}
758	return 0;
759}
760
761/**
762 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
763 * @phba: pointer to lpfc HBA data structure.
764 *
765 * This routine will do uninitialization after the HBA is reset when
766 * bringing down the SLI Layer.
767 *
768 * Return codes
769 *   0 - success.
770 *   Any other value - error.
771 **/
772static int
773lpfc_hba_down_post_s3(struct lpfc_hba *phba)
774{
775	struct lpfc_sli *psli = &phba->sli;
776	struct lpfc_sli_ring *pring;
777	struct lpfc_dmabuf *mp, *next_mp;
778	LIST_HEAD(completions);
779	int i;
780
781	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
782		lpfc_sli_hbqbuf_free_all(phba);
783	else {
784		/* Cleanup preposted buffers on the ELS ring */
785		pring = &psli->ring[LPFC_ELS_RING];
786		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
787			list_del(&mp->list);
788			pring->postbufq_cnt--;
789			lpfc_mbuf_free(phba, mp->virt, mp->phys);
790			kfree(mp);
791		}
792	}
793
794	spin_lock_irq(&phba->hbalock);
795	for (i = 0; i < psli->num_rings; i++) {
796		pring = &psli->ring[i];
797
798		/* At this point in time the HBA is either reset or DOA. Either
799		 * way, nothing should be on txcmplq as it will NEVER complete.
800		 */
801		list_splice_init(&pring->txcmplq, &completions);
802		pring->txcmplq_cnt = 0;
803		spin_unlock_irq(&phba->hbalock);
804
805		/* Cancel all the IOCBs from the completions list */
806		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
807				      IOERR_SLI_ABORTED);
808
809		lpfc_sli_abort_iocb_ring(phba, pring);
810		spin_lock_irq(&phba->hbalock);
811	}
812	spin_unlock_irq(&phba->hbalock);
813
814	return 0;
815}
816
817/**
818 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
819 * @phba: pointer to lpfc HBA data structure.
820 *
821 * This routine will do uninitialization after the HBA is reset when
822 * bringing down the SLI Layer.
823 *
824 * Return codes
825 *   0 - success.
826 *   Any other value - error.
827 **/
828static int
829lpfc_hba_down_post_s4(struct lpfc_hba *phba)
830{
831	struct lpfc_scsi_buf *psb, *psb_next;
832	LIST_HEAD(aborts);
833	int ret;
834	unsigned long iflag = 0;
835	struct lpfc_sglq *sglq_entry = NULL;
836
837	ret = lpfc_hba_down_post_s3(phba);
838	if (ret)
839		return ret;
840	/* At this point in time the HBA is either reset or DOA. Either
841	 * way, nothing should be on lpfc_abts_els_sgl_list; entries need to
842	 * be on the lpfc_sgl_list so that they can either be freed if the
843	 * driver is unloading or reposted if the driver is restarting
844	 * the port.
845	 */
846	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
847					/* scsi_buf_list */
848	/* abts_sgl_list_lock required because worker thread uses this
849	 * list.
850	 */
851	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
852	list_for_each_entry(sglq_entry,
853		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
854		sglq_entry->state = SGL_FREED;
855
856	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
857			&phba->sli4_hba.lpfc_sgl_list);
858	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
859	/* abts_scsi_buf_list_lock required because worker thread uses this
860	 * list.
861	 */
862	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
863	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
864			&aborts);
865	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
866	spin_unlock_irq(&phba->hbalock);
867
868	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
869		psb->pCmd = NULL;
870		psb->status = IOSTAT_SUCCESS;
871	}
872	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
873	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
874	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
875	return 0;
876}
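
/*
 * Illustrative sketch (not part of the driver): the splice-under-lock
 * pattern used above, which steals list entries while holding the lock
 * and then walks them lock-free. The lock and list names here are
 * hypothetical.
 */
#if 0
static void example_reclaim(spinlock_t *lock, struct list_head *busy,
			    struct list_head *freelist)
{
	LIST_HEAD(tmp);

	spin_lock_irq(lock);
	list_splice_init(busy, &tmp);	/* steal entries under the lock */
	spin_unlock_irq(lock);

	/* ... fix up the entries on tmp without holding the lock ... */

	spin_lock_irq(lock);
	list_splice(&tmp, freelist);	/* hand them to the free list */
	spin_unlock_irq(lock);
}
#endif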
877
878/**
879 * lpfc_hba_down_post - Wrapper func for hba down post routine
880 * @phba: pointer to lpfc HBA data structure.
881 *
882 * This routine wraps the actual SLI3 or SLI4 routine for performing
883 * uninitialization after the HBA is reset when bringing down the SLI Layer.
884 *
885 * Return codes
886 *   0 - success.
887 *   Any other value - error.
888 **/
889int
890lpfc_hba_down_post(struct lpfc_hba *phba)
891{
892	return (*phba->lpfc_hba_down_post)(phba);
893}
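
/*
 * Illustrative sketch (not part of the driver): how the per-revision
 * function pointer dispatched above is typically wired up. The setup
 * function shown here is hypothetical; the driver performs this in its
 * API jump-table setup code.
 */
#if 0
static void example_init_api_table(struct lpfc_hba *phba, int sli_rev)
{
	if (sli_rev == LPFC_SLI_REV4)
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
	else
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
}
#endif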
894
895/**
896 * lpfc_hb_timeout - The HBA-timer timeout handler
897 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
898 *
899 * This is the HBA-timer timeout handler registered to the lpfc driver. When
900 * this timer fires, an HBA timeout event will be posted to the lpfc driver
901 * work-port-events bitmap and the worker thread is notified. This timeout
902 * event will be used by the worker thread to invoke the actual timeout
903 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
904 * be performed in the timeout handler and the HBA timeout event bit shall
905 * be cleared by the worker thread after it has taken the event bitmap out.
906 **/
907static void
908lpfc_hb_timeout(unsigned long ptr)
909{
910	struct lpfc_hba *phba;
911	uint32_t tmo_posted;
912	unsigned long iflag;
913
914	phba = (struct lpfc_hba *)ptr;
915
916	/* Check for heart beat timeout conditions */
917	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
918	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
919	if (!tmo_posted)
920		phba->pport->work_port_events |= WORKER_HB_TMO;
921	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
922
923	/* Tell the worker thread there is work to do */
924	if (!tmo_posted)
925		lpfc_worker_wake_up(phba);
926	return;
927}
928
929/**
930 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
931 * @phba: pointer to lpfc hba data structure.
932 * @pmboxq: pointer to the driver internal queue element for mailbox command.
933 *
934 * This is the callback function to the lpfc heart-beat mailbox command.
935 * If configured, the lpfc driver issues the heart-beat mailbox command to
936 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. When the
937 * heart-beat mailbox command is issued, the driver sets the heart-beat
938 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
939 * the heart-beat outstanding state. Once the mailbox command comes back and
940 * no error conditions are detected, the heart-beat mailbox command timer is
941 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
942 * state is cleared for the next heart-beat. If the timer expires with the
943 * heart-beat outstanding state set, the driver will put the HBA offline.
944 **/
945static void
946lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
947{
948	unsigned long drvr_flag;
949
950	spin_lock_irqsave(&phba->hbalock, drvr_flag);
951	phba->hb_outstanding = 0;
952	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
953
954	/* Check and reset heart-beat timer if necessary */
955	mempool_free(pmboxq, phba->mbox_mem_pool);
956	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
957		!(phba->link_state == LPFC_HBA_ERROR) &&
958		!(phba->pport->load_flag & FC_UNLOADING))
959		mod_timer(&phba->hb_tmofunc,
960			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
961	return;
962}
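
/*
 * Illustrative sketch (not part of the driver): the two re-arm intervals
 * the heart-beat machinery uses, expressed in jiffies. With
 * LPFC_HB_MBOX_INTERVAL == 5, LPFC_HB_MBOX_TIMEOUT == 30 and HZ == 250,
 * the timer is set 1250 or 7500 jiffies into the future.
 */
#if 0
static void example_hb_rearm(struct lpfc_hba *phba, int outstanding)
{
	unsigned long when = outstanding ?
		jiffies + HZ * LPFC_HB_MBOX_TIMEOUT :	/* let the cmd finish */
		jiffies + HZ * LPFC_HB_MBOX_INTERVAL;	/* normal cadence */

	mod_timer(&phba->hb_tmofunc, when);
}
#endif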
963
964/**
965 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
966 * @phba: pointer to lpfc hba data structure.
967 *
968 * This is the actual HBA-timer timeout handler to be invoked by the worker
969 * thread whenever the HBA timer has fired and an HBA-timeout event has been
970 * posted. This handler performs any periodic operations needed for the
971 * device. If such a periodic event has already been attended to, either in
972 * the interrupt handler or by processing slow-ring or fast-ring events
973 * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
974 * simply resets the timer for the next timeout period. If the lpfc
975 * heart-beat mailbox command is configured and there is no heart-beat
976 * mailbox command outstanding, a heart-beat mailbox command is issued and
977 * the timer is set properly. Otherwise, if a heart-beat mailbox command has
978 * been outstanding, the HBA is taken offline.
979 **/
980void
981lpfc_hb_timeout_handler(struct lpfc_hba *phba)
982{
983	struct lpfc_vport **vports;
984	LPFC_MBOXQ_t *pmboxq;
985	struct lpfc_dmabuf *buf_ptr;
986	int retval, i;
987	struct lpfc_sli *psli = &phba->sli;
988	LIST_HEAD(completions);
989
990	vports = lpfc_create_vport_work_array(phba);
991	if (vports != NULL)
992		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
993			lpfc_rcv_seq_check_edtov(vports[i]);
994	lpfc_destroy_vport_work_array(phba, vports);
995
996	if ((phba->link_state == LPFC_HBA_ERROR) ||
997		(phba->pport->load_flag & FC_UNLOADING) ||
998		(phba->pport->fc_flag & FC_OFFLINE_MODE))
999		return;
1000
1001	spin_lock_irq(&phba->pport->work_port_lock);
1002
1003	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
1004		jiffies)) {
1005		spin_unlock_irq(&phba->pport->work_port_lock);
1006		if (!phba->hb_outstanding)
1007			mod_timer(&phba->hb_tmofunc,
1008				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1009		else
1010			mod_timer(&phba->hb_tmofunc,
1011				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1012		return;
1013	}
1014	spin_unlock_irq(&phba->pport->work_port_lock);
1015
1016	if (phba->elsbuf_cnt &&
1017		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1018		spin_lock_irq(&phba->hbalock);
1019		list_splice_init(&phba->elsbuf, &completions);
1020		phba->elsbuf_cnt = 0;
1021		phba->elsbuf_prev_cnt = 0;
1022		spin_unlock_irq(&phba->hbalock);
1023
1024		while (!list_empty(&completions)) {
1025			list_remove_head(&completions, buf_ptr,
1026				struct lpfc_dmabuf, list);
1027			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1028			kfree(buf_ptr);
1029		}
1030	}
1031	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1032
1033	/* If there is no heart beat outstanding, issue a heartbeat command */
1034	if (phba->cfg_enable_hba_heartbeat) {
1035		if (!phba->hb_outstanding) {
1036			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1037				(list_empty(&psli->mboxq))) {
1038				pmboxq = mempool_alloc(phba->mbox_mem_pool,
1039							GFP_KERNEL);
1040				if (!pmboxq) {
1041					mod_timer(&phba->hb_tmofunc,
1042						 jiffies +
1043						 HZ * LPFC_HB_MBOX_INTERVAL);
1044					return;
1045				}
1046
1047				lpfc_heart_beat(phba, pmboxq);
1048				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1049				pmboxq->vport = phba->pport;
1050				retval = lpfc_sli_issue_mbox(phba, pmboxq,
1051						MBX_NOWAIT);
1052
1053				if (retval != MBX_BUSY &&
1054					retval != MBX_SUCCESS) {
1055					mempool_free(pmboxq,
1056							phba->mbox_mem_pool);
1057					mod_timer(&phba->hb_tmofunc,
1058						jiffies +
1059						HZ * LPFC_HB_MBOX_INTERVAL);
1060					return;
1061				}
1062				phba->skipped_hb = 0;
1063				phba->hb_outstanding = 1;
1064			} else if (time_before_eq(phba->last_completion_time,
1065					phba->skipped_hb)) {
1066				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1067					"2857 Last completion time not "
1068					"updated in %d ms\n",
1069					jiffies_to_msecs(jiffies
1070						 - phba->last_completion_time));
1071			} else
1072				phba->skipped_hb = jiffies;
1073
1074			mod_timer(&phba->hb_tmofunc,
1075				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1076			return;
1077		} else {
1078			/*
1079			* If heart beat timeout called with hb_outstanding set
1080			 * If the heart beat timeout is called with hb_outstanding
1081			 * set, we need to give the hb mailbox cmd a chance to
1082			 * complete or time out.
1083			 */
1084					"0459 Adapter heartbeat still out"
1085					"standing: last compl time was %d ms.\n",
1086					jiffies_to_msecs(jiffies
1087						 - phba->last_completion_time));
1088			mod_timer(&phba->hb_tmofunc,
1089				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1090		}
1091	}
1092}
1093
1094/**
1095 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1096 * @phba: pointer to lpfc hba data structure.
1097 *
1098 * This routine is called to bring the HBA offline when HBA hardware error
1099 * other than Port Error 6 has been detected.
1100 **/
1101static void
1102lpfc_offline_eratt(struct lpfc_hba *phba)
1103{
1104	struct lpfc_sli   *psli = &phba->sli;
1105
1106	spin_lock_irq(&phba->hbalock);
1107	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1108	spin_unlock_irq(&phba->hbalock);
1109	lpfc_offline_prep(phba);
1110
1111	lpfc_offline(phba);
1112	lpfc_reset_barrier(phba);
1113	spin_lock_irq(&phba->hbalock);
1114	lpfc_sli_brdreset(phba);
1115	spin_unlock_irq(&phba->hbalock);
1116	lpfc_hba_down_post(phba);
1117	lpfc_sli_brdready(phba, HS_MBRDY);
1118	lpfc_unblock_mgmt_io(phba);
1119	phba->link_state = LPFC_HBA_ERROR;
1120	return;
1121}
1122
1123/**
1124 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1125 * @phba: pointer to lpfc hba data structure.
1126 *
1127 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1128 * other than Port Error 6 has been detected.
1129 **/
1130static void
1131lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1132{
1133	lpfc_offline_prep(phba);
1134	lpfc_offline(phba);
1135	lpfc_sli4_brdreset(phba);
1136	lpfc_hba_down_post(phba);
1137	lpfc_sli4_post_status_check(phba);
1138	lpfc_unblock_mgmt_io(phba);
1139	phba->link_state = LPFC_HBA_ERROR;
1140}
1141
1142/**
1143 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1144 * @phba: pointer to lpfc hba data structure.
1145 *
1146 * This routine is invoked to handle the deferred HBA hardware error
1147 * conditions. This type of error is indicated by the HBA setting ER1
1148 * and another ER bit in the host status register. The driver will
1149 * wait until the ER1 bit clears before handling the error condition.
1150 **/
1151static void
1152lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1153{
1154	uint32_t old_host_status = phba->work_hs;
1155	struct lpfc_sli_ring  *pring;
1156	struct lpfc_sli *psli = &phba->sli;
1157
1158	/* If the pci channel is offline, ignore possible errors,
1159	 * since we cannot communicate with the pci card anyway.
1160	 */
1161	if (pci_channel_offline(phba->pcidev)) {
1162		spin_lock_irq(&phba->hbalock);
1163		phba->hba_flag &= ~DEFER_ERATT;
1164		spin_unlock_irq(&phba->hbalock);
1165		return;
1166	}
1167
1168	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1169		"0479 Deferred Adapter Hardware Error "
1170		"Data: x%x x%x x%x\n",
1171		phba->work_hs,
1172		phba->work_status[0], phba->work_status[1]);
1173
1174	spin_lock_irq(&phba->hbalock);
1175	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1176	spin_unlock_irq(&phba->hbalock);
1177
1178
1179	/*
1180	 * Firmware stops when it triggers erratt. That could cause I/Os to
1181	 * be dropped by the firmware. Error the iocbs (I/Os) on txcmplq and
1182	 * let the SCSI layer retry them after the link is re-established.
1183	 */
1184	pring = &psli->ring[psli->fcp_ring];
1185	lpfc_sli_abort_iocb_ring(phba, pring);
1186
1187	/*
1188	 * There was a firmware error. Take the hba offline and then
1189	 * attempt to restart it.
1190	 */
1191	lpfc_offline_prep(phba);
1192	lpfc_offline(phba);
1193
1194	/* Wait for the ER1 bit to clear.*/
1195	while (phba->work_hs & HS_FFER1) {
1196		msleep(100);
1197		phba->work_hs = readl(phba->HSregaddr);
1198		/* If driver is unloading let the worker thread continue */
1199		if (phba->pport->load_flag & FC_UNLOADING) {
1200			phba->work_hs = 0;
1201			break;
1202		}
1203	}
1204
1205	/*
1206	 * This is to protect against a race condition in which the
1207	 * first write to the host attention register clears the
1208	 * host status register.
1209	 */
1210	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1211		phba->work_hs = old_host_status & ~HS_FFER1;
1212
1213	spin_lock_irq(&phba->hbalock);
1214	phba->hba_flag &= ~DEFER_ERATT;
1215	spin_unlock_irq(&phba->hbalock);
1216	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1217	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1218}
1219
1220static void
1221lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1222{
1223	struct lpfc_board_event_header board_event;
1224	struct Scsi_Host *shost;
1225
1226	board_event.event_type = FC_REG_BOARD_EVENT;
1227	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1228	shost = lpfc_shost_from_vport(phba->pport);
1229	fc_host_post_vendor_event(shost, fc_get_event_number(),
1230				  sizeof(board_event),
1231				  (char *) &board_event,
1232				  LPFC_NL_VENDOR_ID);
1233}
1234
1235/**
1236 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1237 * @phba: pointer to lpfc hba data structure.
1238 *
1239 * This routine is invoked to handle the following HBA hardware error
1240 * conditions:
1241 * 1 - HBA error attention interrupt
1242 * 2 - DMA ring index out of range
1243 * 3 - Mailbox command came back as unknown
1244 **/
1245static void
1246lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1247{
1248	struct lpfc_vport *vport = phba->pport;
1249	struct lpfc_sli   *psli = &phba->sli;
1250	struct lpfc_sli_ring  *pring;
1251	uint32_t event_data;
1252	unsigned long temperature;
1253	struct temp_event temp_event_data;
1254	struct Scsi_Host  *shost;
1255
1256	/* If the pci channel is offline, ignore possible errors,
1257	 * since we cannot communicate with the pci card anyway.
1258	 */
1259	if (pci_channel_offline(phba->pcidev)) {
1260		spin_lock_irq(&phba->hbalock);
1261		phba->hba_flag &= ~DEFER_ERATT;
1262		spin_unlock_irq(&phba->hbalock);
1263		return;
1264	}
1265
1266	/* If resets are disabled then leave the HBA alone and return */
1267	if (!phba->cfg_enable_hba_reset)
1268		return;
1269
1270	/* Send an internal error event to mgmt application */
1271	lpfc_board_errevt_to_mgmt(phba);
1272
1273	if (phba->hba_flag & DEFER_ERATT)
1274		lpfc_handle_deferred_eratt(phba);
1275
1276	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1277		if (phba->work_hs & HS_FFER6)
1278			/* Re-establishing Link */
1279			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1280					"1301 Re-establishing Link "
1281					"Data: x%x x%x x%x\n",
1282					phba->work_hs, phba->work_status[0],
1283					phba->work_status[1]);
1284		if (phba->work_hs & HS_FFER8)
1285			/* Device Zeroization */
1286			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1287					"2861 Host Authentication device "
1288					"zeroization Data:x%x x%x x%x\n",
1289					phba->work_hs, phba->work_status[0],
1290					phba->work_status[1]);
1291
1292		spin_lock_irq(&phba->hbalock);
1293		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1294		spin_unlock_irq(&phba->hbalock);
1295
1296		/*
1297		 * Firmware stops when it triggers erratt with HS_FFER6.
1298		 * That could cause I/Os to be dropped by the firmware.
1299		 * Error the iocbs (I/Os) on txcmplq and let the SCSI layer
1300		 * retry them after the link is re-established.
1301		 */
1302		pring = &psli->ring[psli->fcp_ring];
1303		lpfc_sli_abort_iocb_ring(phba, pring);
1304
1305		/*
1306		 * There was a firmware error.  Take the hba offline and then
1307		 * attempt to restart it.
1308		 */
1309		lpfc_offline_prep(phba);
1310		lpfc_offline(phba);
1311		lpfc_sli_brdrestart(phba);
1312		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1313			lpfc_unblock_mgmt_io(phba);
1314			return;
1315		}
1316		lpfc_unblock_mgmt_io(phba);
1317	} else if (phba->work_hs & HS_CRIT_TEMP) {
1318		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1319		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1320		temp_event_data.event_code = LPFC_CRIT_TEMP;
1321		temp_event_data.data = (uint32_t)temperature;
1322
1323		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1324				"0406 Adapter maximum temperature exceeded "
1325				"(%ld), taking this port offline "
1326				"Data: x%x x%x x%x\n",
1327				temperature, phba->work_hs,
1328				phba->work_status[0], phba->work_status[1]);
1329
1330		shost = lpfc_shost_from_vport(phba->pport);
1331		fc_host_post_vendor_event(shost, fc_get_event_number(),
1332					  sizeof(temp_event_data),
1333					  (char *) &temp_event_data,
1334					  SCSI_NL_VID_TYPE_PCI
1335					  | PCI_VENDOR_ID_EMULEX);
1336
1337		spin_lock_irq(&phba->hbalock);
1338		phba->over_temp_state = HBA_OVER_TEMP;
1339		spin_unlock_irq(&phba->hbalock);
1340		lpfc_offline_eratt(phba);
1341
1342	} else {
1343		/* The if clause above forces this code path when the status
1344		 * failure is a value other than FFER6. Do not take the port
1345		 * offline twice. This is the adapter hardware error path.
1346		 */
1347		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1348				"0457 Adapter Hardware Error "
1349				"Data: x%x x%x x%x\n",
1350				phba->work_hs,
1351				phba->work_status[0], phba->work_status[1]);
1352
1353		event_data = FC_REG_DUMP_EVENT;
1354		shost = lpfc_shost_from_vport(vport);
1355		fc_host_post_vendor_event(shost, fc_get_event_number(),
1356				sizeof(event_data), (char *) &event_data,
1357				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1358
1359		lpfc_offline_eratt(phba);
1360	}
1361	return;
1362}
1363
1364/**
1365 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1366 * @phba: pointer to lpfc hba data structure.
1367 *
1368 * This routine is invoked to handle the SLI4 HBA hardware error attention
1369 * conditions.
1370 **/
1371static void
1372lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1373{
1374	struct lpfc_vport *vport = phba->pport;
1375	uint32_t event_data;
1376	struct Scsi_Host *shost;
1377
1378	/* If the pci channel is offline, ignore possible errors, since
1379	 * we cannot communicate with the pci card anyway.
1380	 */
1381	if (pci_channel_offline(phba->pcidev))
1382		return;
1383	/* If resets are disabled then leave the HBA alone and return */
1384	if (!phba->cfg_enable_hba_reset)
1385		return;
1386
1387	/* Send an internal error event to mgmt application */
1388	lpfc_board_errevt_to_mgmt(phba);
1389
1390	/* For now, the actual action for SLI4 device handling is not
1391	 * specified yet; just treat it as an adapter hardware failure.
1392	 */
1393	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1394			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1395			phba->work_status[0], phba->work_status[1]);
1396
1397	event_data = FC_REG_DUMP_EVENT;
1398	shost = lpfc_shost_from_vport(vport);
1399	fc_host_post_vendor_event(shost, fc_get_event_number(),
1400				  sizeof(event_data), (char *) &event_data,
1401				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1402
1403	lpfc_sli4_offline_eratt(phba);
1404}
1405
1406/**
1407 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1408 * @phba: pointer to lpfc HBA data structure.
1409 *
1410 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1411 * routine through the API jump table function pointer in the lpfc_hba struct.
1416 **/
1417void
1418lpfc_handle_eratt(struct lpfc_hba *phba)
1419{
1420	(*phba->lpfc_handle_eratt)(phba);
1421}
1422
1423/**
1424 * lpfc_handle_latt - The HBA link event handler
1425 * @phba: pointer to lpfc hba data structure.
1426 *
1427 * This routine is invoked from the worker thread to handle a HBA host
1428 * attention link event.
1429 **/
1430void
1431lpfc_handle_latt(struct lpfc_hba *phba)
1432{
1433	struct lpfc_vport *vport = phba->pport;
1434	struct lpfc_sli   *psli = &phba->sli;
1435	LPFC_MBOXQ_t *pmb;
1436	volatile uint32_t control;
1437	struct lpfc_dmabuf *mp;
1438	int rc = 0;
1439
1440	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1441	if (!pmb) {
1442		rc = 1;
1443		goto lpfc_handle_latt_err_exit;
1444	}
1445
1446	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1447	if (!mp) {
1448		rc = 2;
1449		goto lpfc_handle_latt_free_pmb;
1450	}
1451
1452	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1453	if (!mp->virt) {
1454		rc = 3;
1455		goto lpfc_handle_latt_free_mp;
1456	}
1457
1458	/* Cleanup any outstanding ELS commands */
1459	lpfc_els_flush_all_cmd(phba);
1460
1461	psli->slistat.link_event++;
1462	lpfc_read_la(phba, pmb, mp);
1463	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1464	pmb->vport = vport;
1465	/* Block ELS IOCBs until we have processed this mbox command */
1466	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1467	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1468	if (rc == MBX_NOT_FINISHED) {
1469		rc = 4;
1470		goto lpfc_handle_latt_free_mbuf;
1471	}
1472
1473	/* Clear Link Attention in HA REG */
1474	spin_lock_irq(&phba->hbalock);
1475	writel(HA_LATT, phba->HAregaddr);
1476	readl(phba->HAregaddr); /* flush */
1477	spin_unlock_irq(&phba->hbalock);
1478
1479	return;
1480
1481lpfc_handle_latt_free_mbuf:
1482	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1483	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1484lpfc_handle_latt_free_mp:
1485	kfree(mp);
1486lpfc_handle_latt_free_pmb:
1487	mempool_free(pmb, phba->mbox_mem_pool);
1488lpfc_handle_latt_err_exit:
1489	/* Enable Link attention interrupts */
1490	spin_lock_irq(&phba->hbalock);
1491	psli->sli_flag |= LPFC_PROCESS_LA;
1492	control = readl(phba->HCregaddr);
1493	control |= HC_LAINT_ENA;
1494	writel(control, phba->HCregaddr);
1495	readl(phba->HCregaddr); /* flush */
1496
1497	/* Clear Link Attention in HA REG */
1498	writel(HA_LATT, phba->HAregaddr);
1499	readl(phba->HAregaddr); /* flush */
1500	spin_unlock_irq(&phba->hbalock);
1501	lpfc_linkdown(phba);
1502	phba->link_state = LPFC_HBA_ERROR;
1503
1504	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1505		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1506
1507	return;
1508}
1509
1510/**
1511 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1512 * @phba: pointer to lpfc hba data structure.
1513 * @vpd: pointer to the vital product data.
1514 * @len: length of the vital product data in bytes.
1515 *
1516 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1517 * an array of characters. In this routine, the ModelName, ProgramType, and
1518 * ModelDesc, etc. fields of the phba data structure will be populated.
1519 *
1520 * Return codes
1521 *   0 - pointer to the VPD passed in is NULL
1522 *   1 - success
1523 **/
1524int
1525lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1526{
1527	uint8_t lenlo, lenhi;
1528	int Length;
1529	int i, j;
1530	int finished = 0;
1531	int index = 0;
1532
1533	if (!vpd)
1534		return 0;
1535
1536	/* Vital Product */
1537	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1538			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1539			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1540			(uint32_t) vpd[3]);
1541	while (!finished && (index < (len - 4))) {
1542		switch (vpd[index]) {
1543		case 0x82:
1544		case 0x91:
1545			index += 1;
1546			lenlo = vpd[index];
1547			index += 1;
1548			lenhi = vpd[index];
1549			index += 1;
1550			i = ((((unsigned short)lenhi) << 8) + lenlo);
1551			index += i;
1552			break;
1553		case 0x90:
1554			index += 1;
1555			lenlo = vpd[index];
1556			index += 1;
1557			lenhi = vpd[index];
1558			index += 1;
1559			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1560			if (Length > len - index)
1561				Length = len - index;
1562			while (Length > 0) {
1563				/* Look for Serial Number */
1564				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1565					index += 2;
1566					i = vpd[index];
1567					index += 1;
1568					j = 0;
1569					Length -= (3+i);
1570					while (i--) {
1571						phba->SerialNumber[j++] = vpd[index++];
1572						if (j == 31)
1573							break;
1574					}
1575					phba->SerialNumber[j] = 0;
1576					continue;
1577				}
1578				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1579					phba->vpd_flag |= VPD_MODEL_DESC;
1580					index += 2;
1581					i = vpd[index];
1582					index += 1;
1583					j = 0;
1584					Length -= (3+i);
1585					while (i--) {
1586						phba->ModelDesc[j++] = vpd[index++];
1587						if (j == 255)
1588							break;
1589					}
1590					phba->ModelDesc[j] = 0;
1591					continue;
1592				}
1593				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1594					phba->vpd_flag |= VPD_MODEL_NAME;
1595					index += 2;
1596					i = vpd[index];
1597					index += 1;
1598					j = 0;
1599					Length -= (3+i);
1600					while (i--) {
1601						phba->ModelName[j++] = vpd[index++];
1602						if (j == 79)
1603							break;
1604					}
1605					phba->ModelName[j] = 0;
1606					continue;
1607				}
1608				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1609					phba->vpd_flag |= VPD_PROGRAM_TYPE;
1610					index += 2;
1611					i = vpd[index];
1612					index += 1;
1613					j = 0;
1614					Length -= (3+i);
1615					while (i--) {
1616						phba->ProgramType[j++] = vpd[index++];
1617						if (j == 255)
1618							break;
1619					}
1620					phba->ProgramType[j] = 0;
1621					continue;
1622				}
1623				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1624					phba->vpd_flag |= VPD_PORT;
1625					index += 2;
1626					i = vpd[index];
1627					index += 1;
1628					j = 0;
1629					Length -= (3+i);
1630					while (i--) {
1631						phba->Port[j++] = vpd[index++];
1632						if (j == 19)
1633							break;
1634					}
1635					phba->Port[j] = 0;
1636					continue;
1637				}
1638				else {
1639					index += 2;
1640					i = vpd[index];
1641					index += 1;
1642					index += i;
1643					Length -= (3 + i);
1644				}
1645			}
1646			finished = 0;
1647			break;
1648		case 0x78:
1649			finished = 1;
1650			break;
1651		default:
1652			index++;
1653			break;
1654		}
1655	}
1656
1657	return 1;
1658}
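
/*
 * Illustrative sketch (not part of the driver): the PCI VPD framing the
 * parser above walks. A large resource opens with a tag byte (0x82
 * identifier string, 0x90 read-only fields, 0x78 end) followed by a
 * little-endian 16-bit length; 'SN' and 'V1'..'V4' are keyword fields
 * inside the 0x90 area, each carrying a one-byte length. The helper
 * below is hypothetical.
 */
#if 0
static int example_vpd_tag(const uint8_t *vpd, int index, int *length)
{
	/* length = lenlo + (lenhi << 8), matching the parser above */
	*length = vpd[index + 1] | (vpd[index + 2] << 8);
	return vpd[index];	/* the resource tag byte */
}
#endif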
1659
1660/**
1661 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1662 * @phba: pointer to lpfc hba data structure.
1663 * @mdp: pointer to the data structure to hold the derived model name.
1664 * @descp: pointer to the data structure to hold the derived description.
1665 *
1666 * This routine retrieves the HBA's description based on its registered PCI
1667 * device ID. The @descp passed into this function points to an array of 256
1668 * chars; it is returned with the model name, maximum speed, and host bus type.
1669 * The @mdp passed into this function points to an array of 80 chars. When the
1670 * function returns, the @mdp will be filled with the model name.
1671 **/
1672static void
1673lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1674{
1675	lpfc_vpd_t *vp;
1676	uint16_t dev_id = phba->pcidev->device;
1677	int max_speed;
1678	int GE = 0;
1679	int oneConnect = 0; /* default is not a oneConnect */
1680	struct {
1681		char *name;
1682		char *bus;
1683		char *function;
1684	} m = {"<Unknown>", "", ""};
1685
1686	if (mdp && mdp[0] != '\0'
1687		&& descp && descp[0] != '\0')
1688		return;
1689
1690	if (phba->lmt & LMT_10Gb)
1691		max_speed = 10;
1692	else if (phba->lmt & LMT_8Gb)
1693		max_speed = 8;
1694	else if (phba->lmt & LMT_4Gb)
1695		max_speed = 4;
1696	else if (phba->lmt & LMT_2Gb)
1697		max_speed = 2;
1698	else
1699		max_speed = 1;
1700
1701	vp = &phba->vpd;
1702
1703	switch (dev_id) {
1704	case PCI_DEVICE_ID_FIREFLY:
1705		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1706		break;
1707	case PCI_DEVICE_ID_SUPERFLY:
1708		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1709			m = (typeof(m)){"LP7000", "PCI",
1710					"Fibre Channel Adapter"};
1711		else
1712			m = (typeof(m)){"LP7000E", "PCI",
1713					"Fibre Channel Adapter"};
1714		break;
1715	case PCI_DEVICE_ID_DRAGONFLY:
1716		m = (typeof(m)){"LP8000", "PCI",
1717				"Fibre Channel Adapter"};
1718		break;
1719	case PCI_DEVICE_ID_CENTAUR:
1720		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1721			m = (typeof(m)){"LP9002", "PCI",
1722					"Fibre Channel Adapter"};
1723		else
1724			m = (typeof(m)){"LP9000", "PCI",
1725					"Fibre Channel Adapter"};
1726		break;
1727	case PCI_DEVICE_ID_RFLY:
1728		m = (typeof(m)){"LP952", "PCI",
1729				"Fibre Channel Adapter"};
1730		break;
1731	case PCI_DEVICE_ID_PEGASUS:
1732		m = (typeof(m)){"LP9802", "PCI-X",
1733				"Fibre Channel Adapter"};
1734		break;
1735	case PCI_DEVICE_ID_THOR:
1736		m = (typeof(m)){"LP10000", "PCI-X",
1737				"Fibre Channel Adapter"};
1738		break;
1739	case PCI_DEVICE_ID_VIPER:
1740		m = (typeof(m)){"LPX1000",  "PCI-X",
1741				"Fibre Channel Adapter"};
1742		break;
1743	case PCI_DEVICE_ID_PFLY:
1744		m = (typeof(m)){"LP982", "PCI-X",
1745				"Fibre Channel Adapter"};
1746		break;
1747	case PCI_DEVICE_ID_TFLY:
1748		m = (typeof(m)){"LP1050", "PCI-X",
1749				"Fibre Channel Adapter"};
1750		break;
1751	case PCI_DEVICE_ID_HELIOS:
1752		m = (typeof(m)){"LP11000", "PCI-X2",
1753				"Fibre Channel Adapter"};
1754		break;
1755	case PCI_DEVICE_ID_HELIOS_SCSP:
1756		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1757				"Fibre Channel Adapter"};
1758		break;
1759	case PCI_DEVICE_ID_HELIOS_DCSP:
1760		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1761				"Fibre Channel Adapter"};
1762		break;
1763	case PCI_DEVICE_ID_NEPTUNE:
1764		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1765		break;
1766	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1767		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1768		break;
1769	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1770		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1771		break;
1772	case PCI_DEVICE_ID_BMID:
1773		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1774		break;
1775	case PCI_DEVICE_ID_BSMB:
1776		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1777		break;
1778	case PCI_DEVICE_ID_ZEPHYR:
1779		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1780		break;
1781	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1782		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1783		break;
1784	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1785		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1786		GE = 1;
1787		break;
1788	case PCI_DEVICE_ID_ZMID:
1789		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1790		break;
1791	case PCI_DEVICE_ID_ZSMB:
1792		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1793		break;
1794	case PCI_DEVICE_ID_LP101:
1795		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1796		break;
1797	case PCI_DEVICE_ID_LP10000S:
1798		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1799		break;
1800	case PCI_DEVICE_ID_LP11000S:
1801		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1802		break;
1803	case PCI_DEVICE_ID_LPE11000S:
1804		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1805		break;
1806	case PCI_DEVICE_ID_SAT:
1807		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1808		break;
1809	case PCI_DEVICE_ID_SAT_MID:
1810		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1811		break;
1812	case PCI_DEVICE_ID_SAT_SMB:
1813		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1814		break;
1815	case PCI_DEVICE_ID_SAT_DCSP:
1816		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1817		break;
1818	case PCI_DEVICE_ID_SAT_SCSP:
1819		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1820		break;
1821	case PCI_DEVICE_ID_SAT_S:
1822		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1823		break;
1824	case PCI_DEVICE_ID_HORNET:
1825		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1826		GE = 1;
1827		break;
1828	case PCI_DEVICE_ID_PROTEUS_VF:
1829		m = (typeof(m)){"LPev12000", "PCIe IOV",
1830				"Fibre Channel Adapter"};
1831		break;
1832	case PCI_DEVICE_ID_PROTEUS_PF:
1833		m = (typeof(m)){"LPev12000", "PCIe IOV",
1834				"Fibre Channel Adapter"};
1835		break;
1836	case PCI_DEVICE_ID_PROTEUS_S:
1837		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1838				"Fibre Channel Adapter"};
1839		break;
1840	case PCI_DEVICE_ID_TIGERSHARK:
1841		oneConnect = 1;
1842		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1843		break;
1844	case PCI_DEVICE_ID_TOMCAT:
1845		oneConnect = 1;
1846		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1847		break;
1848	case PCI_DEVICE_ID_FALCON:
1849		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1850				"EmulexSecure Fibre"};
1851		break;
1852	case PCI_DEVICE_ID_BALIUS:
1853		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1854				"Fibre Channel Adapter"};
1855		break;
1856	default:
1857		m = (typeof(m)){"Unknown", "", ""};
1858		break;
1859	}
1860
1861	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* OneConnect HBAs require special processing; they are all initiators
	 * and we put the port number on the end.
	 */
1866	if (descp && descp[0] == '\0') {
1867		if (oneConnect)
1868			snprintf(descp, 255,
1869				"Emulex OneConnect %s, %s Initiator, Port %s",
1870				m.name, m.function,
1871				phba->Port);
1872		else
1873			snprintf(descp, 255,
1874				"Emulex %s %d%s %s %s",
1875				m.name, max_speed, (GE) ? "GE" : "Gb",
1876				m.bus, m.function);
1877	}
1878}
1879
1880/**
1881 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1882 * @phba: pointer to lpfc hba data structure.
1883 * @pring: pointer to a IOCB ring.
1884 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1885 *
1886 * This routine posts a given number of IOCBs with the associated DMA buffer
1887 * descriptors specified by the cnt argument to the given IOCB ring.
1888 *
1889 * Return codes
1890 *   The number of IOCBs NOT able to be posted to the IOCB ring.
1891 **/
1892int
1893lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1894{
1895	IOCB_t *icmd;
1896	struct lpfc_iocbq *iocb;
1897	struct lpfc_dmabuf *mp1, *mp2;
1898
1899	cnt += pring->missbufcnt;
1900
1901	/* While there are buffers to post */
1902	while (cnt > 0) {
		/* Allocate buffer for command iocb */
1904		iocb = lpfc_sli_get_iocbq(phba);
1905		if (iocb == NULL) {
1906			pring->missbufcnt = cnt;
1907			return cnt;
1908		}
1909		icmd = &iocb->iocb;
1910
1911		/* 2 buffers can be posted per command */
1912		/* Allocate buffer to post */
1913		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1914		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1916		if (!mp1 || !mp1->virt) {
1917			kfree(mp1);
1918			lpfc_sli_release_iocbq(phba, iocb);
1919			pring->missbufcnt = cnt;
1920			return cnt;
1921		}
1922
1923		INIT_LIST_HEAD(&mp1->list);
1924		/* Allocate buffer to post */
1925		if (cnt > 1) {
1926			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1927			if (mp2)
1928				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1929							    &mp2->phys);
1930			if (!mp2 || !mp2->virt) {
1931				kfree(mp2);
1932				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1933				kfree(mp1);
1934				lpfc_sli_release_iocbq(phba, iocb);
1935				pring->missbufcnt = cnt;
1936				return cnt;
1937			}
1938
1939			INIT_LIST_HEAD(&mp2->list);
1940		} else {
1941			mp2 = NULL;
1942		}
1943
1944		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1945		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1946		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1947		icmd->ulpBdeCount = 1;
1948		cnt--;
1949		if (mp2) {
1950			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1951			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1952			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1953			cnt--;
1954			icmd->ulpBdeCount = 2;
1955		}
1956
1957		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1958		icmd->ulpLe = 1;
1959
1960		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1961		    IOCB_ERROR) {
1962			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1963			kfree(mp1);
1964			cnt++;
1965			if (mp2) {
1966				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1967				kfree(mp2);
1968				cnt++;
1969			}
1970			lpfc_sli_release_iocbq(phba, iocb);
1971			pring->missbufcnt = cnt;
1972			return cnt;
1973		}
1974		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1975		if (mp2)
1976			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1977	}
1978	pring->missbufcnt = 0;
1979	return 0;
1980}
1981
1982/**
1983 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1984 * @phba: pointer to lpfc hba data structure.
1985 *
1986 * This routine posts initial receive IOCB buffers to the ELS ring. The
1987 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1988 * set to 64 IOCBs.
1989 *
1990 * Return codes
1991 *   0 - success (currently always success)
1992 **/
1993static int
1994lpfc_post_rcv_buf(struct lpfc_hba *phba)
1995{
1996	struct lpfc_sli *psli = &phba->sli;
1997
1998	/* Ring 0, ELS / CT buffers */
1999	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2000	/* Ring 2 - FCP no buffers needed */
2001
2002	return 0;
2003}
2004
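/*
 * S(N, V): rotate the 32-bit value V left by N bits (the SHA-1 circular
 * left shift); e.g. S(5, A) in the round loop below rotates A left by 5.
 */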
2005#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2006
2007/**
2008 * lpfc_sha_init - Set up initial array of hash table entries
2009 * @HashResultPointer: pointer to an array as hash table.
2010 *
 * This routine sets up the initial values in the array of hash table entries
 * for the LC HBAs.
2013 **/
2014static void
2015lpfc_sha_init(uint32_t * HashResultPointer)
2016{
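	/* Standard SHA-1 initial hash values (FIPS 180-1) */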
2017	HashResultPointer[0] = 0x67452301;
2018	HashResultPointer[1] = 0xEFCDAB89;
2019	HashResultPointer[2] = 0x98BADCFE;
2020	HashResultPointer[3] = 0x10325476;
2021	HashResultPointer[4] = 0xC3D2E1F0;
2022}
2023
2024/**
2025 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2026 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by @HashResultPointer
 * with the values from the working hash table pointed to by
 * @HashWorkingPointer. The results are put back into the initial hash table,
 * returned through the @HashResultPointer as the result hash table.
2033 **/
2034static void
2035lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2036{
2037	int t;
2038	uint32_t TEMP;
2039	uint32_t A, B, C, D, E;
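	/* Expand the 16 seed words into the 80-entry SHA-1 message schedule */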
2040	t = 16;
	do {
		HashWorkingPointer[t] =
			S(1, HashWorkingPointer[t - 3] ^
			     HashWorkingPointer[t - 8] ^
			     HashWorkingPointer[t - 14] ^
			     HashWorkingPointer[t - 16]);
	} while (++t <= 79);
2048	t = 0;
2049	A = HashResultPointer[0];
2050	B = HashResultPointer[1];
2051	C = HashResultPointer[2];
2052	D = HashResultPointer[3];
2053	E = HashResultPointer[4];
2054
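	/*
	 * Run the 80 SHA-1 rounds: four groups of 20, each with its own
	 * logical function and additive constant.
	 */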
2055	do {
2056		if (t < 20) {
2057			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2058		} else if (t < 40) {
2059			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2060		} else if (t < 60) {
2061			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2062		} else {
2063			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2064		}
2065		TEMP += S(5, A) + E + HashWorkingPointer[t];
2066		E = D;
2067		D = C;
2068		C = S(30, B);
2069		B = A;
2070		A = TEMP;
2071	} while (++t <= 79);
2072
2073	HashResultPointer[0] += A;
2074	HashResultPointer[1] += B;
2075	HashResultPointer[2] += C;
2076	HashResultPointer[3] += D;
2077	HashResultPointer[4] += E;
2078
2079}
2080
2081/**
2082 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2083 * @RandomChallenge: pointer to the entry of host challenge random number array.
2084 * @HashWorking: pointer to the entry of the working hash array.
2085 *
2086 * This routine calculates the working hash array referred by @HashWorking
2087 * from the challenge random numbers associated with the host, referred by
2088 * @RandomChallenge. The result is put into the entry of the working hash
2089 * array and returned by reference through @HashWorking.
2090 **/
2091static void
2092lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2093{
2094	*HashWorking = (*RandomChallenge ^ *HashWorking);
2095}
2096
2097/**
2098 * lpfc_hba_init - Perform special handling for LC HBA initialization
2099 * @phba: pointer to lpfc hba data structure.
2100 * @hbainit: pointer to an array of unsigned 32-bit integers.
2101 *
 * This routine performs the special handling for LC HBA initialization: it
 * derives the @hbainit challenge response by running the SHA-1 rounds over
 * the WWNN-seeded challenge data.
2103 **/
2104void
2105lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2106{
2107	int t;
2108	uint32_t *HashWorking;
2109	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2110
2111	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2112	if (!HashWorking)
2113		return;
2114
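	/* Seed both ends of the working array with the two WWNN words */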
2115	HashWorking[0] = HashWorking[78] = *pwwnn++;
2116	HashWorking[1] = HashWorking[79] = *pwwnn;
2117
2118	for (t = 0; t < 7; t++)
2119		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2120
2121	lpfc_sha_init(hbainit);
2122	lpfc_sha_iterate(hbainit, HashWorking);
2123	kfree(HashWorking);
2124}
2125
2126/**
2127 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2128 * @vport: pointer to a virtual N_Port data structure.
2129 *
2130 * This routine performs the necessary cleanups before deleting the @vport.
2131 * It invokes the discovery state machine to perform necessary state
2132 * transitions and to release the ndlps associated with the @vport. Note,
2133 * the physical port is treated as @vport 0.
2134 **/
2135void
2136lpfc_cleanup(struct lpfc_vport *vport)
2137{
2138	struct lpfc_hba   *phba = vport->phba;
2139	struct lpfc_nodelist *ndlp, *next_ndlp;
2140	int i = 0;
2141
2142	if (phba->link_state > LPFC_LINK_DOWN)
2143		lpfc_port_link_failure(vport);
2144
2145	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2146		if (!NLP_CHK_NODE_ACT(ndlp)) {
2147			ndlp = lpfc_enable_node(vport, ndlp,
2148						NLP_STE_UNUSED_NODE);
2149			if (!ndlp)
2150				continue;
2151			spin_lock_irq(&phba->ndlp_lock);
2152			NLP_SET_FREE_REQ(ndlp);
2153			spin_unlock_irq(&phba->ndlp_lock);
2154			/* Trigger the release of the ndlp memory */
2155			lpfc_nlp_put(ndlp);
2156			continue;
2157		}
2158		spin_lock_irq(&phba->ndlp_lock);
2159		if (NLP_CHK_FREE_REQ(ndlp)) {
2160			/* The ndlp should not be in memory free mode already */
2161			spin_unlock_irq(&phba->ndlp_lock);
2162			continue;
2163		} else
2164			/* Indicate request for freeing ndlp memory */
2165			NLP_SET_FREE_REQ(ndlp);
2166		spin_unlock_irq(&phba->ndlp_lock);
2167
2168		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2169		    ndlp->nlp_DID == Fabric_DID) {
2170			/* Just free up ndlp with Fabric_DID for vports */
2171			lpfc_nlp_put(ndlp);
2172			continue;
2173		}
2174
2175		if (ndlp->nlp_type & NLP_FABRIC)
2176			lpfc_disc_state_machine(vport, ndlp, NULL,
2177					NLP_EVT_DEVICE_RECOVERY);
2178
2179		lpfc_disc_state_machine(vport, ndlp, NULL,
2180					     NLP_EVT_DEVICE_RM);
2181
2182	}
2183
2184	/* At this point, ALL ndlp's should be gone
2185	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
2187	 */
2188	while (!list_empty(&vport->fc_nodes)) {
2189		if (i++ > 3000) {
2190			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2191				"0233 Nodelist not empty\n");
2192			list_for_each_entry_safe(ndlp, next_ndlp,
2193						&vport->fc_nodes, nlp_listp) {
2194				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2195						LOG_NODE,
2196						"0282 did:x%x ndlp:x%p "
2197						"usgmap:x%x refcnt:%d\n",
2198						ndlp->nlp_DID, (void *)ndlp,
2199						ndlp->nlp_usg_map,
2200						atomic_read(
2201							&ndlp->kref.refcount));
2202			}
2203			break;
2204		}
2205
2206		/* Wait for any activity on ndlps to settle */
2207		msleep(10);
2208	}
2209}
2210
2211/**
2212 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2213 * @vport: pointer to a virtual N_Port data structure.
2214 *
2215 * This routine stops all the timers associated with a @vport. This function
2216 * is invoked before disabling or deleting a @vport. Note that the physical
2217 * port is treated as @vport 0.
2218 **/
2219void
2220lpfc_stop_vport_timers(struct lpfc_vport *vport)
2221{
2222	del_timer_sync(&vport->els_tmofunc);
2223	del_timer_sync(&vport->fc_fdmitmo);
2224	lpfc_can_disctmo(vport);
2225	return;
2226}
2227
2228/**
2229 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2230 * @phba: pointer to lpfc hba data structure.
2231 *
2232 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2233 * caller of this routine should already hold the host lock.
2234 **/
2235void
2236__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2237{
2238	/* Clear pending FCF rediscovery wait flag */
2239	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2240
2241	/* Now, try to stop the timer */
2242	del_timer(&phba->fcf.redisc_wait);
2243}
2244
2245/**
2246 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2247 * @phba: pointer to lpfc hba data structure.
2248 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. With
 * the host lock held, it checks whether the FCF rediscovery wait timer is
 * pending before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
2253 **/
2254void
2255lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2256{
2257	spin_lock_irq(&phba->hbalock);
2258	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2259		/* FCF rediscovery timer already fired or stopped */
2260		spin_unlock_irq(&phba->hbalock);
2261		return;
2262	}
2263	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2264	/* Clear failover in progress flags */
2265	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2266	spin_unlock_irq(&phba->hbalock);
2267}
2268
2269/**
2270 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2271 * @phba: pointer to lpfc hba data structure.
2272 *
2273 * This routine stops all the timers associated with a HBA. This function is
2274 * invoked before either putting a HBA offline or unloading the driver.
2275 **/
2276void
2277lpfc_stop_hba_timers(struct lpfc_hba *phba)
2278{
2279	lpfc_stop_vport_timers(phba->pport);
2280	del_timer_sync(&phba->sli.mbox_tmo);
2281	del_timer_sync(&phba->fabric_block_timer);
2282	del_timer_sync(&phba->eratt_poll);
2283	del_timer_sync(&phba->hb_tmofunc);
2284	phba->hb_outstanding = 0;
2285
2286	switch (phba->pci_dev_grp) {
2287	case LPFC_PCI_DEV_LP:
2288		/* Stop any LightPulse device specific driver timers */
2289		del_timer_sync(&phba->fcp_poll_timer);
2290		break;
2291	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
2293		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2294		break;
2295	default:
2296		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2297				"0297 Invalid device group (x%x)\n",
2298				phba->pci_dev_grp);
2299		break;
2300	}
2301	return;
2302}
2303
2304/**
2305 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2306 * @phba: pointer to lpfc hba data structure.
2307 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline.
2313 **/
2314static void
2315lpfc_block_mgmt_io(struct lpfc_hba * phba)
2316{
2317	unsigned long iflag;
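	/* Assume MBX_HEARTBEAT's timeout when no mailbox command is active */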
2318	uint8_t actcmd = MBX_HEARTBEAT;
2319	unsigned long timeout;
2320
2322	spin_lock_irqsave(&phba->hbalock, iflag);
2323	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2324	if (phba->sli.mbox_active)
2325		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2326	spin_unlock_irqrestore(&phba->hbalock, iflag);
2327	/* Determine how long we might wait for the active mailbox
2328	 * command to be gracefully completed by firmware.
2329	 */
2330	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2331			jiffies;
	/* Wait for the outstanding mailbox command to complete */
2333	while (phba->sli.mbox_active) {
2334		/* Check active mailbox complete status every 2ms */
2335		msleep(2);
2336		if (time_after(jiffies, timeout)) {
2337			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2338				"2813 Mgmt IO is Blocked %x "
2339				"- mbox cmd %x still active\n",
2340				phba->sli.sli_flag, actcmd);
2341			break;
2342		}
2343	}
2344}
2345
2346/**
2347 * lpfc_online - Initialize and bring a HBA online
2348 * @phba: pointer to lpfc hba data structure.
2349 *
2350 * This routine initializes the HBA and brings a HBA online. During this
2351 * process, the management interface is blocked to prevent user space access
2352 * to the HBA interfering with the driver initialization.
2353 *
2354 * Return codes
2355 *   0 - successful
2356 *   1 - failed
2357 **/
2358int
2359lpfc_online(struct lpfc_hba *phba)
2360{
2361	struct lpfc_vport *vport;
2362	struct lpfc_vport **vports;
2363	int i;
2364
2365	if (!phba)
2366		return 0;
2367	vport = phba->pport;
2368
2369	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2370		return 0;
2371
2372	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2373			"0458 Bring Adapter online\n");
2374
2375	lpfc_block_mgmt_io(phba);
2376
2377	if (!lpfc_sli_queue_setup(phba)) {
2378		lpfc_unblock_mgmt_io(phba);
2379		return 1;
2380	}
2381
2382	if (phba->sli_rev == LPFC_SLI_REV4) {
2383		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2384			lpfc_unblock_mgmt_io(phba);
2385			return 1;
2386		}
2387	} else {
2388		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2389			lpfc_unblock_mgmt_io(phba);
2390			return 1;
2391		}
2392	}
2393
2394	vports = lpfc_create_vport_work_array(phba);
2395	if (vports != NULL)
2396		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2397			struct Scsi_Host *shost;
2398			shost = lpfc_shost_from_vport(vports[i]);
2399			spin_lock_irq(shost->host_lock);
2400			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2401			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2402				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2403			if (phba->sli_rev == LPFC_SLI_REV4)
2404				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2405			spin_unlock_irq(shost->host_lock);
2406		}
	lpfc_destroy_vport_work_array(phba, vports);
2408
2409	lpfc_unblock_mgmt_io(phba);
2410	return 0;
2411}
2412
2413/**
2414 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2415 * @phba: pointer to lpfc hba data structure.
2416 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space
 * access to the HBA, whether from the sysfs interface or the libdfc
 * interface, will be allowed. The HBA is set to block the management
 * interface when the driver prepares the HBA interface for online or
 * offline, and is set to unblock the management interface afterwards.
2423 **/
2424void
2425lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2426{
2427	unsigned long iflag;
2428
2429	spin_lock_irqsave(&phba->hbalock, iflag);
2430	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2431	spin_unlock_irqrestore(&phba->hbalock, iflag);
2432}
2433
2434/**
2435 * lpfc_offline_prep - Prepare a HBA to be brought offline
2436 * @phba: pointer to lpfc hba data structure.
2437 *
2438 * This routine is invoked to prepare a HBA to be brought offline. It performs
2439 * unregistration login to all the nodes on all vports and flushes the mailbox
2440 * queue to make it ready to be brought offline.
2441 **/
2442void
2443lpfc_offline_prep(struct lpfc_hba * phba)
2444{
2445	struct lpfc_vport *vport = phba->pport;
2446	struct lpfc_nodelist  *ndlp, *next_ndlp;
2447	struct lpfc_vport **vports;
2448	struct Scsi_Host *shost;
2449	int i;
2450
2451	if (vport->fc_flag & FC_OFFLINE_MODE)
2452		return;
2453
2454	lpfc_block_mgmt_io(phba);
2455
2456	lpfc_linkdown(phba);
2457
2458	/* Issue an unreg_login to all nodes on all vports */
2459	vports = lpfc_create_vport_work_array(phba);
2460	if (vports != NULL) {
2461		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2462			if (vports[i]->load_flag & FC_UNLOADING)
2463				continue;
2464			shost = lpfc_shost_from_vport(vports[i]);
2465			spin_lock_irq(shost->host_lock);
2466			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2467			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2468			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2469			spin_unlock_irq(shost->host_lock);
2470
			shost = lpfc_shost_from_vport(vports[i]);
2472			list_for_each_entry_safe(ndlp, next_ndlp,
2473						 &vports[i]->fc_nodes,
2474						 nlp_listp) {
2475				if (!NLP_CHK_NODE_ACT(ndlp))
2476					continue;
2477				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2478					continue;
2479				if (ndlp->nlp_type & NLP_FABRIC) {
2480					lpfc_disc_state_machine(vports[i], ndlp,
2481						NULL, NLP_EVT_DEVICE_RECOVERY);
2482					lpfc_disc_state_machine(vports[i], ndlp,
2483						NULL, NLP_EVT_DEVICE_RM);
2484				}
2485				spin_lock_irq(shost->host_lock);
2486				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2487				spin_unlock_irq(shost->host_lock);
2488				lpfc_unreg_rpi(vports[i], ndlp);
2489			}
2490		}
2491	}
2492	lpfc_destroy_vport_work_array(phba, vports);
2493
2494	lpfc_sli_mbox_sys_shutdown(phba);
2495}
2496
2497/**
2498 * lpfc_offline - Bring a HBA offline
2499 * @phba: pointer to lpfc hba data structure.
2500 *
2501 * This routine actually brings a HBA offline. It stops all the timers
2502 * associated with the HBA, brings down the SLI layer, and eventually
2503 * marks the HBA as in offline state for the upper layer protocol.
2504 **/
2505void
2506lpfc_offline(struct lpfc_hba *phba)
2507{
2508	struct Scsi_Host  *shost;
2509	struct lpfc_vport **vports;
2510	int i;
2511
2512	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2513		return;
2514
2515	/* stop port and all timers associated with this hba */
2516	lpfc_stop_port(phba);
2517	vports = lpfc_create_vport_work_array(phba);
2518	if (vports != NULL)
2519		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2520			lpfc_stop_vport_timers(vports[i]);
2521	lpfc_destroy_vport_work_array(phba, vports);
2522	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2523			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline now. */
2526	lpfc_sli_hba_down(phba);
2527	spin_lock_irq(&phba->hbalock);
2528	phba->work_ha = 0;
2529	spin_unlock_irq(&phba->hbalock);
2530	vports = lpfc_create_vport_work_array(phba);
2531	if (vports != NULL)
2532		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2533			shost = lpfc_shost_from_vport(vports[i]);
2534			spin_lock_irq(shost->host_lock);
2535			vports[i]->work_port_events = 0;
2536			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2537			spin_unlock_irq(shost->host_lock);
2538		}
2539	lpfc_destroy_vport_work_array(phba, vports);
2540}
2541
2542/**
2543 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2544 * @phba: pointer to lpfc hba data structure.
2545 *
2546 * This routine is to free all the SCSI buffers and IOCBs from the driver
2547 * list back to kernel. It is called from lpfc_pci_remove_one to free
2548 * the internal resources before the device is removed from the system.
2549 *
2550 * Return codes
2551 *   0 - successful (for now, it always returns 0)
2552 **/
2553static int
2554lpfc_scsi_free(struct lpfc_hba *phba)
2555{
2556	struct lpfc_scsi_buf *sb, *sb_next;
2557	struct lpfc_iocbq *io, *io_next;
2558
2559	spin_lock_irq(&phba->hbalock);
2560	/* Release all the lpfc_scsi_bufs maintained by this host. */
2561	spin_lock(&phba->scsi_buf_list_lock);
2562	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2563		list_del(&sb->list);
2564		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2565			      sb->dma_handle);
2566		kfree(sb);
2567		phba->total_scsi_bufs--;
2568	}
2569	spin_unlock(&phba->scsi_buf_list_lock);
2570
2571	/* Release all the lpfc_iocbq entries maintained by this host. */
2572	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2573		list_del(&io->list);
2574		kfree(io);
2575		phba->total_iocbq_bufs--;
2576	}
2577	spin_unlock_irq(&phba->hbalock);
2578	return 0;
2579}
2580
2581/**
2582 * lpfc_create_port - Create an FC port
2583 * @phba: pointer to lpfc hba data structure.
2584 * @instance: a unique integer ID to this FC port.
2585 * @dev: pointer to the device data structure.
2586 *
 * This routine creates an FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates it with the newly created FC port before adding the shost
 * to the SCSI layer.
2592 *
2593 * Return codes
2594 *   @vport - pointer to the virtual N_Port data structure.
2595 *   NULL - port create failed.
2596 **/
2597struct lpfc_vport *
2598lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2599{
2600	struct lpfc_vport *vport;
2601	struct Scsi_Host  *shost;
2602	int error = 0;
2603
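	/*
	 * Only the physical port's shost hangs off the PCI device itself;
	 * any other device indicates an NPIV vport (see port_type below).
	 */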
2604	if (dev != &phba->pcidev->dev)
2605		shost = scsi_host_alloc(&lpfc_vport_template,
2606					sizeof(struct lpfc_vport));
2607	else
2608		shost = scsi_host_alloc(&lpfc_template,
2609					sizeof(struct lpfc_vport));
2610	if (!shost)
2611		goto out;
2612
2613	vport = (struct lpfc_vport *) shost->hostdata;
2614	vport->phba = phba;
2615	vport->load_flag |= FC_LOADING;
2616	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2617	vport->fc_rscn_flush = 0;
2618
2619	lpfc_get_vport_cfgparam(vport);
2620	shost->unique_id = instance;
2621	shost->max_id = LPFC_MAX_TARGET;
2622	shost->max_lun = vport->cfg_max_luns;
2623	shost->this_id = -1;
2624	shost->max_cmd_len = 16;
2625	if (phba->sli_rev == LPFC_SLI_REV4) {
2626		shost->dma_boundary =
2627			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2628		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2629	}
2630
2631	/*
2632	 * Set initial can_queue value since 0 is no longer supported and
2633	 * scsi_add_host will fail. This will be adjusted later based on the
2634	 * max xri value determined in hba setup.
2635	 */
2636	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2637	if (dev != &phba->pcidev->dev) {
2638		shost->transportt = lpfc_vport_transport_template;
2639		vport->port_type = LPFC_NPIV_PORT;
2640	} else {
2641		shost->transportt = lpfc_transport_template;
2642		vport->port_type = LPFC_PHYSICAL_PORT;
2643	}
2644
2645	/* Initialize all internally managed lists. */
2646	INIT_LIST_HEAD(&vport->fc_nodes);
2647	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2648	spin_lock_init(&vport->work_port_lock);
2649
2650	init_timer(&vport->fc_disctmo);
2651	vport->fc_disctmo.function = lpfc_disc_timeout;
2652	vport->fc_disctmo.data = (unsigned long)vport;
2653
2654	init_timer(&vport->fc_fdmitmo);
2655	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2656	vport->fc_fdmitmo.data = (unsigned long)vport;
2657
2658	init_timer(&vport->els_tmofunc);
2659	vport->els_tmofunc.function = lpfc_els_timeout;
2660	vport->els_tmofunc.data = (unsigned long)vport;
2661	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2662	if (error)
2663		goto out_put_shost;
2664
2665	spin_lock_irq(&phba->hbalock);
2666	list_add_tail(&vport->listentry, &phba->port_list);
2667	spin_unlock_irq(&phba->hbalock);
2668	return vport;
2669
2670out_put_shost:
2671	scsi_host_put(shost);
2672out:
2673	return NULL;
2674}
2675
2676/**
 * destroy_port - destroy an FC port
2678 * @vport: pointer to an lpfc virtual N_Port data structure.
2679 *
2680 * This routine destroys a FC port from the upper layer protocol. All the
2681 * resources associated with the port are released.
2682 **/
2683void
2684destroy_port(struct lpfc_vport *vport)
2685{
2686	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2687	struct lpfc_hba  *phba = vport->phba;
2688
2689	lpfc_debugfs_terminate(vport);
2690	fc_remove_host(shost);
2691	scsi_remove_host(shost);
2692
2693	spin_lock_irq(&phba->hbalock);
2694	list_del_init(&vport->listentry);
2695	spin_unlock_irq(&phba->hbalock);
2696
2697	lpfc_cleanup(vport);
2698	return;
2699}
2700
2701/**
2702 * lpfc_get_instance - Get a unique integer ID
2703 *
2704 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2705 * uses the kernel idr facility to perform the task.
2706 *
2707 * Return codes:
2708 *   instance - a unique integer ID allocated as the new instance.
2709 *   -1 - lpfc get instance failed.
2710 **/
2711int
2712lpfc_get_instance(void)
2713{
2714	int instance = 0;
2715
2716	/* Assign an unused number */
2717	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2718		return -1;
2719	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2720		return -1;
2721	return instance;
2722}
2723
2724/**
2725 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2726 * @shost: pointer to SCSI host data structure.
2727 * @time: elapsed time of the scan in jiffies.
2728 *
2729 * This routine is called by the SCSI layer with a SCSI host to determine
2730 * whether the scan host is finished.
2731 *
2732 * Note: there is no scan_start function as adapter initialization will have
2733 * asynchronously kicked off the link initialization.
2734 *
2735 * Return codes
2736 *   0 - SCSI host scan is not over yet.
2737 *   1 - SCSI host scan is over.
2738 **/
2739int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2740{
2741	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2742	struct lpfc_hba   *phba = vport->phba;
2743	int stat = 0;
2744
2745	spin_lock_irq(shost->host_lock);
2746
2747	if (vport->load_flag & FC_UNLOADING) {
2748		stat = 1;
2749		goto finished;
2750	}
2751	if (time >= 30 * HZ) {
2752		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2753				"0461 Scanning longer than 30 "
2754				"seconds.  Continuing initialization\n");
2755		stat = 1;
2756		goto finished;
2757	}
2758	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2759		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2760				"0465 Link down longer than 15 "
2761				"seconds.  Continuing initialization\n");
2762		stat = 1;
2763		goto finished;
2764	}
2765
2766	if (vport->port_state != LPFC_VPORT_READY)
2767		goto finished;
2768	if (vport->num_disc_nodes || vport->fc_prli_sent)
2769		goto finished;
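	/* If nothing is mapped yet, keep the scan going for up to 2 seconds */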
2770	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2771		goto finished;
2772	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2773		goto finished;
2774
2775	stat = 1;
2776
2777finished:
2778	spin_unlock_irq(shost->host_lock);
2779	return stat;
2780}
2781
2782/**
2783 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2784 * @shost: pointer to SCSI host data structure.
2785 *
2786 * This routine initializes a given SCSI host attributes on a FC port. The
2787 * SCSI host can be either on top of a physical port or a virtual port.
2788 **/
2789void lpfc_host_attrib_init(struct Scsi_Host *shost)
2790{
2791	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2792	struct lpfc_hba   *phba = vport->phba;
2793	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2795	 */
2796
2797	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2798	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2799	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2800
2801	memset(fc_host_supported_fc4s(shost), 0,
2802	       sizeof(fc_host_supported_fc4s(shost)));
2803	fc_host_supported_fc4s(shost)[2] = 1;
2804	fc_host_supported_fc4s(shost)[7] = 1;
2805
2806	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2807				 sizeof fc_host_symbolic_name(shost));
2808
2809	fc_host_supported_speeds(shost) = 0;
2810	if (phba->lmt & LMT_10Gb)
2811		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2812	if (phba->lmt & LMT_8Gb)
2813		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2814	if (phba->lmt & LMT_4Gb)
2815		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2816	if (phba->lmt & LMT_2Gb)
2817		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2818	if (phba->lmt & LMT_1Gb)
2819		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2820
2821	fc_host_maxframe_size(shost) =
2822		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2823		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2824
2825	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2826
2827	/* This value is also unchanging */
2828	memset(fc_host_active_fc4s(shost), 0,
2829	       sizeof(fc_host_active_fc4s(shost)));
2830	fc_host_active_fc4s(shost)[2] = 1;
2831	fc_host_active_fc4s(shost)[7] = 1;
2832
2833	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2834	spin_lock_irq(shost->host_lock);
2835	vport->load_flag &= ~FC_LOADING;
2836	spin_unlock_irq(shost->host_lock);
2837}
2838
2839/**
2840 * lpfc_stop_port_s3 - Stop SLI3 device port
2841 * @phba: pointer to lpfc hba data structure.
2842 *
2843 * This routine is invoked to stop an SLI3 device port, it stops the device
 * This routine is invoked to stop an SLI3 device port; it stops the device
2845 * device.
2846 **/
2847static void
2848lpfc_stop_port_s3(struct lpfc_hba *phba)
2849{
2850	/* Clear all interrupt enable conditions */
2851	writel(0, phba->HCregaddr);
2852	readl(phba->HCregaddr); /* flush */
2853	/* Clear all pending interrupts */
2854	writel(0xffffffff, phba->HAregaddr);
2855	readl(phba->HAregaddr); /* flush */
2856
2857	/* Reset some HBA SLI setup states */
2858	lpfc_stop_hba_timers(phba);
2859	phba->pport->work_port_events = 0;
2860}
2861
2862/**
2863 * lpfc_stop_port_s4 - Stop SLI4 device port
2864 * @phba: pointer to lpfc hba data structure.
2865 *
2866 * This routine is invoked to stop an SLI4 device port, it stops the device
 * This routine is invoked to stop an SLI4 device port; it stops the device
2868 * device.
2869 **/
2870static void
2871lpfc_stop_port_s4(struct lpfc_hba *phba)
2872{
2873	/* Reset some HBA SLI4 setup states */
2874	lpfc_stop_hba_timers(phba);
2875	phba->pport->work_port_events = 0;
2876	phba->sli4_hba.intr_enable = 0;
2877}
2878
2879/**
2880 * lpfc_stop_port - Wrapper function for stopping hba port
2881 * @phba: Pointer to HBA context object.
2882 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine, invoked
 * through the API jump table function pointer in the lpfc_hba struct.
2885 **/
2886void
2887lpfc_stop_port(struct lpfc_hba *phba)
2888{
2889	phba->lpfc_stop_port(phba);
2890}
2891
2892/**
2893 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2894 * @phba: Pointer to hba for which this call is being executed.
2895 *
2896 * This routine starts the timer waiting for the FCF rediscovery to complete.
2897 **/
2898void
2899lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2900{
2901	unsigned long fcf_redisc_wait_tmo =
2902		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2903	/* Start fcf rediscovery wait period timer */
2904	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2905	spin_lock_irq(&phba->hbalock);
	/* Allow acting on a new FCF asynchronous event */
2907	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2908	/* Mark the FCF rediscovery pending state */
2909	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2910	spin_unlock_irq(&phba->hbalock);
2911}
2912
2913/**
2914 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: unsigned long holding the pointer to lpfc hba data structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery has timed
 * out. If new FCF record(s) has (have) been discovered during the wait
 * period, a new FCF event shall be added to the FCoE async event list, and
 * the worker thread shall be woken up to process it from the worker thread
 * context.
2922 **/
2923void
2924lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2925{
2926	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2927
2928	/* Don't send FCF rediscovery event if timer cancelled */
2929	spin_lock_irq(&phba->hbalock);
2930	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2931		spin_unlock_irq(&phba->hbalock);
2932		return;
2933	}
2934	/* Clear FCF rediscovery timer pending flag */
2935	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2936	/* FCF rediscovery event to worker thread */
2937	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2938	spin_unlock_irq(&phba->hbalock);
2939	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2940			"2776 FCF rediscover quiescent timer expired\n");
2941	/* wake up worker thread */
2942	lpfc_worker_wake_up(phba);
2943}
2944
2945/**
2946 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2947 * @phba: pointer to lpfc hba data structure.
2948 *
2949 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2950 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2951 * was successful and the firmware supports FCoE. Any other return indicates
2952 * a error. It is assumed that this function will be called before interrupts
 * an error. It is assumed that this function will be called before interrupts
2954 **/
2955static int
2956lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2957{
2958	int rc = 0;
2959	LPFC_MBOXQ_t *mboxq;
2960	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2961	uint32_t length;
2962	uint32_t shdr_status, shdr_add_status;
2963
2964	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2965	if (!mboxq) {
2966		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2967				"2621 Failed to allocate mbox for "
2968				"query firmware config cmd\n");
2969		return -ENOMEM;
2970	}
2971	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2972	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2973		  sizeof(struct lpfc_sli4_cfg_mhdr));
2974	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2975			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2976			 length, LPFC_SLI4_MBX_EMBED);
2977	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2978	/* The IOCTL status is embedded in the mailbox subheader. */
2979	shdr_status = bf_get(lpfc_mbox_hdr_status,
2980			     &query_fw_cfg->header.cfg_shdr.response);
2981	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2982				 &query_fw_cfg->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2622 Query Firmware Config failed "
				"mbx status x%x, status x%x add_status x%x\n",
				rc, shdr_status, shdr_add_status);
		/* Do not free the mailbox if firmware still owns it */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2623 FCoE Function not supported by firmware. "
				"Function mode = %08x\n",
				query_fw_cfg->function_mode);
		/* rc == MBX_SUCCESS here, so the mailbox can be freed */
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
2997	if (rc != MBX_TIMEOUT)
2998		mempool_free(mboxq, phba->mbox_mem_pool);
2999	return 0;
3000}
3001
3002/**
3003 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3004 * @phba: pointer to lpfc hba data structure.
3005 * @acqe_link: pointer to the async link completion queue entry.
3006 *
3007 * This routine is to parse the SLI4 link-attention link fault code and
3008 * translate it into the base driver's read link attention mailbox command
3009 * status.
3010 *
3011 * Return: Link-attention status in terms of base driver's coding.
3012 **/
3013static uint16_t
3014lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3015			   struct lpfc_acqe_link *acqe_link)
3016{
3017	uint16_t latt_fault;
3018
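	/* Any recognized fault code maps to a successful READ_LA status */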
3019	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3020	case LPFC_ASYNC_LINK_FAULT_NONE:
3021	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3022	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3023		latt_fault = 0;
3024		break;
3025	default:
3026		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3027				"0398 Invalid link fault code: x%x\n",
3028				bf_get(lpfc_acqe_link_fault, acqe_link));
3029		latt_fault = MBXERR_ERROR;
3030		break;
3031	}
3032	return latt_fault;
3033}
3034
3035/**
3036 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3037 * @phba: pointer to lpfc hba data structure.
3038 * @acqe_link: pointer to the async link completion queue entry.
3039 *
3040 * This routine is to parse the SLI4 link attention type and translate it
3041 * into the base driver's link attention type coding.
3042 *
3043 * Return: Link attention type in terms of base driver's coding.
3044 **/
3045static uint8_t
3046lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3047			  struct lpfc_acqe_link *acqe_link)
3048{
3049	uint8_t att_type;
3050
3051	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3052	case LPFC_ASYNC_LINK_STATUS_DOWN:
3053	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3054		att_type = AT_LINK_DOWN;
3055		break;
3056	case LPFC_ASYNC_LINK_STATUS_UP:
3057		/* Ignore physical link up events - wait for logical link up */
3058		att_type = AT_RESERVED;
3059		break;
3060	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3061		att_type = AT_LINK_UP;
3062		break;
3063	default:
3064		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3065				"0399 Invalid link attention type: x%x\n",
3066				bf_get(lpfc_acqe_link_status, acqe_link));
3067		att_type = AT_RESERVED;
3068		break;
3069	}
3070	return att_type;
3071}
3072
3073/**
3074 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3075 * @phba: pointer to lpfc hba data structure.
3076 * @acqe_link: pointer to the async link completion queue entry.
3077 *
3078 * This routine is to parse the SLI4 link-attention link speed and translate
3079 * it into the base driver's link-attention link speed coding.
3080 *
3081 * Return: Link-attention link speed in terms of base driver's coding.
3082 **/
3083static uint8_t
3084lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3085				struct lpfc_acqe_link *acqe_link)
3086{
3087	uint8_t link_speed;
3088
3089	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		link_speed = LA_UNKNW_LINK;
		break;
3099	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3100		link_speed = LA_1GHZ_LINK;
3101		break;
3102	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3103		link_speed = LA_10GHZ_LINK;
3104		break;
3105	default:
3106		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3107				"0483 Invalid link-attention link speed: x%x\n",
3108				bf_get(lpfc_acqe_link_speed, acqe_link));
3109		link_speed = LA_UNKNW_LINK;
3110		break;
3111	}
3112	return link_speed;
3113}
3114
3115/**
3116 * lpfc_sli4_async_link_evt - Process the asynchronous link event
3117 * @phba: pointer to lpfc hba data structure.
3118 * @acqe_link: pointer to the async link completion queue entry.
3119 *
3120 * This routine is to handle the SLI4 asynchronous link event.
3121 **/
3122static void
3123lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3124			 struct lpfc_acqe_link *acqe_link)
3125{
3126	struct lpfc_dmabuf *mp;
3127	LPFC_MBOXQ_t *pmb;
3128	MAILBOX_t *mb;
3129	READ_LA_VAR *la;
3130	uint8_t att_type;
3131
3132	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3133	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3134		return;
3135	phba->fcoe_eventtag = acqe_link->event_tag;
3136	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3137	if (!pmb) {
3138		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3139				"0395 The mboxq allocation failed\n");
3140		return;
3141	}
3142	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3143	if (!mp) {
3144		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3145				"0396 The lpfc_dmabuf allocation failed\n");
3146		goto out_free_pmb;
3147	}
3148	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3149	if (!mp->virt) {
3150		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3151				"0397 The mbuf allocation failed\n");
3152		goto out_free_dmabuf;
3153	}
3154
3155	/* Cleanup any outstanding ELS commands */
3156	lpfc_els_flush_all_cmd(phba);
3157
	/* Block ELS IOCBs until we are done processing the link event */
3159	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3160
3161	/* Update link event statistics */
3162	phba->sli.slistat.link_event++;
3163
3164	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3165	lpfc_read_la(phba, pmb, mp);
3166	pmb->vport = phba->pport;
3167
3168	/* Parse and translate status field */
3169	mb = &pmb->u.mb;
3170	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3171
3172	/* Parse and translate link attention fields */
3173	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3174	la->eventTag = acqe_link->event_tag;
3175	la->attType = att_type;
3176	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3177
	/* Fake the following irrelevant fields */
3179	la->topology = TOPOLOGY_PT_PT;
3180	la->granted_AL_PA = 0;
3181	la->il = 0;
3182	la->pb = 0;
3183	la->fa = 0;
3184	la->mm = 0;
3185
3186	/* Keep the link status for extra SLI4 state machine reference */
3187	phba->sli4_hba.link_state.speed =
3188				bf_get(lpfc_acqe_link_speed, acqe_link);
3189	phba->sli4_hba.link_state.duplex =
3190				bf_get(lpfc_acqe_link_duplex, acqe_link);
3191	phba->sli4_hba.link_state.status =
3192				bf_get(lpfc_acqe_link_status, acqe_link);
3193	phba->sli4_hba.link_state.physical =
3194				bf_get(lpfc_acqe_link_physical, acqe_link);
3195	phba->sli4_hba.link_state.fault =
3196				bf_get(lpfc_acqe_link_fault, acqe_link);
3197	phba->sli4_hba.link_state.logical_speed =
3198				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3199
3200	/* Invoke the lpfc_handle_latt mailbox command callback function */
3201	lpfc_mbx_cmpl_read_la(phba, pmb);
3202
3203	return;
3204
3205out_free_dmabuf:
3206	kfree(mp);
3207out_free_pmb:
3208	mempool_free(pmb, phba->mbox_mem_pool);
3209}
3210
3211/**
3212 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3213 * @vport: pointer to vport data structure.
3214 *
3215 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3216 * response to a CVL event.
3217 *
 * Return the pointer to the ndlp associated with the vport if successful,
 * otherwise return NULL.
3220 **/
3221static struct lpfc_nodelist *
3222lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3223{
3224	struct lpfc_nodelist *ndlp;
3225	struct Scsi_Host *shost;
3226	struct lpfc_hba *phba;
3227
3228	if (!vport)
3229		return NULL;
3230	phba = vport->phba;
3231	if (!phba)
3232		return NULL;
3233	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3234	if (!ndlp) {
3235		/* Cannot find existing Fabric ndlp, so allocate a new one */
3236		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3237		if (!ndlp)
			return NULL;
3239		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3240		/* Set the node type */
3241		ndlp->nlp_type |= NLP_FABRIC;
3242		/* Put ndlp onto node list */
3243		lpfc_enqueue_node(vport, ndlp);
3244	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3245		/* re-setup ndlp without removing from node list */
3246		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3247		if (!ndlp)
			return NULL;
3249	}
3250	if ((phba->pport->port_state < LPFC_FLOGI) &&
3251		(phba->pport->port_state != LPFC_VPORT_FAILED))
3252		return NULL;
3253	/* If virtual link is not yet instantiated ignore CVL */
3254	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3255		&& (vport->port_state != LPFC_VPORT_FAILED))
3256		return NULL;
3257	shost = lpfc_shost_from_vport(vport);
3258	if (!shost)
3259		return NULL;
3260	lpfc_linkdown_port(vport);
3261	lpfc_cleanup_pending_mbox(vport);
3262	spin_lock_irq(shost->host_lock);
3263	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3264	spin_unlock_irq(shost->host_lock);
3265
3266	return ndlp;
3267}
3268
3269/**
3270 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
3272 *
3273 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3274 * response to a FCF dead event.
3275 **/
3276static void
3277lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3278{
3279	struct lpfc_vport **vports;
3280	int i;
3281
3282	vports = lpfc_create_vport_work_array(phba);
3283	if (vports)
3284		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3285			lpfc_sli4_perform_vport_cvl(vports[i]);
3286	lpfc_destroy_vport_work_array(phba, vports);
3287}
3288
3289/**
3290 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3291 * @phba: pointer to lpfc hba data structure.
 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
3293 *
3294 * This routine is to handle the SLI4 asynchronous fcoe event.
3295 **/
3296static void
3297lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3298			 struct lpfc_acqe_fcoe *acqe_fcoe)
3299{
3300	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3301	int rc;
3302	struct lpfc_vport *vport;
3303	struct lpfc_nodelist *ndlp;
3304	struct Scsi_Host  *shost;
3305	int active_vlink_present;
3306	struct lpfc_vport **vports;
3307	int i;
3308
3309	phba->fc_eventTag = acqe_fcoe->event_tag;
3310	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3311	switch (event_type) {
3312	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3313	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3314		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3315			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3316					LOG_DISCOVERY,
3317					"2546 New FCF event, evt_tag:x%x, "
3318					"index:x%x\n",
3319					acqe_fcoe->event_tag,
3320					acqe_fcoe->index);
3321		else
3322			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3323					LOG_DISCOVERY,
3324					"2788 FCF param modified event, "
3325					"evt_tag:x%x, index:x%x\n",
3326					acqe_fcoe->event_tag,
3327					acqe_fcoe->index);
3328		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3329			/*
3330			 * During period of FCF discovery, read the FCF
3331			 * table record indexed by the event to update
3332			 * FCF roundrobin failover eligible FCF bmask.
3333			 */
3334			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3335					LOG_DISCOVERY,
3336					"2779 Read FCF (x%x) for updating "
3337					"roundrobin FCF failover bmask\n",
3338					acqe_fcoe->index);
3339			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3340		}
3341
3342		/* If the FCF discovery is in progress, do nothing. */
3343		spin_lock_irq(&phba->hbalock);
3344		if (phba->hba_flag & FCF_TS_INPROG) {
3345			spin_unlock_irq(&phba->hbalock);
3346			break;
3347		}
3348		/* If fast FCF failover rescan event is pending, do nothing */
3349		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3350			spin_unlock_irq(&phba->hbalock);
3351			break;
3352		}
3353
3354		/* If the FCF has been in discovered state, do nothing. */
3355		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3356			spin_unlock_irq(&phba->hbalock);
3357			break;
3358		}
3359		spin_unlock_irq(&phba->hbalock);
3360
3361		/* Otherwise, scan the entire FCF table and re-discover SAN */
3362		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3363				"2770 Start FCF table scan per async FCF "
3364				"event, evt_tag:x%x, index:x%x\n",
3365				acqe_fcoe->event_tag, acqe_fcoe->index);
3366		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3367						     LPFC_FCOE_FCF_GET_FIRST);
3368		if (rc)
3369			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3370					"2547 Issue FCF scan read FCF mailbox "
3371					"command failed (x%x)\n", rc);
3372		break;
3373
3374	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3375		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3376			"2548 FCF Table full count 0x%x tag 0x%x\n",
3377			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3378			acqe_fcoe->event_tag);
3379		break;
3380
3381	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3382		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3383			"2549 FCF (x%x) disconnected from network, "
3384			"tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3385		/*
3386		 * If we are in the middle of FCF failover process, clear
3387		 * the corresponding FCF bit in the roundrobin bitmap.
3388		 */
3389		spin_lock_irq(&phba->hbalock);
3390		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3391			spin_unlock_irq(&phba->hbalock);
3392			/* Update FLOGI FCF failover eligible FCF bmask */
3393			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3394			break;
3395		}
3396		spin_unlock_irq(&phba->hbalock);
3397
		/* If the event is not for the currently used FCF, do nothing */
3399		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3400			break;
3401
3402		/*
3403		 * Otherwise, request the port to rediscover the entire FCF
3404		 * table for a fast recovery from case that the current FCF
3405		 * is no longer valid as we are not in the middle of FCF
3406		 * failover process already.
3407		 */
3408		spin_lock_irq(&phba->hbalock);
3409		/* Mark the fast failover process in progress */
3410		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3411		spin_unlock_irq(&phba->hbalock);
3412
3413		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3414				"2771 Start FCF fast failover process due to "
3415				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3416				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3417		rc = lpfc_sli4_redisc_fcf_table(phba);
3418		if (rc) {
3419			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3420					LOG_DISCOVERY,
3421					"2772 Issue FCF rediscover mailbox "
3422					"command failed, fail through to FCF "
3423					"dead event\n");
3424			spin_lock_irq(&phba->hbalock);
3425			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3426			spin_unlock_irq(&phba->hbalock);
3427			/*
3428			 * Last resort will fail over by treating this
3429			 * as a link down to FCF registration.
3430			 */
3431			lpfc_sli4_fcf_dead_failthrough(phba);
3432		} else {
3433			/* Reset FCF roundrobin bmask for new discovery */
3434			memset(phba->fcf.fcf_rr_bmask, 0,
3435			       sizeof(*phba->fcf.fcf_rr_bmask));
3436			/*
3437			 * Handling fast FCF failover to a DEAD FCF event is
3438			 * considered equivalent to receiving CVL to all vports.
3439			 */
3440			lpfc_sli4_perform_all_vport_cvl(phba);
3441		}
3442		break;
3443	case LPFC_FCOE_EVENT_TYPE_CVL:
3444		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3445			"2718 Clear Virtual Link Received for VPI 0x%x"
3446			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3447		vport = lpfc_find_vport_by_vpid(phba,
3448				acqe_fcoe->index - phba->vpi_base);
3449		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3450		if (!ndlp)
3451			break;
3452		active_vlink_present = 0;
3453
3454		vports = lpfc_create_vport_work_array(phba);
3455		if (vports) {
3456			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3457					i++) {
3458				if ((!(vports[i]->fc_flag &
3459					FC_VPORT_CVL_RCVD)) &&
3460					(vports[i]->port_state > LPFC_FDISC)) {
3461					active_vlink_present = 1;
3462					break;
3463				}
3464			}
3465			lpfc_destroy_vport_work_array(phba, vports);
3466		}
3467
3468		if (active_vlink_present) {
3469			/*
3470			 * If there are other active VLinks present,
3471			 * re-instantiate the Vlink using FDISC.
3472			 */
3473			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3474			shost = lpfc_shost_from_vport(vport);
3475			spin_lock_irq(shost->host_lock);
3476			ndlp->nlp_flag |= NLP_DELAY_TMO;
3477			spin_unlock_irq(shost->host_lock);
3478			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3479			vport->port_state = LPFC_FDISC;
3480		} else {
3481			/*
3482			 * Otherwise, request the port to rediscover
3483			 * the entire FCF table for a fast recovery
3484			 * in case the current FCF is no longer valid,
3485			 * provided we are not already in the FCF
3486			 * failover process.
3487			 */
3488			spin_lock_irq(&phba->hbalock);
3489			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3490				spin_unlock_irq(&phba->hbalock);
3491				break;
3492			}
3493			/* Mark the fast failover process in progress */
3494			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3495			spin_unlock_irq(&phba->hbalock);
3496			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3497					LOG_DISCOVERY,
3498					"2773 Start FCF failover per CVL, "
3499					"evt_tag:x%x\n", acqe_fcoe->event_tag);
3500			rc = lpfc_sli4_redisc_fcf_table(phba);
3501			if (rc) {
3502				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3503						LOG_DISCOVERY,
3504						"2774 Issue FCF rediscover "
3505						"mailbox command failed, fail "
3506						"through to CVL event\n");
3507				spin_lock_irq(&phba->hbalock);
3508				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3509				spin_unlock_irq(&phba->hbalock);
3510				/*
3511				 * Last resort will be to retry on the
3512				 * currently registered FCF entry.
3513				 */
3514				lpfc_retry_pport_discovery(phba);
3515			} else
3516				/*
3517				 * Reset FCF roundrobin bmask for new
3518				 * discovery.
3519				 */
3520				memset(phba->fcf.fcf_rr_bmask, 0,
3521				       sizeof(*phba->fcf.fcf_rr_bmask));
3522		}
3523		break;
3524	default:
3525		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3526			"0288 Unknown FCoE event type 0x%x event tag "
3527			"0x%x\n", event_type, acqe_fcoe->event_tag);
3528		break;
3529	}
3530}
3531
3532/**
3533 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3534 * @phba: pointer to lpfc hba data structure.
3535 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3536 *
3537 * This routine is to handle the SLI4 asynchronous dcbx event.
3538 **/
3539static void
3540lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3541			 struct lpfc_acqe_dcbx *acqe_dcbx)
3542{
3543	phba->fc_eventTag = acqe_dcbx->event_tag;
3544	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3545			"0290 The SLI4 DCBX asynchronous event is not "
3546			"handled yet\n");
3547}
3548
3549/**
3550 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3551 * @phba: pointer to lpfc hba data structure.
3552 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3553 *
3554 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3555 * is an asynchronous notification of a logical link speed change.  The Port
3556 * reports the logical link speed in units of 10Mbps.
3557 **/
3558static void
3559lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3560			 struct lpfc_acqe_grp5 *acqe_grp5)
3561{
3562	uint16_t prev_ll_spd;
3563
3564	phba->fc_eventTag = acqe_grp5->event_tag;
3565	phba->fcoe_eventtag = acqe_grp5->event_tag;
3566	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3567	phba->sli4_hba.link_state.logical_speed =
3568		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
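	/*
	 * The firmware reports logical link speed in units of 10 Mbps,
	 * so a raw value of 1000, for example, corresponds to 10 Gbps.
	 */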
3569	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3570			"2789 GRP5 Async Event: Updating logical link speed "
3571			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3572			(phba->sli4_hba.link_state.logical_speed*10));
3573}
3574
3575/**
3576 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3577 * @phba: pointer to lpfc hba data structure.
3578 *
3579 * This routine is invoked by the worker thread to process all the pending
3580 * SLI4 asynchronous events.
3581 **/
3582void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3583{
3584	struct lpfc_cq_event *cq_event;
3585
3586	/* First, declare the async event has been handled */
3587	spin_lock_irq(&phba->hbalock);
3588	phba->hba_flag &= ~ASYNC_EVENT;
3589	spin_unlock_irq(&phba->hbalock);
3590	/* Now, handle all the async events */
3591	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3592		/* Get the first event from the head of the event queue */
3593		spin_lock_irq(&phba->hbalock);
3594		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3595				 cq_event, struct lpfc_cq_event, list);
3596		spin_unlock_irq(&phba->hbalock);
3597		/* Process the asynchronous event */
3598		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3599		case LPFC_TRAILER_CODE_LINK:
3600			lpfc_sli4_async_link_evt(phba,
3601						 &cq_event->cqe.acqe_link);
3602			break;
3603		case LPFC_TRAILER_CODE_FCOE:
3604			lpfc_sli4_async_fcoe_evt(phba,
3605						 &cq_event->cqe.acqe_fcoe);
3606			break;
3607		case LPFC_TRAILER_CODE_DCBX:
3608			lpfc_sli4_async_dcbx_evt(phba,
3609						 &cq_event->cqe.acqe_dcbx);
3610			break;
3611		case LPFC_TRAILER_CODE_GRP5:
3612			lpfc_sli4_async_grp5_evt(phba,
3613						 &cq_event->cqe.acqe_grp5);
3614			break;
3615		default:
3616			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3617				"1804 Invalid asynchronous event code: "
3618					"x%x\n", bf_get(lpfc_trailer_code,
3619					&cq_event->cqe.mcqe_cmpl));
3620			break;
3621		}
3622		/* Free the completion event processed to the free pool */
3623		lpfc_sli4_cq_event_release(phba, cq_event);
3624	}
3625}
3626
3627/**
3628 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3629 * @phba: pointer to lpfc hba data structure.
3630 *
3631 * This routine is invoked by the worker thread to process FCF table
3632 * rediscovery pending completion event.
3633 **/
3634void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3635{
3636	int rc;
3637
3638	spin_lock_irq(&phba->hbalock);
3639	/* Clear FCF rediscovery timeout event */
3640	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3641	/* Clear driver fast failover FCF record flag */
3642	phba->fcf.failover_rec.flag = 0;
3643	/* Set state for FCF fast failover */
3644	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3645	spin_unlock_irq(&phba->hbalock);
3646
3647	/* Scan FCF table from the first entry to re-discover SAN */
3648	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3649			"2777 Start post-quiescent FCF table scan\n");
3650	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3651	if (rc)
3652		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3653				"2747 Issue FCF scan read FCF mailbox "
3654				"command failed 0x%x\n", rc);
3655}
3656
3657/**
3658 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3659 * @phba: pointer to lpfc hba data structure.
3660 * @dev_grp: The HBA PCI-Device group number.
3661 *
3662 * This routine is invoked to set up the per HBA PCI-Device group function
3663 * API jump table entries.
3664 *
3665 * Return: 0 if success, otherwise -ENODEV
3666 **/
3667int
3668lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3669{
3670	int rc;
3671
3672	/* Set up lpfc PCI-device group */
3673	phba->pci_dev_grp = dev_grp;
3674
3675	/* The LPFC_PCI_DEV_OC uses SLI4 */
3676	if (dev_grp == LPFC_PCI_DEV_OC)
3677		phba->sli_rev = LPFC_SLI_REV4;
3678
3679	/* Set up device INIT API function jump table */
3680	rc = lpfc_init_api_table_setup(phba, dev_grp);
3681	if (rc)
3682		return -ENODEV;
3683	/* Set up SCSI API function jump table */
3684	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3685	if (rc)
3686		return -ENODEV;
3687	/* Set up SLI API function jump table */
3688	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3689	if (rc)
3690		return -ENODEV;
3691	/* Set up MBOX API function jump table */
3692	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3693	if (rc)
3694		return -ENODEV;
3695
3696	return 0;
3697}
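
/*
 * With the jump table populated, common code can simply call, for
 * example, phba->lpfc_stop_port(phba) without knowing whether the
 * device is SLI-3 or SLI-4; dev_grp selected the implementation.
 */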
3698
3699/**
3700 * lpfc_log_intr_mode - Log the active interrupt mode
3701 * @phba: pointer to lpfc hba data structure.
3702 * @intr_mode: active interrupt mode adopted.
3703 *
3704 * This routine is invoked to log the currently used active interrupt mode
3705 * to the device.
3706 **/
3707static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3708{
3709	switch (intr_mode) {
3710	case 0:
3711		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3712				"0470 Enabled INTx interrupt mode.\n");
3713		break;
3714	case 1:
3715		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3716				"0481 Enabled MSI interrupt mode.\n");
3717		break;
3718	case 2:
3719		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3720				"0480 Enabled MSI-X interrupt mode.\n");
3721		break;
3722	default:
3723		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3724				"0482 Illegal interrupt mode.\n");
3725		break;
3726	}
3727	return;
3728}
3729
3730/**
3731 * lpfc_enable_pci_dev - Enable a generic PCI device.
3732 * @phba: pointer to lpfc hba data structure.
3733 *
3734 * This routine is invoked to enable the PCI device that is common to all
3735 * PCI devices.
3736 *
3737 * Return codes
3738 * 	0 - successful
3739 * 	other values - error
3740 **/
3741static int
3742lpfc_enable_pci_dev(struct lpfc_hba *phba)
3743{
3744	struct pci_dev *pdev;
3745	int bars;
3746
3747	/* Obtain PCI device reference */
3748	if (!phba->pcidev)
3749		goto out_error;
3750	else
3751		pdev = phba->pcidev;
3752	/* Select PCI BARs */
3753	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3754	/* Enable PCI device */
3755	if (pci_enable_device_mem(pdev))
3756		goto out_error;
3757	/* Request PCI resource for the device */
3758	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3759		goto out_disable_device;
3760	/* Set up device as PCI master and save state for EEH */
3761	pci_set_master(pdev);
3762	pci_try_set_mwi(pdev);
3763	pci_save_state(pdev);
3764
3765	return 0;
3766
3767out_disable_device:
3768	pci_disable_device(pdev);
3769out_error:
3770	return -ENODEV;
3771}
3772
3773/**
3774 * lpfc_disable_pci_dev - Disable a generic PCI device.
3775 * @phba: pointer to lpfc hba data structure.
3776 *
3777 * This routine is invoked to disable the PCI device that is common to all
3778 * PCI devices.
3779 **/
3780static void
3781lpfc_disable_pci_dev(struct lpfc_hba *phba)
3782{
3783	struct pci_dev *pdev;
3784	int bars;
3785
3786	/* Obtain PCI device reference */
3787	if (!phba->pcidev)
3788		return;
3789	else
3790		pdev = phba->pcidev;
3791	/* Select PCI BARs */
3792	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3793	/* Release PCI resource and disable PCI device */
3794	pci_release_selected_regions(pdev, bars);
3795	pci_disable_device(pdev);
3796	/* Null out PCI private reference to driver */
3797	pci_set_drvdata(pdev, NULL);
3798
3799	return;
3800}
3801
3802/**
3803 * lpfc_reset_hba - Reset a hba
3804 * @phba: pointer to lpfc hba data structure.
3805 *
3806 * This routine is invoked to reset a hba device. It brings the HBA
3807 * offline, performs a board restart, and then brings the board back
3808 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3809 * outstanding mailbox commands.
3810 **/
3811void
3812lpfc_reset_hba(struct lpfc_hba *phba)
3813{
3814	/* If resets are disabled then set error state and return. */
3815	if (!phba->cfg_enable_hba_reset) {
3816		phba->link_state = LPFC_HBA_ERROR;
3817		return;
3818	}
3819	lpfc_offline_prep(phba);
3820	lpfc_offline(phba);
3821	lpfc_sli_brdrestart(phba);
3822	lpfc_online(phba);
3823	lpfc_unblock_mgmt_io(phba);
3824}
3825
3826/**
3827 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3828 * @phba: pointer to lpfc hba data structure.
3829 *
3830 * This routine is invoked to set up the driver internal resources specific to
3831 * support the SLI-3 HBA device it attached to.
3832 *
3833 * Return codes
3834 * 	0 - successful
3835 * 	other values - error
3836 **/
3837static int
3838lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3839{
3840	struct lpfc_sli *psli;
3841
3842	/*
3843	 * Initialize timers used by driver
3844	 */
3845
3846	/* Heartbeat timer */
3847	init_timer(&phba->hb_tmofunc);
3848	phba->hb_tmofunc.function = lpfc_hb_timeout;
3849	phba->hb_tmofunc.data = (unsigned long)phba;
3850
3851	psli = &phba->sli;
3852	/* MBOX heartbeat timer */
3853	init_timer(&psli->mbox_tmo);
3854	psli->mbox_tmo.function = lpfc_mbox_timeout;
3855	psli->mbox_tmo.data = (unsigned long) phba;
3856	/* FCP polling mode timer */
3857	init_timer(&phba->fcp_poll_timer);
3858	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3859	phba->fcp_poll_timer.data = (unsigned long) phba;
3860	/* Fabric block timer */
3861	init_timer(&phba->fabric_block_timer);
3862	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3863	phba->fabric_block_timer.data = (unsigned long) phba;
3864	/* EA polling mode timer */
3865	init_timer(&phba->eratt_poll);
3866	phba->eratt_poll.function = lpfc_poll_eratt;
3867	phba->eratt_poll.data = (unsigned long) phba;
3868
3869	/* Host attention work mask setup */
3870	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3871	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
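	/*
	 * Each SLI ring owns a 4-bit field in the Host Attention
	 * register, so the shift above selects the ELS ring's RX
	 * attention bits.
	 */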
3872
3873	/* Get all the module params for configuring this host */
3874	lpfc_get_cfgparam(phba);
3875	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
3876		phba->menlo_flag |= HBA_MENLO_SUPPORT;
3877		/* check for menlo minimum sg count */
3878		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
3879			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
3880	}
3881
3882	/*
3883	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3884	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3885	 * 2 segments are added since the IOCB needs a command and response bde.
3886	 */
3887	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3888		sizeof(struct fcp_rsp) +
3889			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
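	/*
	 * Illustration (values hypothetical): with cfg_sg_seg_cnt = 64,
	 * each DMA buffer holds an fcp_cmnd, an fcp_rsp, and 66 BDEs
	 * (64 data segments plus the command and response entries).
	 */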
3890
3891	if (phba->cfg_enable_bg) {
3892		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3893		phba->cfg_sg_dma_buf_size +=
3894			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3895	}
3896
3897	/* Also reinitialize the host templates with new values. */
3898	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3899	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3900
3901	phba->max_vpi = LPFC_MAX_VPI;
3902	/* This will be set to correct value after config_port mbox */
3903	phba->max_vports = 0;
3904
3905	/*
3906	 * Initialize the SLI Layer to run with lpfc HBAs.
3907	 */
3908	lpfc_sli_setup(phba);
3909	lpfc_sli_queue_setup(phba);
3910
3911	/* Allocate device driver memory */
3912	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3913		return -ENOMEM;
3914
3915	return 0;
3916}
3917
3918/**
3919 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3920 * @phba: pointer to lpfc hba data structure.
3921 *
3922 * This routine is invoked to unset the driver internal resources set up
3923 * specific for supporting the SLI-3 HBA device it attached to.
3924 **/
3925static void
3926lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3927{
3928	/* Free device driver memory allocated */
3929	lpfc_mem_free_all(phba);
3930
3931	return;
3932}
3933
3934/**
3935 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3936 * @phba: pointer to lpfc hba data structure.
3937 *
3938 * This routine is invoked to set up the driver internal resources specific to
3939 * support the SLI-4 HBA device it attached to.
3940 *
3941 * Return codes
3942 * 	0 - successful
3943 * 	other values - error
3944 **/
3945static int
3946lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3947{
3948	struct lpfc_sli *psli;
3949	LPFC_MBOXQ_t *mboxq;
3950	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3951	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3952	struct lpfc_mqe *mqe;
3953	int longs;
3954
3955	/* Before proceed, wait for POST done and device ready */
3956	rc = lpfc_sli4_post_status_check(phba);
3957	if (rc)
3958		return -ENODEV;
3959
3960	/*
3961	 * Initialize timers used by driver
3962	 */
3963
3964	/* Heartbeat timer */
3965	init_timer(&phba->hb_tmofunc);
3966	phba->hb_tmofunc.function = lpfc_hb_timeout;
3967	phba->hb_tmofunc.data = (unsigned long)phba;
3968
3969	psli = &phba->sli;
3970	/* MBOX heartbeat timer */
3971	init_timer(&psli->mbox_tmo);
3972	psli->mbox_tmo.function = lpfc_mbox_timeout;
3973	psli->mbox_tmo.data = (unsigned long) phba;
3974	/* Fabric block timer */
3975	init_timer(&phba->fabric_block_timer);
3976	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3977	phba->fabric_block_timer.data = (unsigned long) phba;
3978	/* EA polling mode timer */
3979	init_timer(&phba->eratt_poll);
3980	phba->eratt_poll.function = lpfc_poll_eratt;
3981	phba->eratt_poll.data = (unsigned long) phba;
3982	/* FCF rediscover timer */
3983	init_timer(&phba->fcf.redisc_wait);
3984	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3985	phba->fcf.redisc_wait.data = (unsigned long)phba;
3986
3987	/*
3988	 * We need to do a READ_CONFIG mailbox command here before
3989	 * calling lpfc_get_cfgparam. For VFs this will report the
3990	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3991	 * All of the resources allocated
3992	 * for this Port are tied to these values.
3993	 */
3994	/* Get all the module params for configuring this host */
3995	lpfc_get_cfgparam(phba);
3996	phba->max_vpi = LPFC_MAX_VPI;
3997	/* This will be set to correct value after the read_config mbox */
3998	phba->max_vports = 0;
3999
4000	/* Program the default value of vlan_id and fc_map */
4001	phba->valid_vlan = 0;
4002	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4003	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4004	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4005
4006	/*
4007	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4008	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4009	 * 2 segments are added since the IOCB needs a command and response bde.
4010	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4011	 * sgl sizes that are a power of 2 are used.
4012	 */
4013	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4014		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4015	/* Feature Level 1 hardware is limited to 2 pages */
4016	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
4017	     LPFC_SLI_INTF_FEATURELEVEL1_1))
4018		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4019	else
4020		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4021	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4022	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4023	     dma_buf_size = dma_buf_size << 1)
4024		;
4025	if (dma_buf_size == max_buf_size)
4026		phba->cfg_sg_seg_cnt = (dma_buf_size -
4027			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4028			(2 * sizeof(struct sli4_sge))) /
4029				sizeof(struct sli4_sge);
4030	phba->cfg_sg_dma_buf_size = dma_buf_size;
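	/*
	 * Illustration (sizes hypothetical): if buf_size works out to
	 * roughly 3000 bytes, the loop above rounds dma_buf_size up to
	 * 4096 so each buffer stays a power of 2 and an sgl never
	 * straddles a 4K page. If the cap was hit, cfg_sg_seg_cnt is
	 * trimmed to the sge count that still fits.
	 */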
4031
4032	/* Initialize buffer queue management fields */
4033	hbq_count = lpfc_sli_hbq_count();
4034	for (i = 0; i < hbq_count; ++i)
4035		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4036	INIT_LIST_HEAD(&phba->rb_pend_list);
4037	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4038	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4039
4040	/*
4041	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4042	 */
4043	/* Initialize the Abort scsi buffer list used by driver */
4044	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4045	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4046	/* This abort list used by worker thread */
4047	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4048
4049	/*
4050	 * Initialize driver internal slow-path work queues
4051	 */
4052
4053	/* Driver internal slow-path CQ Event pool */
4054	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4055	/* Response IOCB work queue list */
4056	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4057	/* Asynchronous event CQ Event work queue list */
4058	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4059	/* Fast-path XRI aborted CQ Event work queue list */
4060	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4061	/* Slow-path XRI aborted CQ Event work queue list */
4062	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4063	/* Receive queue CQ Event work queue list */
4064	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4065
4066	/* Initialize the driver internal SLI layer lists. */
4067	lpfc_sli_setup(phba);
4068	lpfc_sli_queue_setup(phba);
4069
4070	/* Allocate device driver memory */
4071	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4072	if (rc)
4073		return -ENOMEM;
4074
4075	/* Create the bootstrap mailbox command */
4076	rc = lpfc_create_bootstrap_mbox(phba);
4077	if (unlikely(rc))
4078		goto out_free_mem;
4079
4080	/* Set up the host's endian order with the device. */
4081	rc = lpfc_setup_endian_order(phba);
4082	if (unlikely(rc))
4083		goto out_free_bsmbx;
4084
4085	rc = lpfc_sli4_fw_cfg_check(phba);
4086	if (unlikely(rc))
4087		goto out_free_bsmbx;
4088
4089	/* Set up the hba's configuration parameters. */
4090	rc = lpfc_sli4_read_config(phba);
4091	if (unlikely(rc))
4092		goto out_free_bsmbx;
4093
4094	/* Perform a function reset */
4095	rc = lpfc_pci_function_reset(phba);
4096	if (unlikely(rc))
4097		goto out_free_bsmbx;
4098
4099	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4100						       GFP_KERNEL);
4101	if (!mboxq) {
4102		rc = -ENOMEM;
4103		goto out_free_bsmbx;
4104	}
4105
4106	/* Get the Supported Pages. It is always available. */
4107	lpfc_supported_pages(mboxq);
4108	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4109	if (unlikely(rc)) {
4110		rc = -EIO;
4111		mempool_free(mboxq, phba->mbox_mem_pool);
4112		goto out_free_bsmbx;
4113	}
4114
4115	mqe = &mboxq->u.mqe;
4116	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4117	       LPFC_MAX_SUPPORTED_PAGES);
4118	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4119		switch (pn_page[i]) {
4120		case LPFC_SLI4_PARAMETERS:
4121			phba->sli4_hba.pc_sli4_params.supported = 1;
4122			break;
4123		default:
4124			break;
4125		}
4126	}
4127
4128	/* Read the port's SLI4 Parameters capabilities if supported. */
4129	if (phba->sli4_hba.pc_sli4_params.supported)
4130		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4131	mempool_free(mboxq, phba->mbox_mem_pool);
4132	if (rc) {
4133		rc = -EIO;
4134		goto out_free_bsmbx;
4135	}
4136	/* Create all the SLI4 queues */
4137	rc = lpfc_sli4_queue_create(phba);
4138	if (rc)
4139		goto out_free_bsmbx;
4140
4141	/* Create driver internal CQE event pool */
4142	rc = lpfc_sli4_cq_event_pool_create(phba);
4143	if (rc)
4144		goto out_destroy_queue;
4145
4146	/* Initialize and populate the iocb list per host */
4147	rc = lpfc_init_sgl_list(phba);
4148	if (rc) {
4149		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4150				"1400 Failed to initialize sgl list.\n");
4151		goto out_destroy_cq_event_pool;
4152	}
4153	rc = lpfc_init_active_sgl_array(phba);
4154	if (rc) {
4155		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4156				"1430 Failed to initialize sgl list.\n");
4157		goto out_free_sgl_list;
4158	}
4159
4160	rc = lpfc_sli4_init_rpi_hdrs(phba);
4161	if (rc) {
4162		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4163				"1432 Failed to initialize rpi headers.\n");
4164		goto out_free_active_sgl;
4165	}
4166
4167	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4168	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
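	/*
	 * Ceiling division: the same computation as the kernel's
	 * BITS_TO_LONGS() macro, sizing the bitmap in whole longs.
	 */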
4169	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4170					 GFP_KERNEL);
4171	if (!phba->fcf.fcf_rr_bmask) {
4172		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4173				"2759 Failed allocate memory for FCF round "
4174				"robin failover bmask\n");
4175		goto out_remove_rpi_hdrs;
4176	}
4177
4178	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4179				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4180	if (!phba->sli4_hba.fcp_eq_hdl) {
4181		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4182				"2572 Failed allocate memory for fast-path "
4183				"per-EQ handle array\n");
4184		goto out_free_fcf_rr_bmask;
4185	}
4186
4187	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4188				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4189	if (!phba->sli4_hba.msix_entries) {
4190		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4191				"2573 Failed allocate memory for msi-x "
4192				"interrupt vector entries\n");
4193		goto out_free_fcp_eq_hdl;
4194	}
4195
4196	return rc;
4197
4198out_free_fcp_eq_hdl:
4199	kfree(phba->sli4_hba.fcp_eq_hdl);
4200out_free_fcf_rr_bmask:
4201	kfree(phba->fcf.fcf_rr_bmask);
4202out_remove_rpi_hdrs:
4203	lpfc_sli4_remove_rpi_hdrs(phba);
4204out_free_active_sgl:
4205	lpfc_free_active_sgl(phba);
4206out_free_sgl_list:
4207	lpfc_free_sgl_list(phba);
4208out_destroy_cq_event_pool:
4209	lpfc_sli4_cq_event_pool_destroy(phba);
4210out_destroy_queue:
4211	lpfc_sli4_queue_destroy(phba);
4212out_free_bsmbx:
4213	lpfc_destroy_bootstrap_mbox(phba);
4214out_free_mem:
4215	lpfc_mem_free(phba);
4216	return rc;
4217}
4218
4219/**
4220 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4221 * @phba: pointer to lpfc hba data structure.
4222 *
4223 * This routine is invoked to unset the driver internal resources set up
4224 * specific for supporting the SLI-4 HBA device it attached to.
4225 **/
4226static void
4227lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4228{
4229	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4230
4231	/* Free memory allocated for msi-x interrupt vector entries */
4232	kfree(phba->sli4_hba.msix_entries);
4233
4234	/* Free memory allocated for fast-path work queue handles */
4235	kfree(phba->sli4_hba.fcp_eq_hdl);
4236
4237	/* Free the allocated rpi headers. */
4238	lpfc_sli4_remove_rpi_hdrs(phba);
4239	lpfc_sli4_remove_rpis(phba);
4240
4241	/* Free eligible FCF index bmask */
4242	kfree(phba->fcf.fcf_rr_bmask);
4243
4244	/* Free the ELS sgl list */
4245	lpfc_free_active_sgl(phba);
4246	lpfc_free_sgl_list(phba);
4247
4248	/* Free the SCSI sgl management array */
4249	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4250
4251	/* Free the SLI4 queues */
4252	lpfc_sli4_queue_destroy(phba);
4253
4254	/* Free the completion queue EQ event pool */
4255	lpfc_sli4_cq_event_release_all(phba);
4256	lpfc_sli4_cq_event_pool_destroy(phba);
4257
4258	/* Free the bsmbx region. */
4259	lpfc_destroy_bootstrap_mbox(phba);
4260
4261	/* Free the SLI Layer memory with SLI4 HBAs */
4262	lpfc_mem_free_all(phba);
4263
4264	/* Free the current connect table */
4265	list_for_each_entry_safe(conn_entry, next_conn_entry,
4266		&phba->fcf_conn_rec_list, list) {
4267		list_del_init(&conn_entry->list);
4268		kfree(conn_entry);
4269	}
4270
4271	return;
4272}
4273
4274/**
4275 * lpfc_init_api_table_setup - Set up init api function jump table
4276 * @phba: The hba struct for which this call is being executed.
4277 * @dev_grp: The HBA PCI-Device group number.
4278 *
4279 * This routine sets up the device INIT interface API function jump table
4280 * in @phba struct.
4281 *
4282 * Returns: 0 - success, -ENODEV - failure.
4283 **/
4284int
4285lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4286{
4287	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4288	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4289	switch (dev_grp) {
4290	case LPFC_PCI_DEV_LP:
4291		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4292		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4293		phba->lpfc_stop_port = lpfc_stop_port_s3;
4294		break;
4295	case LPFC_PCI_DEV_OC:
4296		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4297		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4298		phba->lpfc_stop_port = lpfc_stop_port_s4;
4299		break;
4300	default:
4301		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4302				"1431 Invalid HBA PCI-device group: 0x%x\n",
4303				dev_grp);
4304		return -ENODEV;
4306	}
4307	return 0;
4308}
4309
4310/**
4311 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4312 * @phba: pointer to lpfc hba data structure.
4313 *
4314 * This routine is invoked to set up the driver internal resources before the
4315 * device specific resource setup to support the HBA device it attached to.
4316 *
4317 * Return codes
4318 *	0 - successful
4319 *	other values - error
4320 **/
4321static int
4322lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4323{
4324	/*
4325	 * Driver resources common to all SLI revisions
4326	 */
4327	atomic_set(&phba->fast_event_count, 0);
4328	spin_lock_init(&phba->hbalock);
4329
4330	/* Initialize ndlp management spinlock */
4331	spin_lock_init(&phba->ndlp_lock);
4332
4333	INIT_LIST_HEAD(&phba->port_list);
4334	INIT_LIST_HEAD(&phba->work_list);
4335	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4336
4337	/* Initialize the wait queue head for the kernel thread */
4338	init_waitqueue_head(&phba->work_waitq);
4339
4340	/* Initialize the scsi buffer list used by driver for scsi IO */
4341	spin_lock_init(&phba->scsi_buf_list_lock);
4342	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4343
4344	/* Initialize the fabric iocb list */
4345	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4346
4347	/* Initialize list to save ELS buffers */
4348	INIT_LIST_HEAD(&phba->elsbuf);
4349
4350	/* Initialize FCF connection rec list */
4351	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4352
4353	return 0;
4354}
4355
4356/**
4357 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4358 * @phba: pointer to lpfc hba data structure.
4359 *
4360 * This routine is invoked to set up the driver internal resources after the
4361 * device specific resource setup to support the HBA device it attached to.
4362 *
4363 * Return codes
4364 * 	0 - successful
4365 * 	other values - error
4366 **/
4367static int
4368lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4369{
4370	int error;
4371
4372	/* Startup the kernel thread for this host adapter. */
4373	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4374					  "lpfc_worker_%d", phba->brd_no);
4375	if (IS_ERR(phba->worker_thread)) {
4376		error = PTR_ERR(phba->worker_thread);
4377		return error;
4378	}
4379
4380	return 0;
4381}
4382
4383/**
4384 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4385 * @phba: pointer to lpfc hba data structure.
4386 *
4387 * This routine is invoked to unset the driver internal resources set up after
4388 * the device specific resource setup for supporting the HBA device it
4389 * attached to.
4390 **/
4391static void
4392lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4393{
4394	/* Stop kernel worker thread */
4395	kthread_stop(phba->worker_thread);
4396}
4397
4398/**
4399 * lpfc_free_iocb_list - Free iocb list.
4400 * @phba: pointer to lpfc hba data structure.
4401 *
4402 * This routine is invoked to free the driver's IOCB list and memory.
4403 **/
4404static void
4405lpfc_free_iocb_list(struct lpfc_hba *phba)
4406{
4407	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4408
4409	spin_lock_irq(&phba->hbalock);
4410	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4411				 &phba->lpfc_iocb_list, list) {
4412		list_del(&iocbq_entry->list);
4413		kfree(iocbq_entry);
4414		phba->total_iocbq_bufs--;
4415	}
4416	spin_unlock_irq(&phba->hbalock);
4417
4418	return;
4419}
4420
4421/**
4422 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4423 * @phba: pointer to lpfc hba data structure.
4424 *
4425 * This routine is invoked to allocate and initialize the driver's IOCB
4426 * list and set up the IOCB tag array accordingly.
4427 *
4428 * Return codes
4429 *	0 - successful
4430 *	other values - error
4431 **/
4432static int
4433lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4434{
4435	struct lpfc_iocbq *iocbq_entry = NULL;
4436	uint16_t iotag;
4437	int i;
4438
4439	/* Initialize and populate the iocb list per host.  */
4440	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4441	for (i = 0; i < iocb_count; i++) {
4442		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4443		if (iocbq_entry == NULL) {
4444			printk(KERN_ERR "%s: only allocated %d iocbs of "
4445				"expected %d count. Unloading driver.\n",
4446				__func__, i, LPFC_IOCB_LIST_CNT);
4447				__func__, i, iocb_count);
4448		}
4449
4450		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4451		if (iotag == 0) {
4452			kfree(iocbq_entry);
4453			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4454				"Unloading driver.\n", __func__);
4455			goto out_free_iocbq;
4456		}
4457		iocbq_entry->sli4_xritag = NO_XRI;
4458
4459		spin_lock_irq(&phba->hbalock);
4460		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4461		phba->total_iocbq_bufs++;
4462		spin_unlock_irq(&phba->hbalock);
4463	}
4464
4465	return 0;
4466
4467out_free_iocbq:
4468	lpfc_free_iocb_list(phba);
4469
4470	return -ENOMEM;
4471}
4472
4473/**
4474 * lpfc_free_sgl_list - Free sgl list.
4475 * @phba: pointer to lpfc hba data structure.
4476 *
4477 * This routine is invoked to free the driver's sgl list and memory.
4478 **/
4479static void
4480lpfc_free_sgl_list(struct lpfc_hba *phba)
4481{
4482	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4483	LIST_HEAD(sglq_list);
4484
4485	spin_lock_irq(&phba->hbalock);
4486	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4487	spin_unlock_irq(&phba->hbalock);
4488
4489	list_for_each_entry_safe(sglq_entry, sglq_next,
4490				 &sglq_list, list) {
4491		list_del(&sglq_entry->list);
4492		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4493		kfree(sglq_entry);
4494		phba->sli4_hba.total_sglq_bufs--;
4495	}
4496	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4497}
4498
4499/**
4500 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4501 * @phba: pointer to lpfc hba data structure.
4502 *
4503 * This routine is invoked to allocate the driver's active sgl memory.
4504 * This array will hold the sglq_entry's for active IOs.
4505 **/
4506static int
4507lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4508{
4509	int size;
4510	size = sizeof(struct lpfc_sglq *);
4511	size *= phba->sli4_hba.max_cfg_param.max_xri;
4512
4513	phba->sli4_hba.lpfc_sglq_active_list =
4514		kzalloc(size, GFP_KERNEL);
4515	if (!phba->sli4_hba.lpfc_sglq_active_list)
4516		return -ENOMEM;
4517	return 0;
4518}
4519
4520/**
4521 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4522 * @phba: pointer to lpfc hba data structure.
4523 *
4524 * This routine is invoked to walk through the array of active sglq entries
4525 * and free all of the resources.
4526 * This is just a placeholder for now.
4527 **/
4528static void
4529lpfc_free_active_sgl(struct lpfc_hba *phba)
4530{
4531	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4532}
4533
4534/**
4535 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4536 * @phba: pointer to lpfc hba data structure.
4537 *
4538 * This routine is invoked to allocate and initialize the driver's sgl
4539 * list and set up the sgl xritag tag array accordingly.
4540 *
4541 * Return codes
4542 *	0 - successful
4543 *	other values - error
4544 **/
4545static int
4546lpfc_init_sgl_list(struct lpfc_hba *phba)
4547{
4548	struct lpfc_sglq *sglq_entry = NULL;
4549	int i;
4550	int els_xri_cnt;
4551
4552	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4553	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4554				"2400 lpfc_init_sgl_list els %d.\n",
4555				els_xri_cnt);
4556	/* Initialize and populate the sglq list per host/VF. */
4557	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4558	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4559
4560	/* Sanity check on XRI management */
4561	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4562		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4563				"2562 No room left for SCSI XRI allocation: "
4564				"max_xri=%d, els_xri=%d\n",
4565				phba->sli4_hba.max_cfg_param.max_xri,
4566				els_xri_cnt);
4567		return -ENOMEM;
4568	}
4569
4570	/* Allocate memory for the ELS XRI management array */
4571	phba->sli4_hba.lpfc_els_sgl_array =
4572			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4573			GFP_KERNEL);
4574
4575	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4576		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4577				"2401 Failed to allocate memory for ELS "
4578				"XRI management array of size %d.\n",
4579				els_xri_cnt);
4580		return -ENOMEM;
4581	}
4582
4583	/* Set aside the remaining XRIs for SCSI buffer management */
4584	phba->sli4_hba.scsi_xri_max =
4585			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4586	phba->sli4_hba.scsi_xri_cnt = 0;
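	/*
	 * Illustration (values hypothetical): with max_xri = 1024 and
	 * els_xri_cnt = 256, 768 XRIs remain available for SCSI I/O.
	 */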
4587
4588	phba->sli4_hba.lpfc_scsi_psb_array =
4589			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4590			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4591
4592	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4593		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4594				"2563 Failed to allocate memory for SCSI "
4595				"XRI management array of size %d.\n",
4596				phba->sli4_hba.scsi_xri_max);
4597		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4598		return -ENOMEM;
4599	}
4600
4601	for (i = 0; i < els_xri_cnt; i++) {
4602		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4603		if (sglq_entry == NULL) {
4604			printk(KERN_ERR "%s: only allocated %d sgls of "
4605				"expected %d count. Unloading driver.\n",
4606				__func__, i, els_xri_cnt);
4607			goto out_free_mem;
4608		}
4609
4610		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4611		if (sglq_entry->sli4_xritag == NO_XRI) {
4612			kfree(sglq_entry);
4613			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4614				"Unloading driver.\n", __func__);
4615			goto out_free_mem;
4616		}
4617		sglq_entry->buff_type = GEN_BUFF_TYPE;
4618		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4619		if (sglq_entry->virt == NULL) {
4620			kfree(sglq_entry);
4621			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4622				"Unloading driver.\n", __func__);
4623			goto out_free_mem;
4624		}
4625		sglq_entry->sgl = sglq_entry->virt;
4626		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4627
4628		/* The list order is used by later block SGL registration */
4629		spin_lock_irq(&phba->hbalock);
4630		sglq_entry->state = SGL_FREED;
4631		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4632		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4633		phba->sli4_hba.total_sglq_bufs++;
4634		spin_unlock_irq(&phba->hbalock);
4635	}
4636	return 0;
4637
4638out_free_mem:
4639	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4640	lpfc_free_sgl_list(phba);
4641	return -ENOMEM;
4642}
4643
4644/**
4645 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4646 * @phba: pointer to lpfc hba data structure.
4647 *
4648 * This routine is invoked to post rpi header templates to the
4649 * HBA consistent with the SLI-4 interface spec.  This routine
4650 * posts a PAGE_SIZE memory region to the port to hold up to
4651 * PAGE_SIZE / 64 rpi context headers.
4652 * No locks are held here because this is an initialization routine
4653 * called only from probe or lpfc_online when interrupts are not
4654 * enabled and the driver is reinitializing the device.
4655 *
4656 * Return codes
4657 * 	0 - successful
4658 * 	-ENOMEM - No available memory
4659 *      -EIO - The mailbox failed to complete successfully.
4660 **/
4661int
4662lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4663{
4664	int rc = 0;
4665	int longs;
4666	uint16_t rpi_count;
4667	struct lpfc_rpi_hdr *rpi_hdr;
4668
4669	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4670
4671	/*
4672	 * Provision an rpi bitmask range for discovery. The total count
4673	 * is the difference between max and base + 1.
4674	 */
4675	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4676		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4677
4678	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4679	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4680					   GFP_KERNEL);
4681	if (!phba->sli4_hba.rpi_bmask)
4682		return -ENOMEM;
4683
4684	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4685	if (!rpi_hdr) {
4686		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4687				"0391 Error during rpi post operation\n");
4688		lpfc_sli4_remove_rpis(phba);
4689		rc = -ENODEV;
4690	}
4691
4692	return rc;
4693}
4694
4695/**
4696 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4697 * @phba: pointer to lpfc hba data structure.
4698 *
4699 * This routine is invoked to allocate a single 4KB memory region to
4700 * support rpis and stores them in the phba.  This single region
4701 * provides support for up to 64 rpis.  The region is used globally
4702 * by the device.
4703 *
4704 * Returns:
4705 *   A valid rpi hdr on success.
4706 *   A NULL pointer on any failure.
4707 **/
4708struct lpfc_rpi_hdr *
4709lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4710{
4711	uint16_t rpi_limit, curr_rpi_range;
4712	struct lpfc_dmabuf *dmabuf;
4713	struct lpfc_rpi_hdr *rpi_hdr;
4714
4715	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4716		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4717
4718	spin_lock_irq(&phba->hbalock);
4719	curr_rpi_range = phba->sli4_hba.next_rpi;
4720	spin_unlock_irq(&phba->hbalock);
4721
4722	/*
4723	 * The port has a limited number of rpis. The increment here
4724	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4725	 * and to allow the full max_rpi range per port.
4726	 */
4727	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4728		return NULL;
4729
4730	/*
4731	 * First allocate the protocol header region for the port.  The
4732	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4733	 */
4734	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4735	if (!dmabuf)
4736		return NULL;
4737
4738	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4739					  LPFC_HDR_TEMPLATE_SIZE,
4740					  &dmabuf->phys,
4741					  GFP_KERNEL);
4742	if (!dmabuf->virt) {
4743		rpi_hdr = NULL;
4744		goto err_free_dmabuf;
4745	}
4746
4747	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4748	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4749		rpi_hdr = NULL;
4750		goto err_free_coherent;
4751	}
4752
4753	/* Save the rpi header data for cleanup later. */
4754	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4755	if (!rpi_hdr)
4756		goto err_free_coherent;
4757
4758	rpi_hdr->dmabuf = dmabuf;
4759	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4760	rpi_hdr->page_count = 1;
4761	spin_lock_irq(&phba->hbalock);
4762	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4763	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4764
4765	/*
4766	 * The next_rpi stores the next modulo-64 rpi value to post
4767	 * in any subsequent rpi memory region postings.
4768	 */
4769	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4770	spin_unlock_irq(&phba->hbalock);
4771	return rpi_hdr;
4772
4773 err_free_coherent:
4774	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4775			  dmabuf->virt, dmabuf->phys);
4776 err_free_dmabuf:
4777	kfree(dmabuf);
4778	return NULL;
4779}
4780
4781/**
4782 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4783 * @phba: pointer to lpfc hba data structure.
4784 *
4785 * This routine is invoked to remove all memory resources allocated
4786 * to support rpis. This routine presumes the caller has released all
4787 * rpis consumed by fabric or port logins and is prepared to have
4788 * the header pages removed.
4789 **/
4790void
4791lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4792{
4793	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4794
4795	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4796				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4797		list_del(&rpi_hdr->list);
4798		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4799				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4800		kfree(rpi_hdr->dmabuf);
4801		kfree(rpi_hdr);
4802	}
4803
4804	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4805	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4806}
4807
4808/**
4809 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4810 * @pdev: pointer to pci device data structure.
4811 *
4812 * This routine is invoked to allocate the driver hba data structure for an
4813 * HBA device. If the allocation is successful, the phba reference to the
4814 * PCI device data structure is set.
4815 *
4816 * Return codes
4817 *      pointer to @phba - successful
4818 *      NULL - error
4819 **/
4820static struct lpfc_hba *
4821lpfc_hba_alloc(struct pci_dev *pdev)
4822{
4823	struct lpfc_hba *phba;
4824
4825	/* Allocate memory for HBA structure */
4826	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4827	if (!phba) {
4828		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4829		return NULL;
4830	}
4831
4832	/* Set reference to PCI device in HBA structure */
4833	phba->pcidev = pdev;
4834
4835	/* Assign an unused board number */
4836	phba->brd_no = lpfc_get_instance();
4837	if (phba->brd_no < 0) {
4838		kfree(phba);
4839		return NULL;
4840	}
4841
4842	spin_lock_init(&phba->ct_ev_lock);
4843	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4844
4845	return phba;
4846}
4847
4848/**
4849 * lpfc_hba_free - Free driver hba data structure with a device.
4850 * @phba: pointer to lpfc hba data structure.
4851 *
4852 * This routine is invoked to free the driver hba data structure with an
4853 * HBA device.
4854 **/
4855static void
4856lpfc_hba_free(struct lpfc_hba *phba)
4857{
4858	/* Release the driver assigned board number */
4859	idr_remove(&lpfc_hba_index, phba->brd_no);
4860
4861	kfree(phba);
4862	return;
4863}
4864
4865/**
4866 * lpfc_create_shost - Create hba physical port with associated scsi host.
4867 * @phba: pointer to lpfc hba data structure.
4868 *
4869 * This routine is invoked to create HBA physical port and associate a SCSI
4870 * host with it.
4871 *
4872 * Return codes
4873 *      0 - successful
4874 *      other values - error
4875 **/
4876static int
4877lpfc_create_shost(struct lpfc_hba *phba)
4878{
4879	struct lpfc_vport *vport;
4880	struct Scsi_Host  *shost;
4881
4882	/* Initialize HBA FC structure */
4883	phba->fc_edtov = FF_DEF_EDTOV;
4884	phba->fc_ratov = FF_DEF_RATOV;
4885	phba->fc_altov = FF_DEF_ALTOV;
4886	phba->fc_arbtov = FF_DEF_ARBTOV;
4887
4888	atomic_set(&phba->sdev_cnt, 0);
4889	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4890	if (!vport)
4891		return -ENODEV;
4892
4893	shost = lpfc_shost_from_vport(vport);
4894	phba->pport = vport;
4895	lpfc_debugfs_initialize(vport);
4896	/* Put reference to SCSI host to driver's device private data */
4897	pci_set_drvdata(phba->pcidev, shost);
4898
4899	return 0;
4900}
4901
4902/**
4903 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4904 * @phba: pointer to lpfc hba data structure.
4905 *
4906 * This routine is invoked to destroy HBA physical port and the associated
4907 * SCSI host.
4908 **/
4909static void
4910lpfc_destroy_shost(struct lpfc_hba *phba)
4911{
4912	struct lpfc_vport *vport = phba->pport;
4913
4914	/* Destroy physical port that associated with the SCSI host */
4915	destroy_port(vport);
4916
4917	return;
4918}
4919
4920/**
4921 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4922 * @phba: pointer to lpfc hba data structure.
4923 * @shost: the shost to be used to detect Block guard settings.
4924 *
4925 * This routine sets up the local Block guard protocol settings for @shost.
4926 * This routine also allocates memory for debugging bg buffers.
4927 **/
4928static void
4929lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4930{
4931	int pagecnt = 10;
4932	if (lpfc_prot_mask && lpfc_prot_guard) {
4933		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4934				"1478 Registering BlockGuard with the "
4935				"SCSI layer\n");
4936		scsi_host_set_prot(shost, lpfc_prot_mask);
4937		scsi_host_set_guard(shost, lpfc_prot_guard);
4938	}
4939	if (!_dump_buf_data) {
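		/*
		 * pagecnt is passed to __get_free_pages() as an
		 * allocation order: try 2^10 contiguous pages first and
		 * halve the request (decrement the order) on failure.
		 */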
4940		while (pagecnt) {
4941			spin_lock_init(&_dump_buf_lock);
4942			_dump_buf_data =
4943				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4944			if (_dump_buf_data) {
4945				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4946					"9043 BLKGRD: allocated %d pages for "
4947				       "_dump_buf_data at 0x%p\n",
4948				       (1 << pagecnt), _dump_buf_data);
4949				_dump_buf_data_order = pagecnt;
4950				memset(_dump_buf_data, 0,
4951				       ((1 << PAGE_SHIFT) << pagecnt));
4952				break;
4953			} else
4954				--pagecnt;
4955		}
4956		if (!_dump_buf_data_order)
4957			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4958				"9044 BLKGRD: ERROR unable to allocate "
4959			       "memory for hexdump\n");
4960	} else
4961		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4962			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4963		       "\n", _dump_buf_data);
4964	if (!_dump_buf_dif) {
4965		while (pagecnt) {
4966			_dump_buf_dif =
4967				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4968			if (_dump_buf_dif) {
4969				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4970					"9046 BLKGRD: allocated %d pages for "
4971				       "_dump_buf_dif at 0x%p\n",
4972				       (1 << pagecnt), _dump_buf_dif);
4973				_dump_buf_dif_order = pagecnt;
4974				memset(_dump_buf_dif, 0,
4975				       ((1 << PAGE_SHIFT) << pagecnt));
4976				break;
4977			} else
4978				--pagecnt;
4979		}
4980		if (!_dump_buf_dif_order)
4981			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4982			"9047 BLKGRD: ERROR unable to allocate "
4983			       "memory for hexdump\n");
4984	} else
4985		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4986			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4987		       _dump_buf_dif);
4988}
4989
4990/**
4991 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4992 * @phba: pointer to lpfc hba data structure.
4993 *
4994 * This routine is invoked to perform all the necessary post initialization
4995 * setup for the device.
4996 **/
4997static void
4998lpfc_post_init_setup(struct lpfc_hba *phba)
4999{
5000	struct Scsi_Host  *shost;
5001	struct lpfc_adapter_event_header adapter_event;
5002
5003	/* Get the default values for Model Name and Description */
5004	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5005
5006	/*
5007	 * hba setup may have changed the hba_queue_depth so we need to
5008	 * adjust the value of can_queue.
5009	 */
5010	shost = pci_get_drvdata(phba->pcidev);
5011	shost->can_queue = phba->cfg_hba_queue_depth - 10;
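	/*
	 * The -10 holds back a small headroom of commands so the SCSI
	 * midlayer never queues up to the full HBA queue depth.
	 */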
5012	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5013		lpfc_setup_bg(phba, shost);
5014
5015	lpfc_host_attrib_init(shost);
5016
5017	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5018		spin_lock_irq(shost->host_lock);
5019		lpfc_poll_start_timer(phba);
5020		spin_unlock_irq(shost->host_lock);
5021	}
5022
5023	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5024			"0428 Perform SCSI scan\n");
5025	/* Send board arrival event to upper layer */
5026	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5027	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5028	fc_host_post_vendor_event(shost, fc_get_event_number(),
5029				  sizeof(adapter_event),
5030				  (char *) &adapter_event,
5031				  LPFC_NL_VENDOR_ID);
5032	return;
5033}
5034
5035/**
5036 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5037 * @phba: pointer to lpfc hba data structure.
5038 *
5039 * This routine is invoked to set up the PCI device memory space for device
5040 * with SLI-3 interface spec.
5041 *
5042 * Return codes
5043 * 	0 - successful
5044 * 	other values - error
5045 **/
5046static int
5047lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5048{
5049	struct pci_dev *pdev;
5050	unsigned long bar0map_len, bar2map_len;
5051	int i, hbq_count;
5052	void *ptr;
5053	int error = -ENODEV;
5054
5055	/* Obtain PCI device reference */
5056	if (!phba->pcidev)
5057		return error;
5058	else
5059		pdev = phba->pcidev;
5060
5061	/* Set the device DMA mask size */
5062	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5063	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5064		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5065		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5066			return error;
5067		}
5068	}
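	/*
	 * The fallback above prefers 64-bit DMA addressing and drops to
	 * a 32-bit mask only when the platform cannot satisfy 64-bit.
	 */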
5069
5070	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5071	 * required by each mapping.
5072	 */
5073	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5074	bar0map_len = pci_resource_len(pdev, 0);
5075
5076	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5077	bar2map_len = pci_resource_len(pdev, 2);
5078
5079	/* Map HBA SLIM to a kernel virtual address. */
5080	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5081	if (!phba->slim_memmap_p) {
5082		dev_printk(KERN_ERR, &pdev->dev,
5083			   "ioremap failed for SLIM memory.\n");
5084		goto out;
5085	}
5086
5087	/* Map HBA Control Registers to a kernel virtual address. */
5088	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5089	if (!phba->ctrl_regs_memmap_p) {
5090		dev_printk(KERN_ERR, &pdev->dev,
5091			   "ioremap failed for HBA control registers.\n");
5092		goto out_iounmap_slim;
5093	}
5094
5095	/* Allocate memory for SLI-2 structures */
5096	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5097					       SLI2_SLIM_SIZE,
5098					       &phba->slim2p.phys,
5099					       GFP_KERNEL);
5100	if (!phba->slim2p.virt)
5101		goto out_iounmap;
5102
5103	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5104	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5105	phba->mbox_ext = (phba->slim2p.virt +
5106		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5107	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5108	phba->IOCBs = (phba->slim2p.virt +
5109		       offsetof(struct lpfc_sli2_slim, IOCBs));
5110
5111	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5112						 lpfc_sli_hbq_size(),
5113						 &phba->hbqslimp.phys,
5114						 GFP_KERNEL);
5115	if (!phba->hbqslimp.virt)
5116		goto out_free_slim;
5117
5118	hbq_count = lpfc_sli_hbq_count();
5119	ptr = phba->hbqslimp.virt;
5120	for (i = 0; i < hbq_count; ++i) {
5121		phba->hbqs[i].hbq_virt = ptr;
5122		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5123		ptr += (lpfc_hbq_defs[i]->entry_count *
5124			sizeof(struct lpfc_hbq_entry));
5125	}
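	/*
	 * Each HBQ's host-memory area is carved out of the single
	 * hbqslimp allocation above, entry_count entries per ring.
	 */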
5126	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5127	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5128
5129	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5130
5131	INIT_LIST_HEAD(&phba->rb_pend_list);
5132
5133	phba->MBslimaddr = phba->slim_memmap_p;
5134	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5135	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5136	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5137	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5138
5139	return 0;
5140
5141out_free_slim:
5142	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5143			  phba->slim2p.virt, phba->slim2p.phys);
5144out_iounmap:
5145	iounmap(phba->ctrl_regs_memmap_p);
5146out_iounmap_slim:
5147	iounmap(phba->slim_memmap_p);
5148out:
5149	return error;
5150}
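
/*
 * Editorial sketch, not part of the driver: the 64-bit-then-32-bit DMA
 * mask negotiation used above, isolated into one helper. The helper
 * name is hypothetical; the calls are the standard PCI DMA mask APIs.
 */
static int lpfc_example_set_dma_masks(struct pci_dev *pdev)
{
	/* Prefer full 64-bit addressing for streaming and coherent DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;
	/* Fall back to 32-bit addressing when 64-bit is unavailable */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;
	return -ENODEV;	/* no usable DMA addressing mode */
}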
5151
5152/**
5153 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5154 * @phba: pointer to lpfc hba data structure.
5155 *
5156 * This routine is invoked to unset the PCI device memory space for device
5157 * with SLI-3 interface spec.
5158 **/
5159static void
5160lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5161{
5162	struct pci_dev *pdev;
5163
5164	/* Obtain PCI device reference */
5165	if (!phba->pcidev)
5166		return;
5167	else
5168		pdev = phba->pcidev;
5169
5170	/* Free coherent DMA memory allocated */
5171	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5172			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5173	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5174			  phba->slim2p.virt, phba->slim2p.phys);
5175
5176	/* I/O memory unmap */
5177	iounmap(phba->ctrl_regs_memmap_p);
5178	iounmap(phba->slim_memmap_p);
5179
5180	return;
5181}
5182
5183/**
5184 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5185 * @phba: pointer to lpfc hba data structure.
5186 *
5187 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5188 * done and check status.
5189 *
5190 * Return 0 if successful, otherwise -ENODEV.
5191 **/
5192int
5193lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5194{
5195	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5196	int i, port_error = -ENODEV;
5197
5198	if (!phba->sli4_hba.STAregaddr)
5199		return -ENODEV;
5200
5201	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5202	for (i = 0; i < 3000; i++) {
5203		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5204		/* Encounter fatal POST error, break out */
5205		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5206			port_error = -ENODEV;
5207			break;
5208		}
5209		if (bf_get(lpfc_hst_state_port_status, &sta_reg) ==
5210		    LPFC_POST_STAGE_ARMFW_READY) {
5211			port_error = 0;
5212			break;
5213		}
5214		msleep(10);
5215	}
5216
5217	if (port_error)
5218		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5219			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5220			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5221			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5222			bf_get(lpfc_hst_state_perr, &sta_reg),
5223			bf_get(lpfc_hst_state_sfi, &sta_reg),
5224			bf_get(lpfc_hst_state_nip, &sta_reg),
5225			bf_get(lpfc_hst_state_ipc, &sta_reg),
5226			bf_get(lpfc_hst_state_xrom, &sta_reg),
5227			bf_get(lpfc_hst_state_dl, &sta_reg),
5228			bf_get(lpfc_hst_state_port_status, &sta_reg));
5229
5230	/* Log device information */
5231	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5232	if (bf_get(lpfc_sli_intf_valid,
5233		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5234		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5235				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5236				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5237				bf_get(lpfc_sli_intf_sli_family,
5238				       &phba->sli4_hba.sli_intf),
5239				bf_get(lpfc_sli_intf_slirev,
5240				       &phba->sli4_hba.sli_intf),
5241				bf_get(lpfc_sli_intf_featurelevel1,
5242				       &phba->sli4_hba.sli_intf),
5243				bf_get(lpfc_sli_intf_featurelevel2,
5244				       &phba->sli4_hba.sli_intf));
5245	}
5246	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5247	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5248	/* On unrecoverable error, log the error message and return error */
5249	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5250	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5251	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5252	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5253		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5254				"1422 HBA Unrecoverable error: "
5255				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5256				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5257				uerrlo_reg.word0, uerrhi_reg.word0,
5258				phba->sli4_hba.ue_mask_lo,
5259				phba->sli4_hba.ue_mask_hi);
5260		return -ENODEV;
5261	}
5262
5263	return port_error;
5264}
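
/*
 * Editorial sketch, not part of the driver: the bounded register-poll
 * pattern used by lpfc_sli4_post_status_check() above. 3000 polls at
 * 10 ms apiece bound the wait at roughly 30 seconds. The helper name
 * and the ready/error encoding are hypothetical placeholders.
 */
static int lpfc_example_poll_ready(void __iomem *reg, u32 ready_val,
				   u32 fatal_mask)
{
	int i;
	u32 val;

	for (i = 0; i < 3000; i++) {	/* 3000 * 10 ms ~= 30 seconds */
		val = readl(reg);
		if (val & fatal_mask)	/* fatal error: stop polling */
			return -ENODEV;
		if (val == ready_val)	/* device reports ready */
			return 0;
		msleep(10);
	}
	return -ENODEV;			/* timed out */
}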
5265
5266/**
5267 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5268 * @phba: pointer to lpfc hba data structure.
5269 *
5270 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5271 * memory map.
5272 **/
5273static void
5274lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5275{
5276	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5277					LPFC_UERR_STATUS_LO;
5278	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5279					LPFC_UERR_STATUS_HI;
5280	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5281					LPFC_UE_MASK_LO;
5282	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5283					LPFC_UE_MASK_HI;
5284	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5285					LPFC_SLI_INTF;
5286}
5287
5288/**
5289 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5290 * @phba: pointer to lpfc hba data structure.
5291 *
5292 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5293 * memory map.
5294 **/
5295static void
5296lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5297{
5299	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5300				    LPFC_HST_STATE;
5301	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5302				    LPFC_HST_ISR0;
5303	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5304				    LPFC_HST_IMR0;
5305	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5306				     LPFC_HST_ISCR0;
5307	return;
5308}
5309
5310/**
5311 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5312 * @phba: pointer to lpfc hba data structure.
5313 * @vf: virtual function number
5314 *
5315 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5316 * based on the given virtual function number, @vf.
5317 *
5318 * Return 0 if successful, otherwise -ENODEV.
5319 **/
5320static int
5321lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5322{
5323	if (vf > LPFC_VIR_FUNC_MAX)
5324		return -ENODEV;
5325
5326	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5327				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5328	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5329				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5330	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5331				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5332	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5333				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5334	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5335				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5336	return 0;
5337}
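
/*
 * Editorial sketch, not part of the driver: each virtual function owns
 * one LPFC_VFR_PAGE_SIZE page of doorbells in BAR2, so a doorbell for
 * VF n lives at drbl_base + n * LPFC_VFR_PAGE_SIZE + register_offset,
 * exactly as computed above. The helper name is hypothetical.
 */
static void __iomem *lpfc_example_vf_doorbell(void __iomem *drbl_base,
					      uint32_t vf,
					      uint32_t reg_offset)
{
	return drbl_base + vf * LPFC_VFR_PAGE_SIZE + reg_offset;
}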
5338
5339/**
5340 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5341 * @phba: pointer to lpfc hba data structure.
5342 *
5343 * This routine is invoked to create the bootstrap mailbox
5344 * region consistent with the SLI-4 interface spec.  This
5345 * routine allocates all memory necessary to communicate
5346 * mailbox commands to the port and sets up all alignment
5347 * needs.  No locks are expected to be held when calling
5348 * this routine.
5349 *
5350 * Return codes
5351 * 	0 - successful
5352 * 	-ENOMEM - could not allocate memory.
5353 **/
5354static int
5355lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5356{
5357	uint32_t bmbx_size;
5358	struct lpfc_dmabuf *dmabuf;
5359	struct dma_address *dma_address;
5360	uint32_t pa_addr;
5361	uint64_t phys_addr;
5362
5363	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5364	if (!dmabuf)
5365		return -ENOMEM;
5366
5367	/*
5368	 * The bootstrap mailbox region is comprised of 2 parts
5369	 * plus an alignment restriction of 16 bytes.
5370	 */
5371	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5372	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5373					  bmbx_size,
5374					  &dmabuf->phys,
5375					  GFP_KERNEL);
5376	if (!dmabuf->virt) {
5377		kfree(dmabuf);
5378		return -ENOMEM;
5379	}
5380	memset(dmabuf->virt, 0, bmbx_size);
5381
5382	/*
5383	 * Initialize the bootstrap mailbox pointers now so that the register
5384	 * operations are simple later.  The mailbox dma address is required
5385	 * to be 16-byte aligned.  Also align the virtual memory as each
5386	 * mailbox is copied into the bmbx mailbox region before issuing the
5387	 * command to the port.
5388	 */
5389	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5390	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5391
5392	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5393					      LPFC_ALIGN_16_BYTE);
5394	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5395					      LPFC_ALIGN_16_BYTE);
5396
5397	/*
5398	 * Set the high and low physical addresses now.  The SLI4 alignment
5399	 * requirement is 16 bytes and the mailbox is posted to the port
5400	 * as two 30-bit addresses.  The other data is a bit marking whether
5401	 * the 30-bit address is the high or low address.
5402	 * Upcast bmbx aphys to 64bits so shift instruction compiles
5403	 * clean on 32 bit machines.
5404	 */
5405	dma_address = &phba->sli4_hba.bmbx.dma_address;
5406	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5407	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5408	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5409					   LPFC_BMBX_BIT1_ADDR_HI);
5410
5411	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5412	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5413					   LPFC_BMBX_BIT1_ADDR_LO);
5414	return 0;
5415}
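
/*
 * Editorial sketch, not part of the driver: how the two 30-bit halves
 * built above recombine into the original bus address. The mailbox is
 * 16-byte aligned, so bits 0-3 of aphys are zero; the low half carries
 * bits 4-33 (aphys >> 4) and the high half bits 34-63 (aphys >> 34).
 * The function name is hypothetical.
 */
static uint64_t lpfc_example_bmbx_reassemble(uint32_t hi30, uint32_t lo30)
{
	return ((uint64_t)(hi30 & 0x3fffffff) << 34) |
	       ((uint64_t)(lo30 & 0x3fffffff) << 4);
}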
5416
5417/**
5418 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5419 * @phba: pointer to lpfc hba data structure.
5420 *
5421 * This routine is invoked to teardown the bootstrap mailbox
5422 * region and release all host resources. This routine requires
5423 * the caller to ensure all mailbox commands have been recovered, no
5424 * additional mailbox commands are sent, and interrupts are disabled
5425 * before calling this routine.
5426 *
5427 **/
5428static void
5429lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5430{
5431	dma_free_coherent(&phba->pcidev->dev,
5432			  phba->sli4_hba.bmbx.bmbx_size,
5433			  phba->sli4_hba.bmbx.dmabuf->virt,
5434			  phba->sli4_hba.bmbx.dmabuf->phys);
5435
5436	kfree(phba->sli4_hba.bmbx.dmabuf);
5437	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5438}
5439
5440/**
5441 * lpfc_sli4_read_config - Get the config parameters.
5442 * @phba: pointer to lpfc hba data structure.
5443 *
5444 * This routine is invoked to read the configuration parameters from the HBA.
5445 * The configuration parameters are used to set the base and maximum values
5446 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5447 * allocation for the port.
5448 *
5449 * Return codes
5450 * 	0 - successful
5451 * 	-ENOMEM - No available memory
5452 *      -EIO - The mailbox failed to complete successfully.
5453 **/
5454static int
5455lpfc_sli4_read_config(struct lpfc_hba *phba)
5456{
5457	LPFC_MBOXQ_t *pmb;
5458	struct lpfc_mbx_read_config *rd_config;
5459	uint32_t rc = 0;
5460
5461	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5462	if (!pmb) {
5463		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5464				"2011 Unable to allocate memory for issuing "
5465				"SLI_CONFIG_SPECIAL mailbox command\n");
5466		return -ENOMEM;
5467	}
5468
5469	lpfc_read_config(phba, pmb);
5470
5471	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5472	if (rc != MBX_SUCCESS) {
5473		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5474			"2012 Mailbox failed, mbxCmd x%x "
5475			"READ_CONFIG, mbxStatus x%x\n",
5476			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5477			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5478		rc = -EIO;
5479	} else {
5480		rd_config = &pmb->u.mqe.un.rd_config;
5481		phba->sli4_hba.max_cfg_param.max_xri =
5482			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5483		phba->sli4_hba.max_cfg_param.xri_base =
5484			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5485		phba->sli4_hba.max_cfg_param.max_vpi =
5486			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5487		phba->sli4_hba.max_cfg_param.vpi_base =
5488			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5489		phba->sli4_hba.max_cfg_param.max_rpi =
5490			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5491		phba->sli4_hba.max_cfg_param.rpi_base =
5492			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5493		phba->sli4_hba.max_cfg_param.max_vfi =
5494			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5495		phba->sli4_hba.max_cfg_param.vfi_base =
5496			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5497		phba->sli4_hba.max_cfg_param.max_fcfi =
5498			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5499		phba->sli4_hba.max_cfg_param.fcfi_base =
5500			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5501		phba->sli4_hba.max_cfg_param.max_eq =
5502			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5503		phba->sli4_hba.max_cfg_param.max_rq =
5504			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5505		phba->sli4_hba.max_cfg_param.max_wq =
5506			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5507		phba->sli4_hba.max_cfg_param.max_cq =
5508			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5509		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5510		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5511		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5512		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5513		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5514		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5515				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5516		phba->max_vports = phba->max_vpi;
5517		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5518				"2003 cfg params XRI(B:%d M:%d), "
5519				"VPI(B:%d M:%d) "
5520				"VFI(B:%d M:%d) "
5521				"RPI(B:%d M:%d) "
5522				"FCFI(B:%d M:%d)\n",
5523				phba->sli4_hba.max_cfg_param.xri_base,
5524				phba->sli4_hba.max_cfg_param.max_xri,
5525				phba->sli4_hba.max_cfg_param.vpi_base,
5526				phba->sli4_hba.max_cfg_param.max_vpi,
5527				phba->sli4_hba.max_cfg_param.vfi_base,
5528				phba->sli4_hba.max_cfg_param.max_vfi,
5529				phba->sli4_hba.max_cfg_param.rpi_base,
5530				phba->sli4_hba.max_cfg_param.max_rpi,
5531				phba->sli4_hba.max_cfg_param.fcfi_base,
5532				phba->sli4_hba.max_cfg_param.max_fcfi);
5533	}
5534	mempool_free(pmb, phba->mbox_mem_pool);
5535
5536	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
5537	if (phba->cfg_hba_queue_depth >
5538		(phba->sli4_hba.max_cfg_param.max_xri -
5539			lpfc_sli4_get_els_iocb_cnt(phba)))
5540		phba->cfg_hba_queue_depth =
5541			phba->sli4_hba.max_cfg_param.max_xri -
5542				lpfc_sli4_get_els_iocb_cnt(phba);
5543	return rc;
5544}
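
/*
 * Editorial sketch, not part of the driver: the queue-depth clamp at
 * the end of lpfc_sli4_read_config() expressed with min_t(). The HBA
 * queue depth may never exceed the XRIs left after the ELS IOCBs are
 * reserved. An equivalent formulation, shown for clarity only.
 */
static void lpfc_example_clamp_queue_depth(struct lpfc_hba *phba)
{
	uint32_t limit = phba->sli4_hba.max_cfg_param.max_xri -
			 lpfc_sli4_get_els_iocb_cnt(phba);

	phba->cfg_hba_queue_depth = min_t(uint32_t,
					  phba->cfg_hba_queue_depth, limit);
}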
5545
5546/**
5547 * lpfc_setup_endian_order - Notify the port of the host's endian order.
5548 * @phba: pointer to lpfc hba data structure.
5549 *
5550 * This routine is invoked to setup the host-side endian order to the
5551 * HBA consistent with the SLI-4 interface spec.
5552 *
5553 * Return codes
5554 * 	0 - successful
5555 * 	-ENOMEM - No available memory
5556 *      -EIO - The mailbox failed to complete successfully.
5557 **/
5558static int
5559lpfc_setup_endian_order(struct lpfc_hba *phba)
5560{
5561	LPFC_MBOXQ_t *mboxq;
5562	uint32_t rc = 0;
5563	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5564				      HOST_ENDIAN_HIGH_WORD1};
5565
5566	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5567	if (!mboxq) {
5568		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5569				"0492 Unable to allocate memory for issuing "
5570				"SLI_CONFIG_SPECIAL mailbox command\n");
5571		return -ENOMEM;
5572	}
5573
5574	/*
5575	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5576	 * words to contain special data values and no other data.
5577	 */
5578	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5579	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5580	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5581	if (rc != MBX_SUCCESS) {
5582		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5583				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
5584				"status x%x\n",
5585				rc);
5586		rc = -EIO;
5587	}
5588
5589	mempool_free(mboxq, phba->mbox_mem_pool);
5590	return rc;
5591}
5592
5593/**
5594 * lpfc_sli4_queue_create - Create all the SLI4 queues
5595 * @phba: pointer to lpfc hba data structure.
5596 *
5597 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5598 * operation. For each SLI4 queue type, the parameters such as queue entry
5599 * count (queue depth) shall be taken from the module parameter. For now,
5600 * we just use some constant number as a placeholder.
5601 *
5602 * Return codes
5603 *      0 - successful
5604 *      -ENOMEM - No available memory
5606 **/
5607static int
5608lpfc_sli4_queue_create(struct lpfc_hba *phba)
5609{
5610	struct lpfc_queue *qdesc;
5611	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5612	int cfg_fcp_wq_count;
5613	int cfg_fcp_eq_count;
5614
5615	/*
5616	 * Sanity check for configured queue parameters against the run-time
5617	 * device parameters
5618	 */
5619
5620	/* Sanity check on FCP fast-path WQ parameters */
5621	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5622	if (cfg_fcp_wq_count >
5623	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5624		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5625				   LPFC_SP_WQN_DEF;
5626		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5627			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5628					"2581 Not enough WQs (%d) from "
5629					"the pci function for supporting "
5630					"FCP WQs (%d)\n",
5631					phba->sli4_hba.max_cfg_param.max_wq,
5632					phba->cfg_fcp_wq_count);
5633			goto out_error;
5634		}
5635		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5636				"2582 Not enough WQs (%d) from the pci "
5637				"function for supporting the requested "
5638				"FCP WQs (%d), the actual FCP WQs can "
5639				"be supported: %d\n",
5640				phba->sli4_hba.max_cfg_param.max_wq,
5641				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5642	}
5643	/* The actual number of FCP work queues adopted */
5644	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5645
5646	/* Sanity check on FCP fast-path EQ parameters */
5647	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5648	if (cfg_fcp_eq_count >
5649	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5650		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5651				   LPFC_SP_EQN_DEF;
5652		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5653			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5654					"2574 Not enough EQs (%d) from the "
5655					"pci function for supporting FCP "
5656					"EQs (%d)\n",
5657					phba->sli4_hba.max_cfg_param.max_eq,
5658					phba->cfg_fcp_eq_count);
5659			goto out_error;
5660		}
5661		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5662				"2575 Not enough EQs (%d) from the pci "
5663				"function for supporting the requested "
5664				"FCP EQs (%d), the actual FCP EQs can "
5665				"be supported: %d\n",
5666				phba->sli4_hba.max_cfg_param.max_eq,
5667				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5668	}
5669	/* It does not make sense to have more EQs than WQs */
5670	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5671		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5672				"2593 The FCP EQ count(%d) cannot be greater "
5673				"than the FCP WQ count(%d), limiting the "
5674				"FCP EQ count to %d\n", cfg_fcp_eq_count,
5675				phba->cfg_fcp_wq_count,
5676				phba->cfg_fcp_wq_count);
5677		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5678	}
5679	/* The actual number of FCP event queues adopted */
5680	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5681	/* The overall number of event queues used */
5682	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5683
5684	/*
5685	 * Create Event Queues (EQs)
5686	 */
5687
5688	/* Get EQ depth from module parameter, fake the default for now */
5689	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5690	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5691
5692	/* Create slow path event queue */
5693	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5694				      phba->sli4_hba.eq_ecount);
5695	if (!qdesc) {
5696		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5697				"0496 Failed allocate slow-path EQ\n");
5698		goto out_error;
5699	}
5700	phba->sli4_hba.sp_eq = qdesc;
5701
5702	/* Create fast-path FCP Event Queue(s) */
5703	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5704			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5705	if (!phba->sli4_hba.fp_eq) {
5706		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5707				"2576 Failed allocate memory for fast-path "
5708				"EQ record array\n");
5709		goto out_free_sp_eq;
5710	}
5711	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5712		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5713					      phba->sli4_hba.eq_ecount);
5714		if (!qdesc) {
5715			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5716					"0497 Failed allocate fast-path EQ\n");
5717			goto out_free_fp_eq;
5718		}
5719		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5720	}
5721
5722	/*
5723	 * Create Complete Queues (CQs)
5724	 */
5725
5726	/* Get CQ depth from module parameter, fake the default for now */
5727	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5728	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5729
5730	/* Create slow-path Mailbox Command Complete Queue */
5731	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5732				      phba->sli4_hba.cq_ecount);
5733	if (!qdesc) {
5734		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5735				"0500 Failed allocate slow-path mailbox CQ\n");
5736		goto out_free_fp_eq;
5737	}
5738	phba->sli4_hba.mbx_cq = qdesc;
5739
5740	/* Create slow-path ELS Complete Queue */
5741	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5742				      phba->sli4_hba.cq_ecount);
5743	if (!qdesc) {
5744		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5745				"0501 Failed allocate slow-path ELS CQ\n");
5746		goto out_free_mbx_cq;
5747	}
5748	phba->sli4_hba.els_cq = qdesc;
5749
5751	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5752	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5753				phba->cfg_fcp_eq_count), GFP_KERNEL);
5754	if (!phba->sli4_hba.fcp_cq) {
5755		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5756				"2577 Failed allocate memory for fast-path "
5757				"CQ record array\n");
5758		goto out_free_els_cq;
5759	}
5760	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5761		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5762					      phba->sli4_hba.cq_ecount);
5763		if (!qdesc) {
5764			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5765					"0499 Failed allocate fast-path FCP "
5766					"CQ (%d)\n", fcp_cqidx);
5767			goto out_free_fcp_cq;
5768		}
5769		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5770	}
5771
5772	/* Create Mailbox Command Queue */
5773	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5774	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5775
5776	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5777				      phba->sli4_hba.mq_ecount);
5778	if (!qdesc) {
5779		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5780				"0505 Failed allocate slow-path MQ\n");
5781		goto out_free_fcp_cq;
5782	}
5783	phba->sli4_hba.mbx_wq = qdesc;
5784
5785	/*
5786	 * Create all the Work Queues (WQs)
5787	 */
5788	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5789	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5790
5791	/* Create slow-path ELS Work Queue */
5792	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5793				      phba->sli4_hba.wq_ecount);
5794	if (!qdesc) {
5795		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5796				"0504 Failed allocate slow-path ELS WQ\n");
5797		goto out_free_mbx_wq;
5798	}
5799	phba->sli4_hba.els_wq = qdesc;
5800
5801	/* Create fast-path FCP Work Queue(s) */
5802	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5803				phba->cfg_fcp_wq_count), GFP_KERNEL);
5804	if (!phba->sli4_hba.fcp_wq) {
5805		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806				"2578 Failed allocate memory for fast-path "
5807				"WQ record array\n");
5808		goto out_free_els_wq;
5809	}
5810	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5811		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5812					      phba->sli4_hba.wq_ecount);
5813		if (!qdesc) {
5814			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5815					"0503 Failed allocate fast-path FCP "
5816					"WQ (%d)\n", fcp_wqidx);
5817			goto out_free_fcp_wq;
5818		}
5819		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5820	}
5821
5822	/*
5823	 * Create Receive Queue (RQ)
5824	 */
5825	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5826	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5827
5828	/* Create Receive Queue for header */
5829	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5830				      phba->sli4_hba.rq_ecount);
5831	if (!qdesc) {
5832		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5833				"0506 Failed allocate receive HRQ\n");
5834		goto out_free_fcp_wq;
5835	}
5836	phba->sli4_hba.hdr_rq = qdesc;
5837
5838	/* Create Receive Queue for data */
5839	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5840				      phba->sli4_hba.rq_ecount);
5841	if (!qdesc) {
5842		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5843				"0507 Failed allocate receive DRQ\n");
5844		goto out_free_hdr_rq;
5845	}
5846	phba->sli4_hba.dat_rq = qdesc;
5847
5848	return 0;
5849
5850out_free_hdr_rq:
5851	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5852	phba->sli4_hba.hdr_rq = NULL;
5853out_free_fcp_wq:
5854	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5855		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5856		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5857	}
5858	kfree(phba->sli4_hba.fcp_wq);
5859out_free_els_wq:
5860	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5861	phba->sli4_hba.els_wq = NULL;
5862out_free_mbx_wq:
5863	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5864	phba->sli4_hba.mbx_wq = NULL;
5865out_free_fcp_cq:
5866	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5867		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5868		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5869	}
5870	kfree(phba->sli4_hba.fcp_cq);
5871out_free_els_cq:
5872	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5873	phba->sli4_hba.els_cq = NULL;
5874out_free_mbx_cq:
5875	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5876	phba->sli4_hba.mbx_cq = NULL;
5877out_free_fp_eq:
5878	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5879		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5880		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5881	}
5882	kfree(phba->sli4_hba.fp_eq);
5883out_free_sp_eq:
5884	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5885	phba->sli4_hba.sp_eq = NULL;
5886out_error:
5887	return -ENOMEM;
5888}
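
/*
 * Editorial sketch, not part of the driver: the reverse-order unwind
 * idiom used by lpfc_sli4_queue_create() above. Resources are acquired
 * in order and the goto labels release them in exact reverse order, so
 * a failure at any step frees only what was already allocated. All
 * names here are hypothetical.
 */
static int lpfc_example_create_pair(void **a, void **b)
{
	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		goto out_error;
	*b = kzalloc(64, GFP_KERNEL);
	if (!*b)
		goto out_free_a;
	return 0;

out_free_a:
	kfree(*a);
	*a = NULL;
out_error:
	return -ENOMEM;
}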
5889
5890/**
5891 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5892 * @phba: pointer to lpfc hba data structure.
5893 *
5894 * This routine is invoked to release all the SLI4 queues allocated for the
5895 * FCoE HBA operation.
5901 **/
5902static void
5903lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5904{
5905	int fcp_qidx;
5906
5907	/* Release mailbox command work queue */
5908	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5909	phba->sli4_hba.mbx_wq = NULL;
5910
5911	/* Release ELS work queue */
5912	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5913	phba->sli4_hba.els_wq = NULL;
5914
5915	/* Release FCP work queue */
5916	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5917		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5918	kfree(phba->sli4_hba.fcp_wq);
5919	phba->sli4_hba.fcp_wq = NULL;
5920
5921	/* Release unsolicited receive queue */
5922	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5923	phba->sli4_hba.hdr_rq = NULL;
5924	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5925	phba->sli4_hba.dat_rq = NULL;
5926
5927	/* Release ELS complete queue */
5928	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5929	phba->sli4_hba.els_cq = NULL;
5930
5931	/* Release mailbox command complete queue */
5932	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5933	phba->sli4_hba.mbx_cq = NULL;
5934
5935	/* Release FCP response complete queue */
5936	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5937		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5938	kfree(phba->sli4_hba.fcp_cq);
5939	phba->sli4_hba.fcp_cq = NULL;
5940
5941	/* Release fast-path event queue */
5942	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5943		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5944	kfree(phba->sli4_hba.fp_eq);
5945	phba->sli4_hba.fp_eq = NULL;
5946
5947	/* Release slow-path event queue */
5948	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5949	phba->sli4_hba.sp_eq = NULL;
5950
5951	return;
5952}
5953
5954/**
5955 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5956 * @phba: pointer to lpfc hba data structure.
5957 *
5958 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5959 * operation.
5960 *
5961 * Return codes
5962 *      0 - successful
5963 *      -ENOMEM - No available memory
5964 *      -EIO - The mailbox failed to complete successfully.
5965 **/
5966int
5967lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5968{
5969	int rc = -ENOMEM;
5970	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5971	int fcp_cq_index = 0;
5972
5973	/*
5974	 * Set up Event Queues (EQs)
5975	 */
5976
5977	/* Set up slow-path event queue */
5978	if (!phba->sli4_hba.sp_eq) {
5979		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5980				"0520 Slow-path EQ not allocated\n");
5981		goto out_error;
5982	}
5983	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5984			    LPFC_SP_DEF_IMAX);
5985	if (rc) {
5986		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5987				"0521 Failed setup of slow-path EQ: "
5988				"rc = 0x%x\n", rc);
5989		goto out_error;
5990	}
5991	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5992			"2583 Slow-path EQ setup: queue-id=%d\n",
5993			phba->sli4_hba.sp_eq->queue_id);
5994
5995	/* Set up fast-path event queue */
5996	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5997		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5998			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5999					"0522 Fast-path EQ (%d) not "
6000					"allocated\n", fcp_eqidx);
6001			goto out_destroy_fp_eq;
6002		}
6003		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6004				    phba->cfg_fcp_imax);
6005		if (rc) {
6006			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6007					"0523 Failed setup of fast-path EQ "
6008					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6009			goto out_destroy_fp_eq;
6010		}
6011		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6012				"2584 Fast-path EQ setup: "
6013				"queue[%d]-id=%d\n", fcp_eqidx,
6014				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6015	}
6016
6017	/*
6018	 * Set up Complete Queues (CQs)
6019	 */
6020
6021	/* Set up slow-path MBOX Complete Queue as the first CQ */
6022	if (!phba->sli4_hba.mbx_cq) {
6023		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6024				"0528 Mailbox CQ not allocated\n");
6025		goto out_destroy_fp_eq;
6026	}
6027	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6028			    LPFC_MCQ, LPFC_MBOX);
6029	if (rc) {
6030		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6031				"0529 Failed setup of slow-path mailbox CQ: "
6032				"rc = 0x%x\n", rc);
6033		goto out_destroy_fp_eq;
6034	}
6035	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6036			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6037			phba->sli4_hba.mbx_cq->queue_id,
6038			phba->sli4_hba.sp_eq->queue_id);
6039
6040	/* Set up slow-path ELS Complete Queue */
6041	if (!phba->sli4_hba.els_cq) {
6042		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6043				"0530 ELS CQ not allocated\n");
6044		goto out_destroy_mbx_cq;
6045	}
6046	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6047			    LPFC_WCQ, LPFC_ELS);
6048	if (rc) {
6049		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6050				"0531 Failed setup of slow-path ELS CQ: "
6051				"rc = 0x%x\n", rc);
6052		goto out_destroy_mbx_cq;
6053	}
6054	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6055			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6056			phba->sli4_hba.els_cq->queue_id,
6057			phba->sli4_hba.sp_eq->queue_id);
6058
6059	/* Set up fast-path FCP Response Complete Queue */
6060	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6061		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6062			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6063					"0526 Fast-path FCP CQ (%d) not "
6064					"allocated\n", fcp_cqidx);
6065			goto out_destroy_fcp_cq;
6066		}
6067		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6068				    phba->sli4_hba.fp_eq[fcp_cqidx],
6069				    LPFC_WCQ, LPFC_FCP);
6070		if (rc) {
6071			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6072					"0527 Failed setup of fast-path FCP "
6073					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6074			goto out_destroy_fcp_cq;
6075		}
6076		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6077				"2588 FCP CQ setup: cq[%d]-id=%d, "
6078				"parent eq[%d]-id=%d\n",
6079				fcp_cqidx,
6080				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6081				fcp_cqidx,
6082				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6083	}
6084
6085	/*
6086	 * Set up all the Work Queues (WQs)
6087	 */
6088
6089	/* Set up Mailbox Command Queue */
6090	if (!phba->sli4_hba.mbx_wq) {
6091		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6092				"0538 Slow-path MQ not allocated\n");
6093		goto out_destroy_fcp_cq;
6094	}
6095	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6096			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6097	if (rc) {
6098		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6099				"0539 Failed setup of slow-path MQ: "
6100				"rc = 0x%x\n", rc);
6101		goto out_destroy_fcp_cq;
6102	}
6103	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6104			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6105			phba->sli4_hba.mbx_wq->queue_id,
6106			phba->sli4_hba.mbx_cq->queue_id);
6107
6108	/* Set up slow-path ELS Work Queue */
6109	if (!phba->sli4_hba.els_wq) {
6110		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6111				"0536 Slow-path ELS WQ not allocated\n");
6112		goto out_destroy_mbx_wq;
6113	}
6114	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6115			    phba->sli4_hba.els_cq, LPFC_ELS);
6116	if (rc) {
6117		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6118				"0537 Failed setup of slow-path ELS WQ: "
6119				"rc = 0x%x\n", rc);
6120		goto out_destroy_mbx_wq;
6121	}
6122	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6123			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6124			phba->sli4_hba.els_wq->queue_id,
6125			phba->sli4_hba.els_cq->queue_id);
6126
6127	/* Set up fast-path FCP Work Queue */
6128	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6129		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6130			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6131					"0534 Fast-path FCP WQ (%d) not "
6132					"allocated\n", fcp_wqidx);
6133			goto out_destroy_fcp_wq;
6134		}
6135		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6136				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6137				    LPFC_FCP);
6138		if (rc) {
6139			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6140					"0535 Failed setup of fast-path FCP "
6141					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6142			goto out_destroy_fcp_wq;
6143		}
6144		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6145				"2591 FCP WQ setup: wq[%d]-id=%d, "
6146				"parent cq[%d]-id=%d\n",
6147				fcp_wqidx,
6148				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6149				fcp_cq_index,
6150				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6151		/* Round robin FCP Work Queue's Completion Queue assignment */
6152		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6153	}
6154
6155	/*
6156	 * Create Receive Queue (RQ)
6157	 */
6158	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6159		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6160				"0540 Receive Queue not allocated\n");
6161		goto out_destroy_fcp_wq;
6162	}
6163	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6164			    phba->sli4_hba.els_cq, LPFC_USOL);
6165	if (rc) {
6166		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6167				"0541 Failed setup of Receive Queue: "
6168				"rc = 0x%x\n", rc);
6169		goto out_destroy_fcp_wq;
6170	}
6171	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6172			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6173			"parent cq-id=%d\n",
6174			phba->sli4_hba.hdr_rq->queue_id,
6175			phba->sli4_hba.dat_rq->queue_id,
6176			phba->sli4_hba.els_cq->queue_id);
6177	return 0;
6178
6179out_destroy_fcp_wq:
6180	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6181		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6182	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6183out_destroy_mbx_wq:
6184	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6185out_destroy_fcp_cq:
6186	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6187		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6188	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6189out_destroy_mbx_cq:
6190	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6191out_destroy_fp_eq:
6192	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6193		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6194	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6195out_error:
6196	return rc;
6197}
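
/*
 * Editorial worked example, not part of the driver: the round-robin
 * assignment above pairs WQ i with CQ (i % cfg_fcp_eq_count). With,
 * say, 4 FCP WQs and 2 FCP CQ/EQ pairs the mapping becomes
 *
 *	wq[0] -> cq[0], wq[1] -> cq[1], wq[2] -> cq[0], wq[3] -> cq[1]
 *
 * so completion traffic is spread evenly across the event queues.
 */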
6198
6199/**
6200 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6201 * @phba: pointer to lpfc hba data structure.
6202 *
6203 * This routine is invoked to unset (tear down) all the SLI4 queues set up
6204 * for the FCoE HBA operation.
6210 **/
6211void
6212lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6213{
6214	int fcp_qidx;
6215
6216	/* Unset mailbox command work queue */
6217	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6218	/* Unset ELS work queue */
6219	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6220	/* Unset unsolicited receive queue */
6221	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6222	/* Unset FCP work queue */
6223	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6224		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6225	/* Unset mailbox command complete queue */
6226	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6227	/* Unset ELS complete queue */
6228	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6229	/* Unset FCP response complete queue */
6230	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6231		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6232	/* Unset fast-path event queue */
6233	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6234		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6235	/* Unset slow-path event queue */
6236	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6237}
6238
6239/**
6240 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6241 * @phba: pointer to lpfc hba data structure.
6242 *
6243 * This routine is invoked to allocate and set up a pool of completion queue
6244 * events. The body of the completion queue event is a completion queue entry
6245 * (CQE). For now, this pool is used by the interrupt service routine to queue
6246 * the following HBA completion queue events for the worker thread to process:
6247 *   - Mailbox asynchronous events
6248 *   - Receive queue completion unsolicited events
6249 * Later, this can be used for all the slow-path events.
6250 *
6251 * Return codes
6252 *      0 - successful
6253 *      -ENOMEM - No available memory
6254 **/
6255static int
6256lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6257{
6258	struct lpfc_cq_event *cq_event;
6259	int i;
6260
6261	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6262		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6263		if (!cq_event)
6264			goto out_pool_create_fail;
6265		list_add_tail(&cq_event->list,
6266			      &phba->sli4_hba.sp_cqe_event_pool);
6267	}
6268	return 0;
6269
6270out_pool_create_fail:
6271	lpfc_sli4_cq_event_pool_destroy(phba);
6272	return -ENOMEM;
6273}
6274
6275/**
6276 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6277 * @phba: pointer to lpfc hba data structure.
6278 *
6279 * This routine is invoked to free the pool of completion queue events at
6280 * driver unload time. Note that it is the responsibility of the driver
6281 * cleanup routine to free all the outstanding completion-queue events
6282 * allocated from this pool back into the pool before invoking this routine
6283 * to destroy the pool.
6284 **/
6285static void
6286lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6287{
6288	struct lpfc_cq_event *cq_event, *next_cq_event;
6289
6290	list_for_each_entry_safe(cq_event, next_cq_event,
6291				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6292		list_del(&cq_event->list);
6293		kfree(cq_event);
6294	}
6295}
6296
6297/**
6298 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6299 * @phba: pointer to lpfc hba data structure.
6300 *
6301 * This routine is the lock-free version of the API invoked to allocate a
6302 * completion-queue event from the free pool.
6303 *
6304 * Return: Pointer to the newly allocated completion-queue event if successful
6305 *         NULL otherwise.
6306 **/
6307struct lpfc_cq_event *
6308__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6309{
6310	struct lpfc_cq_event *cq_event = NULL;
6311
6312	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6313			 struct lpfc_cq_event, list);
6314	return cq_event;
6315}
6316
6317/**
6318 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6319 * @phba: pointer to lpfc hba data structure.
6320 *
6321 * This routine is the locking version of the API invoked to allocate a
6322 * completion-queue event from the free pool.
6323 *
6324 * Return: Pointer to the newly allocated completion-queue event if successful
6325 *         NULL otherwise.
6326 **/
6327struct lpfc_cq_event *
6328lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6329{
6330	struct lpfc_cq_event *cq_event;
6331	unsigned long iflags;
6332
6333	spin_lock_irqsave(&phba->hbalock, iflags);
6334	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6335	spin_unlock_irqrestore(&phba->hbalock, iflags);
6336	return cq_event;
6337}
6338
6339/**
6340 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6341 * @phba: pointer to lpfc hba data structure.
6342 * @cq_event: pointer to the completion queue event to be freed.
6343 *
6344 * This routine is the lock-free version of the API invoked to release a
6345 * completion-queue event back into the free pool.
6346 **/
6347void
6348__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6349			     struct lpfc_cq_event *cq_event)
6350{
6351	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6352}
6353
6354/**
6355 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6356 * @phba: pointer to lpfc hba data structure.
6357 * @cq_event: pointer to the completion queue event to be freed.
6358 *
6359 * This routine is the locking version of the API invoked to release a
6360 * completion-queue event back into the free pool.
6361 **/
6362void
6363lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6364			   struct lpfc_cq_event *cq_event)
6365{
6366	unsigned long iflags;
6367	spin_lock_irqsave(&phba->hbalock, iflags);
6368	__lpfc_sli4_cq_event_release(phba, cq_event);
6369	spin_unlock_irqrestore(&phba->hbalock, iflags);
6370}
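
/*
 * Editorial sketch, not part of the driver: choosing between the locked
 * and lock-free pool APIs above. A caller already holding phba->hbalock
 * must use the __ variants; any other context uses the locking wrappers.
 * The function name is hypothetical.
 */
static void lpfc_example_cq_event_usage(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *evt;
	unsigned long iflags;

	/* Lock not held: use the locking wrappers */
	evt = lpfc_sli4_cq_event_alloc(phba);
	if (evt)
		lpfc_sli4_cq_event_release(phba, evt);

	/* Already under hbalock: use the lock-free variants */
	spin_lock_irqsave(&phba->hbalock, iflags);
	evt = __lpfc_sli4_cq_event_alloc(phba);
	if (evt)
		__lpfc_sli4_cq_event_release(phba, evt);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}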
6371
6372/**
6373 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6374 * @phba: pointer to lpfc hba data structure.
6375 *
6376 * This routine releases all the pending completion-queue events back
6377 * into the free pool for device reset.
6378 **/
6379static void
6380lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6381{
6382	LIST_HEAD(cqelist);
6383	struct lpfc_cq_event *cqe;
6384	unsigned long iflags;
6385
6386	/* Retrieve all the pending WCQEs from pending WCQE lists */
6387	spin_lock_irqsave(&phba->hbalock, iflags);
6388	/* Pending FCP XRI abort events */
6389	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6390			 &cqelist);
6391	/* Pending ELS XRI abort events */
6392	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6393			 &cqelist);
6394	/* Pending async events */
6395	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6396			 &cqelist);
6397	spin_unlock_irqrestore(&phba->hbalock, iflags);
6398
6399	while (!list_empty(&cqelist)) {
6400		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6401		lpfc_sli4_cq_event_release(phba, cqe);
6402	}
6403}
6404
6405/**
6406 * lpfc_pci_function_reset - Reset pci function.
6407 * @phba: pointer to lpfc hba data structure.
6408 *
6409 * This routine is invoked to request a PCI function reset. It destroys
6410 * all resources assigned to the PCI function that originates this request.
6411 *
6412 * Return codes
6413 *      0 - successful
6414 *      -ENOMEM - No available memory
6415 *      -ENXIO - The mailbox failed to complete successfully.
6416 **/
6417int
6418lpfc_pci_function_reset(struct lpfc_hba *phba)
6419{
6420	LPFC_MBOXQ_t *mboxq;
6421	uint32_t rc = 0;
6422	uint32_t shdr_status, shdr_add_status;
6423	union lpfc_sli4_cfg_shdr *shdr;
6424
6425	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6426	if (!mboxq) {
6427		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6428				"0494 Unable to allocate memory for issuing "
6429				"SLI_FUNCTION_RESET mailbox command\n");
6430		return -ENOMEM;
6431	}
6432
6433	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6434	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6435			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6436			 LPFC_SLI4_MBX_EMBED);
6437	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6438	shdr = (union lpfc_sli4_cfg_shdr *)
6439		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6440	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6441	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6442	if (rc != MBX_TIMEOUT)
6443		mempool_free(mboxq, phba->mbox_mem_pool);
6444	if (shdr_status || shdr_add_status || rc) {
6445		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6446				"0495 SLI_FUNCTION_RESET mailbox failed with "
6447				"status x%x add_status x%x, mbx status x%x\n",
6448				shdr_status, shdr_add_status, rc);
6449		rc = -ENXIO;
6450	}
6451	return rc;
6452}
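
/*
 * Editorial sketch, not part of the driver: the SLI4_CONFIG completion
 * check used above, folded into one hypothetical helper. Success
 * requires the issue-path return code and both common-header status
 * fields to be clear; on MBX_TIMEOUT the caller must not free the
 * mailbox, since the port may still complete it later.
 */
static int lpfc_example_sli4_cfg_status(LPFC_MBOXQ_t *mboxq, int rc)
{
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	return (rc || shdr_status || shdr_add_status) ? -ENXIO : 0;
}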
6453
6454/**
6455 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6456 * @phba: pointer to lpfc hba data structure.
6457 * @cnt: number of nop mailbox commands to send.
6458 *
6459 * This routine is invoked to send @cnt NOP mailbox commands and to wait
6460 * for each command to complete.
6461 *
6462 * Return: the number of NOP mailbox commands completed.
6463 **/
6464static int
6465lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6466{
6467	LPFC_MBOXQ_t *mboxq;
6468	int length, cmdsent;
6469	uint32_t mbox_tmo;
6470	uint32_t rc = 0;
6471	uint32_t shdr_status, shdr_add_status;
6472	union lpfc_sli4_cfg_shdr *shdr;
6473
6474	if (cnt == 0) {
6475		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6476				"2518 Requested to send 0 NOP mailbox cmd\n");
6477		return cnt;
6478	}
6479
6480	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6481	if (!mboxq) {
6482		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6483				"2519 Unable to allocate memory for issuing "
6484				"NOP mailbox command\n");
6485		return 0;
6486	}
6487
6488	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6489	length = (sizeof(struct lpfc_mbx_nop) -
6490		  sizeof(struct lpfc_sli4_cfg_mhdr));
6491	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6492			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6493
6494	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6495	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6496		if (!phba->sli4_hba.intr_enable)
6497			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6498		else
6499			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6500		if (rc == MBX_TIMEOUT)
6501			break;
6502		/* Check return status */
6503		shdr = (union lpfc_sli4_cfg_shdr *)
6504			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6505		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6506		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6507					 &shdr->response);
6508		if (shdr_status || shdr_add_status || rc) {
6509			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6510					"2520 NOP mailbox command failed "
6511					"status x%x add_status x%x mbx "
6512					"status x%x\n", shdr_status,
6513					shdr_add_status, rc);
6514			break;
6515		}
6516	}
6517
6518	if (rc != MBX_TIMEOUT)
6519		mempool_free(mboxq, phba->mbox_mem_pool);
6520
6521	return cmdsent;
6522}
6523
6524/**
6525 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6526 * @phba: pointer to lpfc hba data structure.
6527 *
6528 * This routine is invoked to set up the PCI device memory space for device
6529 * with SLI-4 interface spec.
6530 *
6531 * Return codes
6532 * 	0 - successful
6533 * 	other values - error
6534 **/
6535static int
6536lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6537{
6538	struct pci_dev *pdev;
6539	unsigned long bar0map_len, bar1map_len, bar2map_len;
6540	int error = -ENODEV;
6541
6542	/* Obtain PCI device reference */
6543	if (!phba->pcidev)
6544		return error;
6545	else
6546		pdev = phba->pcidev;
6547
6548	/* Set the device DMA mask size */
6549	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6550	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6551		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6552		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6553			return error;
6554		}
6555	}
6556
6557	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6558	 * number of bytes required by each mapping. They actually map to
6559	 * PCI BAR regions 0 or 1, 2, and 4 of the SLI4 device.
6560	 */
6561	if (pci_resource_start(pdev, 0)) {
6562		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6563		bar0map_len = pci_resource_len(pdev, 0);
6564	} else {
6565		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6566		bar0map_len = pci_resource_len(pdev, 1);
6567	}
6568	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6569	bar1map_len = pci_resource_len(pdev, 2);
6570
6571	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6572	bar2map_len = pci_resource_len(pdev, 4);
6573
6574	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6575	phba->sli4_hba.conf_regs_memmap_p =
6576				ioremap(phba->pci_bar0_map, bar0map_len);
6577	if (!phba->sli4_hba.conf_regs_memmap_p) {
6578		dev_printk(KERN_ERR, &pdev->dev,
6579			   "ioremap failed for SLI4 PCI config registers.\n");
6580		goto out;
6581	}
6582
6583	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6584	phba->sli4_hba.ctrl_regs_memmap_p =
6585				ioremap(phba->pci_bar1_map, bar1map_len);
6586	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6587		dev_printk(KERN_ERR, &pdev->dev,
6588			   "ioremap failed for SLI4 HBA control registers.\n");
6589		goto out_iounmap_conf;
6590	}
6591
6592	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6593	phba->sli4_hba.drbl_regs_memmap_p =
6594				ioremap(phba->pci_bar2_map, bar2map_len);
6595	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6596		dev_printk(KERN_ERR, &pdev->dev,
6597			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6598		goto out_iounmap_ctrl;
6599	}
6600
6601	/* Set up BAR0 PCI config space register memory map */
6602	lpfc_sli4_bar0_register_memmap(phba);
6603
6604	/* Set up BAR1 register memory map */
6605	lpfc_sli4_bar1_register_memmap(phba);
6606
6607	/* Set up BAR2 register memory map */
6608	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6609	if (error)
6610		goto out_iounmap_all;
6611
6612	return 0;
6613
6614out_iounmap_all:
6615	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6616out_iounmap_ctrl:
6617	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6618out_iounmap_conf:
6619	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6620out:
6621	return error;
6622}
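
/*
 * Editorial sketch, not part of the driver: the BAR fallback used above.
 * Depending on the SLI4 device, the config-space registers sit behind
 * PCI BAR region 0 or region 1, so setup probes region 0 first and falls
 * back to region 1 when region 0 is unimplemented (start address of 0).
 * The helper name is hypothetical.
 */
static resource_size_t lpfc_example_conf_bar(struct pci_dev *pdev,
					     unsigned long *map_len)
{
	if (pci_resource_start(pdev, 0)) {
		*map_len = pci_resource_len(pdev, 0);
		return pci_resource_start(pdev, 0);
	}
	*map_len = pci_resource_len(pdev, 1);
	return pci_resource_start(pdev, 1);
}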
6623
6624/**
6625 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6626 * @phba: pointer to lpfc hba data structure.
6627 *
6628 * This routine is invoked to unset the PCI device memory space for device
6629 * with SLI-4 interface spec.
6630 **/
6631static void
6632lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6633{
6634	struct pci_dev *pdev;
6635
6636	/* Obtain PCI device reference */
6637	if (!phba->pcidev)
6638		return;
6639	else
6640		pdev = phba->pcidev;
6641
6644	/* Unmap I/O memory space */
6645	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6646	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6647	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6648
6649	return;
6650}
6651
6652/**
6653 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6654 * @phba: pointer to lpfc hba data structure.
6655 *
6656 * This routine is invoked to enable the MSI-X interrupt vectors to device
6657 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6658 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6659 * invoked, enables either all or nothing, depending on the current
6660 * availability of PCI vector resources. The device driver is responsible
6661 * for calling the individual request_irq() to register each MSI-X vector
6662 * with an interrupt handler, which is done in this function. Note that
6663 * later, when the device is unloading, the driver should always call
6664 * free_irq() on all MSI-X vectors it has done request_irq() on before
6665 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
6666 * the device will be left with MSI-X enabled and its vectors leaked.
6667 *
6668 * Return codes
6669 *   0 - successful
6670 *   other values - error
6671 **/
6672static int
6673lpfc_sli_enable_msix(struct lpfc_hba *phba)
6674{
6675	int rc, i;
6676	LPFC_MBOXQ_t *pmb;
6677
6678	/* Set up MSI-X multi-message vectors */
6679	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6680		phba->msix_entries[i].entry = i;
6681
6682	/* Configure MSI-X capability structure */
6683	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6684				ARRAY_SIZE(phba->msix_entries));
6685	if (rc) {
6686		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6687				"0420 PCI enable MSI-X failed (%d)\n", rc);
6688		goto msi_fail_out;
6689	}
6690	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6691		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6692				"0477 MSI-X entry[%d]: vector=x%x "
6693				"message=%d\n", i,
6694				phba->msix_entries[i].vector,
6695				phba->msix_entries[i].entry);
6696	/*
6697	 * Assign MSI-X vectors to interrupt handlers
6698	 */
6699
6700	/* vector-0 is associated to slow-path handler */
6701	rc = request_irq(phba->msix_entries[0].vector,
6702			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6703			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6704	if (rc) {
6705		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6706				"0421 MSI-X slow-path request_irq failed "
6707				"(%d)\n", rc);
6708		goto msi_fail_out;
6709	}
6710
6711	/* vector-1 is associated to fast-path handler */
6712	rc = request_irq(phba->msix_entries[1].vector,
6713			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6714			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6715
6716	if (rc) {
6717		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6718				"0429 MSI-X fast-path request_irq failed "
6719				"(%d)\n", rc);
6720		goto irq_fail_out;
6721	}
6722
6723	/*
6724	 * Configure HBA MSI-X attention conditions to messages
6725	 */
6726	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6727
6728	if (!pmb) {
6729		rc = -ENOMEM;
6730		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731				"0474 Unable to allocate memory for issuing "
6732				"MBOX_CONFIG_MSI command\n");
6733		goto mem_fail_out;
6734	}
6735	rc = lpfc_config_msi(phba, pmb);
6736	if (rc)
6737		goto mbx_fail_out;
6738	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6739	if (rc != MBX_SUCCESS) {
6740		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6741				"0351 Config MSI mailbox command failed, "
6742				"mbxCmd x%x, mbxStatus x%x\n",
6743				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6744		goto mbx_fail_out;
6745	}
6746
6747	/* Free memory allocated for mailbox command */
6748	mempool_free(pmb, phba->mbox_mem_pool);
6749	return rc;
6750
6751mbx_fail_out:
6752	/* Free memory allocated for mailbox command */
6753	mempool_free(pmb, phba->mbox_mem_pool);
6754
6755mem_fail_out:
6756	/* free the irq already requested */
6757	free_irq(phba->msix_entries[1].vector, phba);
6758
6759irq_fail_out:
6760	/* free the irq already requested */
6761	free_irq(phba->msix_entries[0].vector, phba);
6762
6763msi_fail_out:
6764	/* Unconfigure MSI-X capability structure */
6765	pci_disable_msix(phba->pcidev);
6766	return rc;
6767}
6768
6769/**
6770 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6771 * @phba: pointer to lpfc hba data structure.
6772 *
6773 * This routine is invoked to release the MSI-X vectors and then disable the
6774 * MSI-X interrupt mode on a device with SLI-3 interface spec.
6775 **/
6776static void
6777lpfc_sli_disable_msix(struct lpfc_hba *phba)
6778{
6779	int i;
6780
6781	/* Free up MSI-X multi-message vectors */
6782	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6783		free_irq(phba->msix_entries[i].vector, phba);
6784	/* Disable MSI-X */
6785	pci_disable_msix(phba->pcidev);
6786
6787	return;
6788}
6789
6790/**
6791 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6792 * @phba: pointer to lpfc hba data structure.
6793 *
6794 * This routine is invoked to enable the MSI interrupt mode on a device with
6795 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6796 * enable the MSI vector. The device driver is responsible for calling
6797 * request_irq() to register the MSI vector with an interrupt handler, which
6798 * is done in this function.
6799 *
6800 * Return codes
6801 * 	0 - successful
6802 * 	other values - error
6803 **/
6804static int
6805lpfc_sli_enable_msi(struct lpfc_hba *phba)
6806{
6807	int rc;
6808
6809	rc = pci_enable_msi(phba->pcidev);
6810	if (!rc)
6811		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6812				"0462 PCI enable MSI mode success.\n");
6813	else {
6814		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6815				"0471 PCI enable MSI mode failed (%d)\n", rc);
6816		return rc;
6817	}
6818
6819	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6820			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6821	if (rc) {
6822		pci_disable_msi(phba->pcidev);
6823		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6824				"0478 MSI request_irq failed (%d)\n", rc);
6825	}
6826	return rc;
6827}
6828
6829/**
6830 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6831 * @phba: pointer to lpfc hba data structure.
6832 *
6833 * This routine is invoked to disable the MSI interrupt mode on a device with
6834 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
6835 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6836 * results in a BUG_ON() and leaves the device with MSI enabled, leaking its
6837 * vector.
6838 **/
6839static void
6840lpfc_sli_disable_msi(struct lpfc_hba *phba)
6841{
6842	free_irq(phba->pcidev->irq, phba);
6843	pci_disable_msi(phba->pcidev);
6844	return;
6845}
6846
6847/**
6848 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6849 * @phba: pointer to lpfc hba data structure.
6850 *
6851 * This routine is invoked to enable the device interrupt and associate the
6852 * driver's interrupt handler(s) with the interrupt vector(s) of a device with
6853 * SLI-3 interface spec. Depending on the interrupt mode configured for the
6854 * driver, the driver will try to fall back from the configured interrupt
6855 * mode to an interrupt mode supported by the platform, kernel, and device,
6856 * in the order of:
6857 * MSI-X -> MSI -> IRQ.
6858 *
6859 * Return codes
6860 *   0 - successful
6861 *   other values - error
6862 **/
6863static uint32_t
6864lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6865{
6866	uint32_t intr_mode = LPFC_INTR_ERROR;
6867	int retval;
6868
6869	if (cfg_mode == 2) {
6870		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6871		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6872		if (!retval) {
6873			/* Now, try to enable MSI-X interrupt mode */
6874			retval = lpfc_sli_enable_msix(phba);
6875			if (!retval) {
6876				/* Indicate initialization to MSI-X mode */
6877				phba->intr_type = MSIX;
6878				intr_mode = 2;
6879			}
6880		}
6881	}
6882
6883	/* Fallback to MSI if MSI-X initialization failed */
6884	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6885		retval = lpfc_sli_enable_msi(phba);
6886		if (!retval) {
6887			/* Indicate initialization to MSI mode */
6888			phba->intr_type = MSI;
6889			intr_mode = 1;
6890		}
6891	}
6892
6893	/* Fallback to INTx if both MSI-X/MSI initialization failed */
6894	if (phba->intr_type == NONE) {
6895		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6896				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6897		if (!retval) {
6898			/* Indicate initialization to INTx mode */
6899			phba->intr_type = INTx;
6900			intr_mode = 0;
6901		}
6902	}
6903	return intr_mode;
6904}
6905
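/*
 * The intr_mode value returned above encodes the active mode: 2 = MSI-X,
 * 1 = MSI, 0 = INTx, and LPFC_INTR_ERROR when no mode could be enabled.
 * A minimal caller-side sketch (illustrative only, mirroring the probe
 * path later in this file):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;
 *	phba->intr_mode = intr_mode;
 */
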
6906/**
6907 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6908 * @phba: pointer to lpfc hba data structure.
6909 *
6910 * This routine is invoked to disable device interrupt and disassociate the
6911 * driver's interrupt handler(s) from interrupt vector(s) to device with
6912 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6913 * release the interrupt vector(s) for the message signaled interrupt.
6914 **/
6915static void
6916lpfc_sli_disable_intr(struct lpfc_hba *phba)
6917{
6918	/* Disable the currently initialized interrupt mode */
6919	if (phba->intr_type == MSIX)
6920		lpfc_sli_disable_msix(phba);
6921	else if (phba->intr_type == MSI)
6922		lpfc_sli_disable_msi(phba);
6923	else if (phba->intr_type == INTx)
6924		free_irq(phba->pcidev->irq, phba);
6925
6926	/* Reset interrupt management states */
6927	phba->intr_type = NONE;
6928	phba->sli.slistat.sli_intr = 0;
6929
6930	return;
6931}
6932
6933/**
6934 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6935 * @phba: pointer to lpfc hba data structure.
6936 *
6937 * This routine is invoked to enable the MSI-X interrupt vectors for a
6938 * device with SLI-4 interface spec. The kernel function pci_enable_msix()
6939 * is called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6940 * invoked, enables either all or none of the requested vectors, depending
6941 * on the current availability of PCI vector resources. The device driver is
6942 * responsible for calling request_irq() to register each MSI-X vector with
6943 * an interrupt handler, which is done in this function. Note that later,
6944 * when the device is unloading, the driver should always call free_irq()
6945 * on all MSI-X vectors it has done request_irq() on before calling
6946 * pci_disable_msix(). Failure to do so results in a BUG_ON() and leaves
6947 * the device with MSI-X enabled, leaking its vectors.
6948 *
6949 * Return codes
6950 * 0 - successful
6951 * other values - error
6952 **/
6953static int
6954lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6955{
6956	int vectors, rc, index;
6957
6958	/* Set up MSI-X multi-message vectors */
6959	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6960		phba->sli4_hba.msix_entries[index].entry = index;
6961
6962	/* Configure MSI-X capability structure */
6963	vectors = phba->sli4_hba.cfg_eqn;
6964enable_msix_vectors:
6965	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6966			     vectors);
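	/*
	 * A positive return from pci_enable_msix() is the number of vectors
	 * currently available; retry with that count. A single vector is not
	 * enough here, since one vector is dedicated to the slow path and at
	 * least one more is needed for the fast path.
	 */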
6967	if (rc > 1) {
6968		vectors = rc;
6969		goto enable_msix_vectors;
6970	} else if (rc) {
6971		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6972				"0484 PCI enable MSI-X failed (%d)\n", rc);
6973		goto msi_fail_out;
6974	}
6975
6976	/* Log MSI-X vector assignment */
6977	for (index = 0; index < vectors; index++)
6978		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6979				"0489 MSI-X entry[%d]: vector=x%x "
6980				"message=%d\n", index,
6981				phba->sli4_hba.msix_entries[index].vector,
6982				phba->sli4_hba.msix_entries[index].entry);
6983	/*
6984	 * Assign MSI-X vectors to interrupt handlers
6985	 */
6986
6987	/* The first vector must be associated with the slow-path handler for MQ */
6988	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6989			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6990			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6991	if (rc) {
6992		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6993				"0485 MSI-X slow-path request_irq failed "
6994				"(%d)\n", rc);
6995		goto msi_fail_out;
6996	}
6997
6998	/* The rest of the vector(s) are associated to fast-path handler(s) */
6999	for (index = 1; index < vectors; index++) {
7000		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7001		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7002		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7003				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7004				 LPFC_FP_DRIVER_HANDLER_NAME,
7005				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7006		if (rc) {
7007			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7008					"0486 MSI-X fast-path (%d) "
7009					"request_irq failed (%d)\n", index, rc);
7010			goto cfg_fail_out;
7011		}
7012	}
7013	phba->sli4_hba.msix_vec_nr = vectors;
7014
7015	return rc;
7016
7017cfg_fail_out:
7018	/* free the irq already requested */
7019	for (--index; index >= 1; index--)
7020		free_irq(phba->sli4_hba.msix_entries[index].vector,
7021			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7022
7023	/* free the irq already requested */
7024	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7025
7026msi_fail_out:
7027	/* Unconfigure MSI-X capability structure */
7028	pci_disable_msix(phba->pcidev);
7029	return rc;
7030}
7031
7032/**
7033 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7034 * @phba: pointer to lpfc hba data structure.
7035 *
7036 * This routine is invoked to release the MSI-X vectors and then disable the
7037 * MSI-X interrupt mode on a device with SLI-4 interface spec.
7038 **/
7039static void
7040lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7041{
7042	int index;
7043
7044	/* Free up MSI-X multi-message vectors */
7045	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7046
7047	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7048		free_irq(phba->sli4_hba.msix_entries[index].vector,
7049			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7050
7051	/* Disable MSI-X */
7052	pci_disable_msix(phba->pcidev);
7053
7054	return;
7055}
7056
7057/**
7058 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7059 * @phba: pointer to lpfc hba data structure.
7060 *
7061 * This routine is invoked to enable the MSI interrupt mode on a device with
7062 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7063 * to enable the MSI vector. The device driver is responsible for calling
7064 * request_irq() to register the MSI vector with an interrupt handler,
7065 * which is done in this function.
7066 *
7067 * Return codes
7068 * 	0 - successful
7069 * 	other values - error
7070 **/
7071static int
7072lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7073{
7074	int rc, index;
7075
7076	rc = pci_enable_msi(phba->pcidev);
7077	if (!rc)
7078		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7079				"0487 PCI enable MSI mode success.\n");
7080	else {
7081		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7082				"0488 PCI enable MSI mode failed (%d)\n", rc);
7083		return rc;
7084	}
7085
7086	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7087			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7088	if (rc) {
7089		pci_disable_msi(phba->pcidev);
7090		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7091				"0490 MSI request_irq failed (%d)\n", rc);
7092		return rc;
7093	}
7094
7095	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7096		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7097		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7098	}
7099
7100	return 0;
7101}
7102
7103/**
7104 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7105 * @phba: pointer to lpfc hba data structure.
7106 *
7107 * This routine is invoked to disable the MSI interrupt mode on a device with
7108 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7109 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7110 * results in a BUG_ON() and leaves the device with MSI enabled, leaking its
7111 * vector.
7112 **/
7113static void
7114lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7115{
7116	free_irq(phba->pcidev->irq, phba);
7117	pci_disable_msi(phba->pcidev);
7118	return;
7119}
7120
7121/**
7122 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7123 * @phba: pointer to lpfc hba data structure.
7124 *
7125 * This routine is invoked to enable the device interrupt and associate the
7126 * driver's interrupt handler(s) with the interrupt vector(s) of a device
7127 * with SLI-4 interface spec. Depending on the interrupt mode configured for
7128 * the driver, the driver will try to fall back from the configured interrupt
7129 * mode to an interrupt mode supported by the platform, kernel, and device,
7130 * in the order of:
7131 * MSI-X -> MSI -> IRQ.
7132 *
7133 * Return codes
7134 * 	0 - successful
7135 * 	other values - error
7136 **/
7137static uint32_t
7138lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7139{
7140	uint32_t intr_mode = LPFC_INTR_ERROR;
7141	int retval, index;
7142
7143	if (cfg_mode == 2) {
7144		/* No preparation needed before conf_msi mbox cmd on SLI-4 */
7145		retval = 0;
7146		if (!retval) {
7147			/* Now, try to enable MSI-X interrupt mode */
7148			retval = lpfc_sli4_enable_msix(phba);
7149			if (!retval) {
7150				/* Indicate initialization to MSI-X mode */
7151				phba->intr_type = MSIX;
7152				intr_mode = 2;
7153			}
7154		}
7155	}
7156
7157	/* Fallback to MSI if MSI-X initialization failed */
7158	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7159		retval = lpfc_sli4_enable_msi(phba);
7160		if (!retval) {
7161			/* Indicate initialization to MSI mode */
7162			phba->intr_type = MSI;
7163			intr_mode = 1;
7164		}
7165	}
7166
7167	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7168	if (phba->intr_type == NONE) {
7169		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7170				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7171		if (!retval) {
7172			/* Indicate initialization to INTx mode */
7173			phba->intr_type = INTx;
7174			intr_mode = 0;
7175			for (index = 0; index < phba->cfg_fcp_eq_count;
7176			     index++) {
7177				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7178				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7179			}
7180		}
7181	}
7182	return intr_mode;
7183}
7184
7185/**
7186 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7187 * @phba: pointer to lpfc hba data structure.
7188 *
7189 * This routine is invoked to disable device interrupt and disassociate
7190 * the driver's interrupt handler(s) from the interrupt vector(s) of a device
7191 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7192 * will release the interrupt vector(s) for the message signaled interrupt.
7193 **/
7194static void
7195lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7196{
7197	/* Disable the currently initialized interrupt mode */
7198	if (phba->intr_type == MSIX)
7199		lpfc_sli4_disable_msix(phba);
7200	else if (phba->intr_type == MSI)
7201		lpfc_sli4_disable_msi(phba);
7202	else if (phba->intr_type == INTx)
7203		free_irq(phba->pcidev->irq, phba);
7204
7205	/* Reset interrupt management states */
7206	phba->intr_type = NONE;
7207	phba->sli.slistat.sli_intr = 0;
7208
7209	return;
7210}
7211
7212/**
7213 * lpfc_unset_hba - Unset SLI3 hba device initialization
7214 * @phba: pointer to lpfc hba data structure.
7215 *
7216 * This routine is invoked to unset the HBA device initialization steps for
7217 * a device with SLI-3 interface spec.
7218 **/
7219static void
7220lpfc_unset_hba(struct lpfc_hba *phba)
7221{
7222	struct lpfc_vport *vport = phba->pport;
7223	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7224
7225	spin_lock_irq(shost->host_lock);
7226	vport->load_flag |= FC_UNLOADING;
7227	spin_unlock_irq(shost->host_lock);
7228
7229	lpfc_stop_hba_timers(phba);
7230
7231	phba->pport->work_port_events = 0;
7232
7233	lpfc_sli_hba_down(phba);
7234
7235	lpfc_sli_brdrestart(phba);
7236
7237	lpfc_sli_disable_intr(phba);
7238
7239	return;
7240}
7241
7242/**
7243 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7244 * @phba: pointer to lpfc hba data structure.
7245 *
7246 * This routine is invoked to unset the HBA device initialization steps for
7247 * a device with SLI-4 interface spec.
7248 **/
7249static void
7250lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7251{
7252	struct lpfc_vport *vport = phba->pport;
7253	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7254
7255	spin_lock_irq(shost->host_lock);
7256	vport->load_flag |= FC_UNLOADING;
7257	spin_unlock_irq(shost->host_lock);
7258
7259	phba->pport->work_port_events = 0;
7260
7261	/* Stop the SLI4 device port */
7262	lpfc_stop_port(phba);
7263
7264	lpfc_sli4_disable_intr(phba);
7265
7266	/* Reset SLI4 HBA FCoE function */
7267	lpfc_pci_function_reset(phba);
7268
7269	return;
7270}
7271
7272/**
7273 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
7274 * @phba: Pointer to HBA context object.
7275 *
7276 * This function is called in the SLI4 code path to wait for the device's
7277 * XRI exchange busy conditions to complete. It checks the XRI exchange busy
7278 * on outstanding FCP and ELS I/Os every 10 ms for up to 10 seconds; after
7279 * that, it checks the XRI exchange busy on outstanding FCP and ELS I/Os
7280 * every 30 seconds, logs an error message, and waits indefinitely. Only
7281 * when all XRI exchange busy conditions have completed does the driver
7282 * unload proceed with invoking the function reset ioctl mailbox command to
7283 * the CNA and the rest of the driver unload resource release.
7284 **/
7285static void
7286lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7287{
7288	int wait_time = 0;
7289	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7290	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7291
7292	while (!fcp_xri_cmpl || !els_xri_cmpl) {
7293		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7294			if (!fcp_xri_cmpl)
7295				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7296						"2877 FCP XRI exchange busy "
7297						"wait time: %d seconds.\n",
7298						wait_time/1000);
7299			if (!els_xri_cmpl)
7300				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7301						"2878 ELS XRI exchange busy "
7302						"wait time: %d seconds.\n",
7303						wait_time/1000);
7304			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7305			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7306		} else {
7307			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7308			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7309		}
7310		fcp_xri_cmpl =
7311			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7312		els_xri_cmpl =
7313			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7314	}
7315}
7316
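/*
 * Per the description above, the LPFC_XRI_EXCH_BUSY_WAIT_T1/T2/TMO macros
 * (defined elsewhere in the driver) are expected to encode a 10 ms poll
 * interval, a 10 second threshold, and a 30 second warning interval, all
 * accumulated in wait_time as milliseconds.
 */
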
7317/**
7318 * lpfc_sli4_hba_unset - Unset the fcoe hba
7319 * @phba: Pointer to HBA context object.
7320 *
7321 * This function is called in the SLI4 code path to reset the HBA's FCoE
7322 * function. The caller is not required to hold any lock. This routine
7323 * issues PCI function reset mailbox command to reset the FCoE function.
7324 * At the end of the function, it calls lpfc_hba_down_post function to
7325 * free any pending commands.
7326 **/
7327static void
7328lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7329{
7330	int wait_cnt = 0;
7331	LPFC_MBOXQ_t *mboxq;
7332
7333	lpfc_stop_hba_timers(phba);
7334	phba->sli4_hba.intr_enable = 0;
7335
7336	/*
7337	 * Gracefully wait out the potential current outstanding asynchronous
7338	 * mailbox command.
7339	 */
7340
7341	/* First, block any pending async mailbox command from being posted */
7342	spin_lock_irq(&phba->hbalock);
7343	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7344	spin_unlock_irq(&phba->hbalock);
7345	/* Now, try to wait it out if we can */
7346	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7347		msleep(10);
7348		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7349			break;
7350	}
7351	/* Forcefully release the outstanding mailbox command if timed out */
7352	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7353		spin_lock_irq(&phba->hbalock);
7354		mboxq = phba->sli.mbox_active;
7355		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7356		__lpfc_mbox_cmpl_put(phba, mboxq);
7357		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7358		phba->sli.mbox_active = NULL;
7359		spin_unlock_irq(&phba->hbalock);
7360	}
7361
7362	/* Abort all iocbs associated with the hba */
7363	lpfc_sli_hba_iocb_abort(phba);
7364
7365	/* Wait for completion of device XRI exchange busy */
7366	lpfc_sli4_xri_exchange_busy_wait(phba);
7367
7368	/* Disable PCI subsystem interrupt */
7369	lpfc_sli4_disable_intr(phba);
7370
7371	/* The kthread_stop() signal shall trigger work_done one more time */
7372	kthread_stop(phba->worker_thread);
7373
7374	/* Reset SLI4 HBA FCoE function */
7375	lpfc_pci_function_reset(phba);
7376
7377	/* Stop the SLI4 device port */
7378	phba->pport->work_port_events = 0;
7379}
7380
7381/**
7382 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7383 * @phba: Pointer to HBA context object.
7384 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7385 *
7386 * This function is called in the SLI4 code path to read the port's
7387 * sli4 capabilities.
7388 *
7389 * This function may be called from any context that can block-wait
7390 * for the completion.  The expectation is that this routine is called
7391 * typically from probe_one or from the online routine.
7392 **/
7393int
7394lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7395{
7396	int rc;
7397	struct lpfc_mqe *mqe;
7398	struct lpfc_pc_sli4_params *sli4_params;
7399	uint32_t mbox_tmo;
7400
7401	rc = 0;
7402	mqe = &mboxq->u.mqe;
7403
7404	/* Read the port's SLI4 Parameters port capabilities */
7405	lpfc_sli4_params(mboxq);
7406	if (!phba->sli4_hba.intr_enable)
7407		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7408	else {
7409		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7410		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7411	}
7412
7413	if (unlikely(rc))
7414		return 1;
7415
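	/* Each bf_get() below extracts the named bit-field from the
	 * SLI4_PARAMS mailbox response into the driver's parameter cache.
	 */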
7416	sli4_params = &phba->sli4_hba.pc_sli4_params;
7417	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7418	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7419	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7420	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7421					     &mqe->un.sli4_params);
7422	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7423					     &mqe->un.sli4_params);
7424	sli4_params->proto_types = mqe->un.sli4_params.word3;
7425	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7426	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7427	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7428	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7429	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7430	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7431	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7432	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7433	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7434	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7435	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7436	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7437	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7438	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7439	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7440	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7441	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7442	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7443	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7444	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7445	return rc;
7446}
7447
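/*
 * Illustrative sketch (not a call site in this file): a probe-time caller
 * would allocate the mailbox from the driver's mempool, as done elsewhere
 * in this file, and check the return value:
 *
 *	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 *					       GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	if (lpfc_pc_sli4_params_get(phba, mboxq))
 *		rc = -EIO;
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 */
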
7448/**
7449 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7450 * @pdev: pointer to PCI device
7451 * @pid: pointer to PCI device identifier
7452 *
7453 * This routine is to be called to attach a device with SLI-3 interface spec
7454 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7455 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7456 * information of the device and driver to see if the driver states that it can
7457 * support this kind of device. If the match is successful, the driver core
7458 * invokes this routine. If this routine determines it can claim the HBA, it
7459 * does all the initialization that it needs to do to handle the HBA properly.
7460 *
7461 * Return code
7462 * 	0 - driver can claim the device
7463 * 	negative value - driver can not claim the device
7464 **/
7465static int __devinit
7466lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7467{
7468	struct lpfc_hba   *phba;
7469	struct lpfc_vport *vport = NULL;
7470	struct Scsi_Host  *shost = NULL;
7471	int error;
7472	uint32_t cfg_mode, intr_mode;
7473
7474	/* Allocate memory for HBA structure */
7475	phba = lpfc_hba_alloc(pdev);
7476	if (!phba)
7477		return -ENOMEM;
7478
7479	/* Perform generic PCI device enabling operation */
7480	error = lpfc_enable_pci_dev(phba);
7481	if (error) {
7482		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7483				"1401 Failed to enable pci device.\n");
7484		goto out_free_phba;
7485	}
7486
7487	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
7488	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7489	if (error)
7490		goto out_disable_pci_dev;
7491
7492	/* Set up SLI-3 specific device PCI memory space */
7493	error = lpfc_sli_pci_mem_setup(phba);
7494	if (error) {
7495		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7496				"1402 Failed to set up pci memory space.\n");
7497		goto out_disable_pci_dev;
7498	}
7499
7500	/* Set up phase-1 common device driver resources */
7501	error = lpfc_setup_driver_resource_phase1(phba);
7502	if (error) {
7503		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7504				"1403 Failed to set up driver resource.\n");
7505		goto out_unset_pci_mem_s3;
7506	}
7507
7508	/* Set up SLI-3 specific device driver resources */
7509	error = lpfc_sli_driver_resource_setup(phba);
7510	if (error) {
7511		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7512				"1404 Failed to set up driver resource.\n");
7513		goto out_unset_pci_mem_s3;
7514	}
7515
7516	/* Initialize and populate the iocb list per host */
7517	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7518	if (error) {
7519		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7520				"1405 Failed to initialize iocb list.\n");
7521		goto out_unset_driver_resource_s3;
7522	}
7523
7524	/* Set up common device driver resources */
7525	error = lpfc_setup_driver_resource_phase2(phba);
7526	if (error) {
7527		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7528				"1406 Failed to set up driver resource.\n");
7529		goto out_free_iocb_list;
7530	}
7531
7532	/* Create SCSI host to the physical port */
7533	error = lpfc_create_shost(phba);
7534	if (error) {
7535		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7536				"1407 Failed to create scsi host.\n");
7537		goto out_unset_driver_resource;
7538	}
7539
7540	/* Configure sysfs attributes */
7541	vport = phba->pport;
7542	error = lpfc_alloc_sysfs_attr(vport);
7543	if (error) {
7544		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7545				"1476 Failed to allocate sysfs attr\n");
7546		goto out_destroy_shost;
7547	}
7548
7549	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7550	/* Now, trying to enable interrupt and bring up the device */
7551	cfg_mode = phba->cfg_use_msi;
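	/*
	 * cfg_use_msi follows the same encoding as lpfc_sli_enable_intr():
	 * 2 = MSI-X, 1 = MSI, 0 = INTx. Each failed active-interrupt test
	 * below steps the mode down one level (cfg_mode = --intr_mode) until
	 * a working mode is found or INTx (0) is reached.
	 */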
7552	while (true) {
7553		/* Put device to a known state before enabling interrupt */
7554		lpfc_stop_port(phba);
7555		/* Configure and enable interrupt */
7556		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7557		if (intr_mode == LPFC_INTR_ERROR) {
7558			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7559					"0431 Failed to enable interrupt.\n");
7560			error = -ENODEV;
7561			goto out_free_sysfs_attr;
7562		}
7563		/* SLI-3 HBA setup */
7564		if (lpfc_sli_hba_setup(phba)) {
7565			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7566					"1477 Failed to set up hba\n");
7567			error = -ENODEV;
7568			goto out_remove_device;
7569		}
7570
7571		/* Wait 50ms for the interrupts of previous mailbox commands */
7572		msleep(50);
7573		/* Check active interrupts on message signaled interrupts */
7574		if (intr_mode == 0 ||
7575		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7576			/* Log the current active interrupt mode */
7577			phba->intr_mode = intr_mode;
7578			lpfc_log_intr_mode(phba, intr_mode);
7579			break;
7580		} else {
7581			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7582					"0447 Configure interrupt mode (%d) "
7583					"failed active interrupt test.\n",
7584					intr_mode);
7585			/* Disable the current interrupt mode */
7586			lpfc_sli_disable_intr(phba);
7587			/* Try next level of interrupt mode */
7588			cfg_mode = --intr_mode;
7589		}
7590	}
7591
7592	/* Perform post initialization setup */
7593	lpfc_post_init_setup(phba);
7594
7595	/* Check if there are static vports to be created. */
7596	lpfc_create_static_vport(phba);
7597
7598	return 0;
7599
7600out_remove_device:
7601	lpfc_unset_hba(phba);
7602out_free_sysfs_attr:
7603	lpfc_free_sysfs_attr(vport);
7604out_destroy_shost:
7605	lpfc_destroy_shost(phba);
7606out_unset_driver_resource:
7607	lpfc_unset_driver_resource_phase2(phba);
7608out_free_iocb_list:
7609	lpfc_free_iocb_list(phba);
7610out_unset_driver_resource_s3:
7611	lpfc_sli_driver_resource_unset(phba);
7612out_unset_pci_mem_s3:
7613	lpfc_sli_pci_mem_unset(phba);
7614out_disable_pci_dev:
7615	lpfc_disable_pci_dev(phba);
7616	if (shost)
7617		scsi_host_put(shost);
7618out_free_phba:
7619	lpfc_hba_free(phba);
7620	return error;
7621}
7622
7623/**
7624 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7625 * @pdev: pointer to PCI device
7626 *
7627 * This routine is to be called to detach a device with SLI-3 interface
7628 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7629 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7630 * device to be removed from the PCI subsystem properly.
7631 **/
7632static void __devexit
7633lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7634{
7635	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
7636	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7637	struct lpfc_vport **vports;
7638	struct lpfc_hba   *phba = vport->phba;
7639	int i;
7640	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7641
7642	spin_lock_irq(&phba->hbalock);
7643	vport->load_flag |= FC_UNLOADING;
7644	spin_unlock_irq(&phba->hbalock);
7645
7646	lpfc_free_sysfs_attr(vport);
7647
7648	/* Release all the vports against this physical port */
7649	vports = lpfc_create_vport_work_array(phba);
7650	if (vports != NULL)
7651		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7652			fc_vport_terminate(vports[i]->fc_vport);
7653	lpfc_destroy_vport_work_array(phba, vports);
7654
7655	/* Remove FC host and then SCSI host with the physical port */
7656	fc_remove_host(shost);
7657	scsi_remove_host(shost);
7658	lpfc_cleanup(vport);
7659
7660	/*
7661	 * Bring down the SLI Layer. This step disables all interrupts,
7662	 * clears the rings, discards all mailbox commands, and resets
7663	 * the HBA.
7664	 */
7665
7666	/* HBA interrupt will be disabled after this call */
7667	lpfc_sli_hba_down(phba);
7668	/* The kthread_stop() signal shall trigger work_done one more time */
7669	kthread_stop(phba->worker_thread);
7670	/* Final cleanup of txcmplq and reset the HBA */
7671	lpfc_sli_brdrestart(phba);
7672
7673	lpfc_stop_hba_timers(phba);
7674	spin_lock_irq(&phba->hbalock);
7675	list_del_init(&vport->listentry);
7676	spin_unlock_irq(&phba->hbalock);
7677
7678	lpfc_debugfs_terminate(vport);
7679
7680	/* Disable interrupt */
7681	lpfc_sli_disable_intr(phba);
7682
7683	pci_set_drvdata(pdev, NULL);
7684	scsi_host_put(shost);
7685
7686	/*
7687	 * Call scsi_free before mem_free since scsi bufs are released to their
7688	 * corresponding pools here.
7689	 */
7690	lpfc_scsi_free(phba);
7691	lpfc_mem_free_all(phba);
7692
7693	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7694			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7695
7696	/* Free resources associated with SLI2 interface */
7697	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7698			  phba->slim2p.virt, phba->slim2p.phys);
7699
7700	/* unmap adapter SLIM and Control Registers */
7701	iounmap(phba->ctrl_regs_memmap_p);
7702	iounmap(phba->slim_memmap_p);
7703
7704	lpfc_hba_free(phba);
7705
7706	pci_release_selected_regions(pdev, bars);
7707	pci_disable_device(pdev);
7708}
7709
7710/**
7711 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7712 * @pdev: pointer to PCI device
7713 * @msg: power management message
7714 *
7715 * This routine is to be called from the kernel's PCI subsystem to support
7716 * system Power Management (PM) to device with SLI-3 interface spec. When
7717 * PM invokes this method, it quiesces the device by stopping the driver's
7718 * worker thread for the device, turning off device's interrupt and DMA,
7719 * and bring the device offline. Note that as the driver implements the
7720 * minimum PM requirements to a power-aware driver's PM support for the
7721 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7722 * to the suspend() method call will be treated as SUSPEND and the driver will
7723 * fully reinitialize its device during resume() method call, the driver will
7724 * set device to PCI_D3hot state in PCI config space instead of setting it
7725 * according to the @msg provided by the PM.
7726 *
7727 * Return code
7728 * 	0 - driver suspended the device
7729 * 	Error otherwise
7730 **/
7731static int
7732lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7733{
7734	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7735	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7736
7737	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7738			"0473 PCI device Power Management suspend.\n");
7739
7740	/* Bring down the device */
7741	lpfc_offline_prep(phba);
7742	lpfc_offline(phba);
7743	kthread_stop(phba->worker_thread);
7744
7745	/* Disable interrupt from device */
7746	lpfc_sli_disable_intr(phba);
7747
7748	/* Save device state to PCI config space */
7749	pci_save_state(pdev);
7750	pci_set_power_state(pdev, PCI_D3hot);
7751
7752	return 0;
7753}
7754
7755/**
7756 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7757 * @pdev: pointer to PCI device
7758 *
7759 * This routine is to be called from the kernel's PCI subsystem to support
7760 * system Power Management (PM) on a device with SLI-3 interface spec. When
7761 * PM invokes this method, it restores the device's PCI config space state,
7762 * fully reinitializes the device, and brings it online. Note that the
7763 * driver implements only the minimum PM requirements for a power-aware
7764 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
7765 * the suspend() method are treated as SUSPEND, and the driver fully
7766 * reinitializes its device during the resume() method call. Therefore, the
7767 * device is set to PCI_D0 directly in PCI config space before restoring
7768 * the state.
7769 *
7770 * Return code
7771 * 	0 - driver resumed the device
7772 * 	Error otherwise
7773 **/
7774static int
7775lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7776{
7777	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7778	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7779	uint32_t intr_mode;
7780	int error;
7781
7782	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7783			"0452 PCI device Power Management resume.\n");
7784
7785	/* Restore device state from PCI config space */
7786	pci_set_power_state(pdev, PCI_D0);
7787	pci_restore_state(pdev);
7788
7789	/*
7790	 * As the new kernel behavior of the pci_restore_state() API call clears
7791	 * the device's saved_state flag, we need to save the restored state again.
7792	 */
7793	pci_save_state(pdev);
7794
7795	if (pdev->is_busmaster)
7796		pci_set_master(pdev);
7797
7798	/* Startup the kernel thread for this host adapter. */
7799	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7800					"lpfc_worker_%d", phba->brd_no);
7801	if (IS_ERR(phba->worker_thread)) {
7802		error = PTR_ERR(phba->worker_thread);
7803		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7804				"0434 PM resume failed to start worker "
7805				"thread: error=x%x.\n", error);
7806		return error;
7807	}
7808
7809	/* Configure and enable interrupt */
7810	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7811	if (intr_mode == LPFC_INTR_ERROR) {
7812		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813				"0430 PM resume Failed to enable interrupt\n");
7814		return -EIO;
7815	} else
7816		phba->intr_mode = intr_mode;
7817
7818	/* Restart HBA and bring it online */
7819	lpfc_sli_brdrestart(phba);
7820	lpfc_online(phba);
7821
7822	/* Log the current active interrupt mode */
7823	lpfc_log_intr_mode(phba, phba->intr_mode);
7824
7825	return 0;
7826}
7827
7828/**
7829 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7830 * @phba: pointer to lpfc hba data structure.
7831 *
7832 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7833 * aborts all the outstanding SCSI I/Os to the pci device.
7834 **/
7835static void
7836lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7837{
7838	struct lpfc_sli *psli = &phba->sli;
7839	struct lpfc_sli_ring  *pring;
7840
7841	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7842			"2723 PCI channel I/O abort preparing for recovery\n");
7843
7844	/*
7845	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
7846	 * and let the SCSI mid-layer retry them to recover.
7847	 */
7848	pring = &psli->ring[psli->fcp_ring];
7849	lpfc_sli_abort_iocb_ring(phba, pring);
7850}
7851
7852/**
7853 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7854 * @phba: pointer to lpfc hba data structure.
7855 *
7856 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7857 * disables the device interrupt and pci device, and aborts the internal FCP
7858 * pending I/Os.
7859 **/
7860static void
7861lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7862{
7863	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7864			"2710 PCI channel disable preparing for reset\n");
7865
7866	/* Block any management I/Os to the device */
7867	lpfc_block_mgmt_io(phba);
7868
7869	/* Block all SCSI devices' I/Os on the host */
7870	lpfc_scsi_dev_block(phba);
7871
7872	/* stop all timers */
7873	lpfc_stop_hba_timers(phba);
7874
7875	/* Disable interrupt and pci device */
7876	lpfc_sli_disable_intr(phba);
7877	pci_disable_device(phba->pcidev);
7878
7879	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
7880	lpfc_sli_flush_fcp_rings(phba);
7881}
7882
7883/**
7884 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7885 * @phba: pointer to lpfc hba data structure.
7886 *
7887 * This routine is called to prepare the SLI3 device for PCI slot permanently
7888 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7889 * pending I/Os.
7890 **/
7891static void
7892lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7893{
7894	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7895			"2711 PCI channel permanent disable for failure\n");
7896	/* Block all SCSI devices' I/Os on the host */
7897	lpfc_scsi_dev_block(phba);
7898
7899	/* stop all timers */
7900	lpfc_stop_hba_timers(phba);
7901
7902	/* Clean up all driver's outstanding SCSI I/Os */
7903	lpfc_sli_flush_fcp_rings(phba);
7904}
7905
7906/**
7907 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7908 * @pdev: pointer to PCI device.
7909 * @state: the current PCI connection state.
7910 *
7911 * This routine is called from the PCI subsystem for I/O error handling to
7912 * device with SLI-3 interface spec. This function is called by the PCI
7913 * subsystem after a PCI bus error affecting this device has been detected.
7914 * When this function is invoked, it will need to stop all the I/Os and
7915 * interrupt(s) to the device. Once that is done, it will return
7916 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7917 * as desired.
7918 *
7919 * Return codes
7920 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7921 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7922 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7923 **/
7924static pci_ers_result_t
7925lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7926{
7927	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7928	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7929
7930	switch (state) {
7931	case pci_channel_io_normal:
7932		/* Non-fatal error, prepare for recovery */
7933		lpfc_sli_prep_dev_for_recover(phba);
7934		return PCI_ERS_RESULT_CAN_RECOVER;
7935	case pci_channel_io_frozen:
7936		/* Fatal error, prepare for slot reset */
7937		lpfc_sli_prep_dev_for_reset(phba);
7938		return PCI_ERS_RESULT_NEED_RESET;
7939	case pci_channel_io_perm_failure:
7940		/* Permanent failure, prepare for device down */
7941		lpfc_sli_prep_dev_for_perm_failure(phba);
7942		return PCI_ERS_RESULT_DISCONNECT;
7943	default:
7944		/* Unknown state, prepare and request slot reset */
7945		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7946				"0472 Unknown PCI error state: x%x\n", state);
7947		lpfc_sli_prep_dev_for_reset(phba);
7948		return PCI_ERS_RESULT_NEED_RESET;
7949	}
7950}
7951
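/*
 * Summary of the mapping implemented above:
 *
 *	pci_channel_io_normal       -> prepare for recovery     -> CAN_RECOVER
 *	pci_channel_io_frozen       -> prepare for slot reset   -> NEED_RESET
 *	pci_channel_io_perm_failure -> prepare for perm disable -> DISCONNECT
 *	unknown state               -> prepare for slot reset   -> NEED_RESET
 */
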
7952/**
7953 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7954 * @pdev: pointer to PCI device.
7955 *
7956 * This routine is called from the PCI subsystem for error handling to
7957 * device with SLI-3 interface spec. This is called after PCI bus has been
7958 * reset to restart the PCI card from scratch, as if from a cold-boot.
7959 * During the PCI subsystem error recovery, after driver returns
7960 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7961 * recovery and then call this routine before calling the .resume method
7962 * to recover the device. This function will initialize the HBA device,
7963 * enable the interrupt, and put the HBA into an offline state
7964 * without passing any I/O traffic.
7965 *
7966 * Return codes
7967 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7968 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7969 **/
7970static pci_ers_result_t
7971lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7972{
7973	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7974	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7975	struct lpfc_sli *psli = &phba->sli;
7976	uint32_t intr_mode;
7977
7978	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7979	if (pci_enable_device_mem(pdev)) {
7980		printk(KERN_ERR "lpfc: Cannot re-enable "
7981			"PCI device after reset.\n");
7982		return PCI_ERS_RESULT_DISCONNECT;
7983	}
7984
7985	pci_restore_state(pdev);
7986
7987	/*
7988	 * As the new kernel behavior of the pci_restore_state() API call clears
7989	 * the device's saved_state flag, we need to save the restored state again.
7990	 */
7991	pci_save_state(pdev);
7992
7993	if (pdev->is_busmaster)
7994		pci_set_master(pdev);
7995
7996	spin_lock_irq(&phba->hbalock);
7997	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7998	spin_unlock_irq(&phba->hbalock);
7999
8000	/* Configure and enable interrupt */
8001	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8002	if (intr_mode == LPFC_INTR_ERROR) {
8003		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8004				"0427 Cannot re-enable interrupt after "
8005				"slot reset.\n");
8006		return PCI_ERS_RESULT_DISCONNECT;
8007	} else
8008		phba->intr_mode = intr_mode;
8009
8010	/* Take device offline, it will perform cleanup */
8011	lpfc_offline_prep(phba);
8012	lpfc_offline(phba);
8013	lpfc_sli_brdrestart(phba);
8014
8015	/* Log the current active interrupt mode */
8016	lpfc_log_intr_mode(phba, phba->intr_mode);
8017
8018	return PCI_ERS_RESULT_RECOVERED;
8019}
8020
8021/**
8022 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8023 * @pdev: pointer to PCI device
8024 *
8025 * This routine is called from the PCI subsystem for error handling to device
8026 * with SLI-3 interface spec. It is called when kernel error recovery tells
8027 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8028 * error recovery. After this call, traffic can start to flow from this device
8029 * again.
8030 **/
8031static void
8032lpfc_io_resume_s3(struct pci_dev *pdev)
8033{
8034	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8035	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8036
8037	/* Bring device online, it will be no-op for non-fatal error resume */
8038	lpfc_online(phba);
8039
8040	/* Clean up Advanced Error Reporting (AER) if needed */
8041	if (phba->hba_flag & HBA_AER_ENABLED)
8042		pci_cleanup_aer_uncorrect_error_status(pdev);
8043}
8044
8045/**
8046 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8047 * @phba: pointer to lpfc hba data structure.
8048 *
8049 * returns the number of ELS/CT IOCBs to reserve
8050 **/
8051int
8052lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8053{
8054	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8055
8056	if (phba->sli_rev == LPFC_SLI_REV4) {
8057		if (max_xri <= 100)
8058			return 10;
8059		else if (max_xri <= 256)
8060			return 25;
8061		else if (max_xri <= 512)
8062			return 50;
8063		else if (max_xri <= 1024)
8064			return 100;
8065		else
8066			return 150;
8067	} else
8068		return 0;
8069}
8070
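/*
 * Reservation tiers implemented above (SLI-4 only; SLI-3 reserves none):
 *
 *	max_xri <= 100  -> 10 IOCBs
 *	max_xri <= 256  -> 25 IOCBs
 *	max_xri <= 512  -> 50 IOCBs
 *	max_xri <= 1024 -> 100 IOCBs
 *	otherwise       -> 150 IOCBs
 */
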
8071/**
8072 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8073 * @pdev: pointer to PCI device
8074 * @pid: pointer to PCI device identifier
8075 *
8076 * This routine is called from the kernel's PCI subsystem to attach a device
8077 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8078 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8079 * information of the device and driver to see if the driver states that it
8080 * can support this kind of device. If the match is successful, the driver
8081 * core invokes this routine. If this routine determines it can claim the HBA,
8082 * it does all the initialization that it needs to do to handle the HBA
8083 * properly.
8084 *
8085 * Return code
8086 * 	0 - driver can claim the device
8087 * 	negative value - driver can not claim the device
8088 **/
8089static int __devinit
8090lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8091{
8092	struct lpfc_hba   *phba;
8093	struct lpfc_vport *vport = NULL;
8094	struct Scsi_Host  *shost = NULL;
8095	int error;
8096	uint32_t cfg_mode, intr_mode;
8097	int mcnt;
8098
8099	/* Allocate memory for HBA structure */
8100	phba = lpfc_hba_alloc(pdev);
8101	if (!phba)
8102		return -ENOMEM;
8103
8104	/* Perform generic PCI device enabling operation */
8105	error = lpfc_enable_pci_dev(phba);
8106	if (error) {
8107		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8108				"1409 Failed to enable pci device.\n");
8109		goto out_free_phba;
8110	}
8111
8112	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
8113	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8114	if (error)
8115		goto out_disable_pci_dev;
8116
8117	/* Set up SLI-4 specific device PCI memory space */
8118	error = lpfc_sli4_pci_mem_setup(phba);
8119	if (error) {
8120		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8121				"1410 Failed to set up pci memory space.\n");
8122		goto out_disable_pci_dev;
8123	}
8124
8125	/* Set up phase-1 common device driver resources */
8126	error = lpfc_setup_driver_resource_phase1(phba);
8127	if (error) {
8128		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8129				"1411 Failed to set up driver resource.\n");
8130		goto out_unset_pci_mem_s4;
8131	}
8132
8133	/* Set up SLI-4 Specific device driver resources */
8134	error = lpfc_sli4_driver_resource_setup(phba);
8135	if (error) {
8136		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8137				"1412 Failed to set up driver resource.\n");
8138		goto out_unset_pci_mem_s4;
8139	}
8140
8141	/* Initialize and populate the iocb list per host */
8142
8143	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8144			"2821 initialize iocb list %d.\n",
8145			phba->cfg_iocb_cnt*1024);
8146	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8147
8148	if (error) {
8149		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8150				"1413 Failed to initialize iocb list.\n");
8151		goto out_unset_driver_resource_s4;
8152	}
8153
8154	/* Set up common device driver resources */
8155	error = lpfc_setup_driver_resource_phase2(phba);
8156	if (error) {
8157		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8158				"1414 Failed to set up driver resource.\n");
8159		goto out_free_iocb_list;
8160	}
8161
8162	/* Create SCSI host to the physical port */
8163	error = lpfc_create_shost(phba);
8164	if (error) {
8165		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8166				"1415 Failed to create scsi host.\n");
8167		goto out_unset_driver_resource;
8168	}
8169
8170	/* Configure sysfs attributes */
8171	vport = phba->pport;
8172	error = lpfc_alloc_sysfs_attr(vport);
8173	if (error) {
8174		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8175				"1416 Failed to allocate sysfs attr\n");
8176		goto out_destroy_shost;
8177	}
8178
8179	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8180	/* Now, trying to enable interrupt and bring up the device */
8181	cfg_mode = phba->cfg_use_msi;
8182	while (true) {
8183		/* Put device to a known state before enabling interrupt */
8184		lpfc_stop_port(phba);
8185		/* Configure and enable interrupt */
8186		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8187		if (intr_mode == LPFC_INTR_ERROR) {
8188			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8189					"0426 Failed to enable interrupt.\n");
8190			error = -ENODEV;
8191			goto out_free_sysfs_attr;
8192		}
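		/*
		 * MSI-X vector 0 is reserved for the slow path (see
		 * lpfc_sli4_enable_msix()), so at most msix_vec_nr - 1
		 * vectors remain for fast-path FCP EQs; the other interrupt
		 * modes share a single EQ.
		 */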
8193		/* Default to single FCP EQ for non-MSI-X */
8194		if (phba->intr_type != MSIX)
8195			phba->cfg_fcp_eq_count = 1;
8196		else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
8197			phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
8198		/* Set up SLI-4 HBA */
8199		if (lpfc_sli4_hba_setup(phba)) {
8200			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8201					"1421 Failed to set up hba\n");
8202			error = -ENODEV;
8203			goto out_disable_intr;
8204		}
8205
8206		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
8207		if (intr_mode != 0)
8208			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8209							    LPFC_ACT_INTR_CNT);
8210
8211		/* Check active interrupts received only for MSI/MSI-X */
8212		if (intr_mode == 0 ||
8213		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8214			/* Log the current active interrupt mode */
8215			phba->intr_mode = intr_mode;
8216			lpfc_log_intr_mode(phba, intr_mode);
8217			break;
8218		}
8219		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8220				"0451 Configure interrupt mode (%d) "
8221				"failed active interrupt test.\n",
8222				intr_mode);
8223		/* Unset the previous SLI-4 HBA setup */
8224		lpfc_sli4_unset_hba(phba);
8225		/* Try next level of interrupt mode */
8226		cfg_mode = --intr_mode;
8227	}
8228
8229	/* Perform post initialization setup */
8230	lpfc_post_init_setup(phba);
8231
8232	/* Check if there are static vports to be created. */
8233	lpfc_create_static_vport(phba);
8234
8235	return 0;
8236
8237out_disable_intr:
8238	lpfc_sli4_disable_intr(phba);
8239out_free_sysfs_attr:
8240	lpfc_free_sysfs_attr(vport);
8241out_destroy_shost:
8242	lpfc_destroy_shost(phba);
8243out_unset_driver_resource:
8244	lpfc_unset_driver_resource_phase2(phba);
8245out_free_iocb_list:
8246	lpfc_free_iocb_list(phba);
8247out_unset_driver_resource_s4:
8248	lpfc_sli4_driver_resource_unset(phba);
8249out_unset_pci_mem_s4:
8250	lpfc_sli4_pci_mem_unset(phba);
8251out_disable_pci_dev:
8252	lpfc_disable_pci_dev(phba);
8253	if (shost)
8254		scsi_host_put(shost);
8255out_free_phba:
8256	lpfc_hba_free(phba);
8257	return error;
8258}
8259
8260/**
8261 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8262 * @pdev: pointer to PCI device
8263 *
8264 * This routine is called from the kernel's PCI subsystem to detach a device
8265 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8266 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8267 * device to be removed from the PCI subsystem properly.
8268 **/
8269static void __devexit
8270lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8271{
8272	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8273	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8274	struct lpfc_vport **vports;
8275	struct lpfc_hba *phba = vport->phba;
8276	int i;
8277
8278	/* Mark the device unloading flag */
8279	spin_lock_irq(&phba->hbalock);
8280	vport->load_flag |= FC_UNLOADING;
8281	spin_unlock_irq(&phba->hbalock);
8282
8283	/* Free the HBA sysfs attributes */
8284	lpfc_free_sysfs_attr(vport);
8285
8286	/* Release all the vports against this physical port */
8287	vports = lpfc_create_vport_work_array(phba);
8288	if (vports != NULL)
8289		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8290			fc_vport_terminate(vports[i]->fc_vport);
8291	lpfc_destroy_vport_work_array(phba, vports);
8292
8293	/* Remove FC host and then SCSI host with the physical port */
8294	fc_remove_host(shost);
8295	scsi_remove_host(shost);
8296
8297	/* Perform cleanup on the physical port */
8298	lpfc_cleanup(vport);
8299
8300	/*
8301	 * Bring down the SLI Layer. This step disables all interrupts,
8302	 * clears the rings, discards all mailbox commands, and resets
8303	 * the HBA FCoE function.
8304	 */
8305	lpfc_debugfs_terminate(vport);
8306	lpfc_sli4_hba_unset(phba);
8307
8308	spin_lock_irq(&phba->hbalock);
8309	list_del_init(&vport->listentry);
8310	spin_unlock_irq(&phba->hbalock);
8311
8312	/* Perform scsi free before driver resource_unset since scsi
8313	 * buffers are released to their corresponding pools here.
8314	 */
8315	lpfc_scsi_free(phba);
8316	lpfc_sli4_driver_resource_unset(phba);
8317
8318	/* Unmap adapter Control and Doorbell registers */
8319	lpfc_sli4_pci_mem_unset(phba);
8320
8321	/* Release PCI resources and disable device's PCI function */
8322	scsi_host_put(shost);
8323	lpfc_disable_pci_dev(phba);
8324
8325	/* Finally, free the driver's device data structure */
8326	lpfc_hba_free(phba);
8327
8328	return;
8329}
8330
8331/**
8332 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8333 * @pdev: pointer to PCI device
8334 * @msg: power management message
8335 *
8336 * This routine is called from the kernel's PCI subsystem to support system
8337 * Power Management (PM) on a device with SLI-4 interface spec. When PM
8338 * invokes this method, it quiesces the device by stopping the driver's
8339 * worker thread for the device, turning off the device's interrupt and DMA,
8340 * and bringing the device offline. Note that the driver implements only the
8341 * minimum PM requirements for a power-aware driver: all possible PM messages
8342 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated
8343 * as SUSPEND, and the driver fully reinitializes its device during the
8344 * resume() method call. Therefore, the driver sets the device to PCI_D3hot
8345 * in PCI config space instead of setting it according to the @msg provided
8346 * by the PM.
8347 *
8348 * Return code
8349 * 	0 - driver suspended the device
8350 * 	Error otherwise
8351 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

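/*
 * For reference, a rough sketch of how the PCI core is expected to drive
 * the legacy suspend hook used above; this is illustrative only, as the
 * actual call chain lives in drivers/pci/pci-driver.c and varies by
 * kernel version:
 *
 *	pci_pm_suspend()                 // PM core, per-device callback
 *	  -> pci_legacy_suspend()        // legacy-hook adapter
 *	    -> drv->suspend(pdev, msg)   // lpfc_pci_suspend_one() here
 *
 * Because the driver saves config space and enters PCI_D3hot itself rather
 * than leaving that to the PCI core, the resume path below must undo both
 * steps explicitly.
 */
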
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for devices with the SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that, because
 * the driver implements only the minimum PM requirements for a power-aware
 * driver, all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND and the driver fully
 * reinitializes its device during the resume() method call; the device is
 * therefore set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

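/*
 * For reference: the pci_save_state() call right after pci_restore_state()
 * above compensates for newer kernels clearing the struct pci_dev
 * state_saved flag on restore. A sketch of the behavior being worked
 * around:
 *
 *	pci_save_state(pdev);		// pdev->state_saved becomes true
 *	pci_restore_state(pdev);	// ...and is cleared again here
 *
 * Without the re-save, a later suspend could find no valid saved state.
 */
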
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and PCI device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for permanently
 * disabling the PCI slot. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on
 * devices with the SLI-4 interface spec. It is invoked by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it stops the I/Os and interrupt(s) to the device as
 * appropriate for the reported channel state, and then returns the
 * recovery result for the PCI subsystem to perform proper recovery.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - recoverable without a slot reset
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

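/*
 * For orientation, the PCI error-recovery sequence that exercises the
 * handlers in this file is roughly (paraphrasing
 * Documentation/PCI/pci-error-recovery.txt):
 *
 *	1. err_handler->error_detected(pdev, state)
 *	2. err_handler->mmio_enabled(), if provided (lpfc does not)
 *	3. err_handler->slot_reset(), when NEED_RESET was returned
 *	4. err_handler->resume(), once the slot has recovered
 *
 * The routine above maps pci_channel_io_normal/frozen/perm_failure onto
 * the recover, reset, and permanent-failure preparation paths.
 */
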
/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on
 * devices with the SLI-4 interface spec. It is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable its interrupt, but it will just leave the HBA in an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on
 * devices with the SLI-4 interface spec. It is called when kernel error
 * recovery tells the lpfc driver that it is OK to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, since the function reset is performed
	 * through a mailbox command, which requires DMA to be enabled,
	 * this operation has to be moved to the io resume phase. Taking
	 * the device offline will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and the driver
 * to see if the driver states that it can support this kind of device. If
 * the match is successful, the driver core invokes this routine. This
 * routine dispatches the action to the proper SLI-3 or SLI-4 device probing
 * routine, which will do all the initialization that it needs to do to
 * handle the HBA device properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

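/*
 * For reference: bf_get() above is the driver's bitfield accessor from
 * lpfc_hw4.h; conceptually it is a shift-and-mask of the named field,
 * roughly:
 *
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * so the dispatch above just tests the SLI_INTF register's valid and
 * SLI-revision fields to choose between the SLI-3 and SLI-4 probe paths.
 */
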
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting the PCI device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

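/*
 * For reference: MODULE_DEVICE_TABLE(pci, ...) exports the table above so
 * depmod can generate module aliases and udev can autoload lpfc on hotplug.
 * An Emulex entry (vendor 0x10df) yields an alias of the form below; the
 * device ID is shown as a wildcard here since the numeric values are
 * defined elsewhere (lpfc_hw.h):
 *
 *	alias pci:v000010DFd0000*sv*sd*bc*sc*i* lpfc
 */
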
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend        = lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};

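/*
 * Note: this driver uses the legacy PCI .suspend/.resume hooks rather than
 * a struct dev_pm_ops, which the PCI core of this era still supports. The
 * __devexit_p() wrapper evaluates, roughly, to the function pointer when
 * hot-unplug support is compiled in and to NULL otherwise, matching the
 * __devexit annotation on lpfc_pci_remove_one() so the code can be
 * discarded when it can never run.
 */
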
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

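/*
 * A typical load sequence, assuming lpfc_enable_npiv is the module
 * parameter defined elsewhere in the driver (lpfc_attr.c):
 *
 *	# modprobe lpfc lpfc_enable_npiv=1
 *
 * With NPIV enabled, lpfc_init() wires vport_create/vport_delete into the
 * FC transport template and attaches a second, vport-specific template.
 */
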
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

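/*
 * For reference: _dump_buf_data/_dump_buf_dif are allocated elsewhere in
 * the driver with __get_free_pages(), so an order-N buffer spans 2^N
 * pages; hence the (1L << order) page counts logged above and the matching
 * free_pages(addr, order) calls. For example:
 *
 *	buf = (char *)__get_free_pages(GFP_KERNEL, 2);	// spans 4 pages
 *	...
 *	free_pages((unsigned long)buf, 2);		// frees all 4
 */
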
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);