lpfc_init.c revision f4b4c68f74dcd5da03df851090cad28ad4e8d7cc
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

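			/* The HBA expects the license text as big-endian
			 * 32-bit words, so byte-swap it in place on the
			 * first use only.
			 */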
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* Dump mem may return a zero word count when finished or when
		 * we got a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
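		/* Clamp the word count so the copy never overruns the
		 * DMP_VPD_SIZE lpfc_vpd_data buffer.
		 */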
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

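	/* A dist code of 3 with num 0 is formatted as a plain release:
	 * no distribution character or number suffix is appended.
	 */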
	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * overheated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	lpfc_read_sparam(phba, pmb, 0);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
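		/* Emit each of the six IEEE address bytes as two
		 * lowercase hex digits (12 characters total).
		 */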
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

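	/* Reclaim the aborted SCSI buffers: drop any stale command
	 * association and mark them clean before returning them to the
	 * free list.
	 */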
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}


/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA shall be
 * taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

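	/* If the ELS buffer count has not changed since the last heartbeat
	 * interval, assume the unsolicited buffers are stale and free them.
	 */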
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause I/Os to
	 * be dropped by the firmware. Error out the iocbs (I/Os) on the
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

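/**
 * lpfc_board_errevt_to_mgmt - Send a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event to the
 * fc_host so that a management application is notified of the port error.
 **/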
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

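	/* rc identifies the failing step: 1 = mailbox alloc, 2 = dmabuf
	 * alloc, 3 = mbuf alloc, 4 = READ_LA mailbox issue.
	 */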
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
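		/* Standard PCI VPD resource tags: 0x82 identifier string
		 * and 0x91 read/write data are skipped; 0x90 read-only
		 * keywords (SN, V1-V4) are parsed below; 0x78 is the end
		 * tag.
		 */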
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		int   max_speed;
		char *bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100-F", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK_S:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100-F-S", max_speed, "PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* oneConnect HBAs require special processing: they are all
	 * initiators, and we put the port number on the end.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, FCoE Initiator, Port %s",
				m.name,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, m.max_speed,
				(GE) ? "GE" : "Gb",
				m.bus,
				(GE) ? "FCoE Adapter" :
					"Fibre Channel Adapter");
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
1692lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1693{
1694	IOCB_t *icmd;
1695	struct lpfc_iocbq *iocb;
1696	struct lpfc_dmabuf *mp1, *mp2;
1697
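	/* Include any buffers that failed to post on earlier attempts. */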
1698	cnt += pring->missbufcnt;
1699
1700	/* While there are buffers to post */
1701	while (cnt > 0) {
1702		/* Allocate buffer for  command iocb */
1703		iocb = lpfc_sli_get_iocbq(phba);
1704		if (iocb == NULL) {
1705			pring->missbufcnt = cnt;
1706			return cnt;
1707		}
1708		icmd = &iocb->iocb;
1709
1710		/* 2 buffers can be posted per command */
1711		/* Allocate buffer to post */
1712		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1713		if (mp1)
1714		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1715		if (!mp1 || !mp1->virt) {
1716			kfree(mp1);
1717			lpfc_sli_release_iocbq(phba, iocb);
1718			pring->missbufcnt = cnt;
1719			return cnt;
1720		}
1721
1722		INIT_LIST_HEAD(&mp1->list);
1723		/* Allocate buffer to post */
1724		if (cnt > 1) {
1725			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1726			if (mp2)
1727				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1728							    &mp2->phys);
1729			if (!mp2 || !mp2->virt) {
1730				kfree(mp2);
1731				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1732				kfree(mp1);
1733				lpfc_sli_release_iocbq(phba, iocb);
1734				pring->missbufcnt = cnt;
1735				return cnt;
1736			}
1737
1738			INIT_LIST_HEAD(&mp2->list);
1739		} else {
1740			mp2 = NULL;
1741		}
1742
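		/*
		 * Fill the command's buffer descriptor entries (BDEs) with
		 * the DMA addresses of the receive buffer(s) being posted.
		 */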
1743		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1744		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1745		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1746		icmd->ulpBdeCount = 1;
1747		cnt--;
1748		if (mp2) {
1749			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1750			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1751			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1752			cnt--;
1753			icmd->ulpBdeCount = 2;
1754		}
1755
1756		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1757		icmd->ulpLe = 1;
1758
1759		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1760		    IOCB_ERROR) {
1761			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1762			kfree(mp1);
1763			cnt++;
1764			if (mp2) {
1765				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1766				kfree(mp2);
1767				cnt++;
1768			}
1769			lpfc_sli_release_iocbq(phba, iocb);
1770			pring->missbufcnt = cnt;
1771			return cnt;
1772		}
1773		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1774		if (mp2)
1775			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1776	}
1777	pring->missbufcnt = 0;
1778	return 0;
1779}
1780
1781/**
1782 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1783 * @phba: pointer to lpfc hba data structure.
1784 *
1785 * This routine posts initial receive IOCB buffers to the ELS ring. The
1786 * number of initial IOCB buffers, LPFC_BUF_RING0, is currently set to
1787 * 64 IOCBs.
1788 *
1789 * Return codes
1790 *   0 - success (currently always success)
1791 **/
1792static int
1793lpfc_post_rcv_buf(struct lpfc_hba *phba)
1794{
1795	struct lpfc_sli *psli = &phba->sli;
1796
1797	/* Ring 0, ELS / CT buffers */
1798	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1799	/* Ring 2 - FCP no buffers needed */
1800
1801	return 0;
1802}
1803
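/* S(N, V): rotate the 32-bit value V left by N bits (circular shift) */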
1804#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1805
1806/**
1807 * lpfc_sha_init - Set up initial array of hash table entries
1808 * @HashResultPointer: pointer to an array as hash table.
1809 *
1810 * This routine sets up the initial values in the array of hash table entries
1811 * for the LC HBAs.
1812 **/
1813static void
1814lpfc_sha_init(uint32_t * HashResultPointer)
1815{
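	/* Standard SHA-1 initial hash values (H0-H4, per FIPS 180-1) */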
1816	HashResultPointer[0] = 0x67452301;
1817	HashResultPointer[1] = 0xEFCDAB89;
1818	HashResultPointer[2] = 0x98BADCFE;
1819	HashResultPointer[3] = 0x10325476;
1820	HashResultPointer[4] = 0xC3D2E1F0;
1821}
1822
1823/**
1824 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1825 * @HashResultPointer: pointer to an initial/result hash table.
1826 * @HashWorkingPointer: pointer to an working hash table.
1827 *
1828 * This routine iterates an initial hash table pointed by @HashResultPointer
1829 * with the values from the working hash table pointeed by @HashWorkingPointer.
1830 * The results are putting back to the initial hash table, returned through
1831 * the @HashResultPointer as the result hash table.
1832 **/
1833static void
1834lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1835{
1836	int t;
1837	uint32_t TEMP;
1838	uint32_t A, B, C, D, E;
1839	t = 16;
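	/*
	 * Expand the 16-word input block into the 80-word message schedule:
	 * W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16])
	 */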
1840	do {
1841		HashWorkingPointer[t] =
1842		    S(1,
1843		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
1845		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1846	} while (++t <= 79);
1847	t = 0;
1848	A = HashResultPointer[0];
1849	B = HashResultPointer[1];
1850	C = HashResultPointer[2];
1851	D = HashResultPointer[3];
1852	E = HashResultPointer[4];
1853
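	/*
	 * 80 SHA-1 rounds in four 20-round stages, each stage using the
	 * standard SHA-1 logical function and round constant.
	 */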
1854	do {
1855		if (t < 20) {
1856			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1857		} else if (t < 40) {
1858			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1859		} else if (t < 60) {
1860			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1861		} else {
1862			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1863		}
1864		TEMP += S(5, A) + E + HashWorkingPointer[t];
1865		E = D;
1866		D = C;
1867		C = S(30, B);
1868		B = A;
1869		A = TEMP;
1870	} while (++t <= 79);
1871
1872	HashResultPointer[0] += A;
1873	HashResultPointer[1] += B;
1874	HashResultPointer[2] += C;
1875	HashResultPointer[3] += D;
1876	HashResultPointer[4] += E;
1877
1878}
1879
1880/**
1881 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1882 * @RandomChallenge: pointer to the entry of host challenge random number array.
1883 * @HashWorking: pointer to the entry of the working hash array.
1884 *
1885 * This routine calculates the working hash array entry referred to by
1886 * @HashWorking from the challenge random numbers associated with the host,
1887 * referred to by @RandomChallenge. The result is put into the entry of the
1888 * working hash array and returned by reference through @HashWorking.
1889 **/
1890static void
1891lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1892{
1893	*HashWorking = (*RandomChallenge ^ *HashWorking);
1894}
1895
1896/**
1897 * lpfc_hba_init - Perform special handling for LC HBA initialization
1898 * @phba: pointer to lpfc hba data structure.
1899 * @hbainit: pointer to an array of unsigned 32-bit integers.
1900 *
1901 * This routine performs the special handling for LC HBA initialization.
1902 **/
1903void
1904lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1905{
1906	int t;
1907	uint32_t *HashWorking;
1908	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1909
1910	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1911	if (!HashWorking)
1912		return;
1913
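	/* Seed both ends of the working array with the two WWNN words */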
1914	HashWorking[0] = HashWorking[78] = *pwwnn++;
1915	HashWorking[1] = HashWorking[79] = *pwwnn;
1916
1917	for (t = 0; t < 7; t++)
1918		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1919
1920	lpfc_sha_init(hbainit);
1921	lpfc_sha_iterate(hbainit, HashWorking);
1922	kfree(HashWorking);
1923}
1924
1925/**
1926 * lpfc_cleanup - Performs vport cleanups before deleting a vport
1927 * @vport: pointer to a virtual N_Port data structure.
1928 *
1929 * This routine performs the necessary cleanups before deleting the @vport.
1930 * It invokes the discovery state machine to perform necessary state
1931 * transitions and to release the ndlps associated with the @vport. Note,
1932 * the physical port is treated as @vport 0.
1933 **/
1934void
1935lpfc_cleanup(struct lpfc_vport *vport)
1936{
1937	struct lpfc_hba   *phba = vport->phba;
1938	struct lpfc_nodelist *ndlp, *next_ndlp;
1939	int i = 0;
1940
1941	if (phba->link_state > LPFC_LINK_DOWN)
1942		lpfc_port_link_failure(vport);
1943
1944	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1945		if (!NLP_CHK_NODE_ACT(ndlp)) {
1946			ndlp = lpfc_enable_node(vport, ndlp,
1947						NLP_STE_UNUSED_NODE);
1948			if (!ndlp)
1949				continue;
1950			spin_lock_irq(&phba->ndlp_lock);
1951			NLP_SET_FREE_REQ(ndlp);
1952			spin_unlock_irq(&phba->ndlp_lock);
1953			/* Trigger the release of the ndlp memory */
1954			lpfc_nlp_put(ndlp);
1955			continue;
1956		}
1957		spin_lock_irq(&phba->ndlp_lock);
1958		if (NLP_CHK_FREE_REQ(ndlp)) {
1959			/* The ndlp should not be in memory free mode already */
1960			spin_unlock_irq(&phba->ndlp_lock);
1961			continue;
1962		} else
1963			/* Indicate request for freeing ndlp memory */
1964			NLP_SET_FREE_REQ(ndlp);
1965		spin_unlock_irq(&phba->ndlp_lock);
1966
1967		if (vport->port_type != LPFC_PHYSICAL_PORT &&
1968		    ndlp->nlp_DID == Fabric_DID) {
1969			/* Just free up ndlp with Fabric_DID for vports */
1970			lpfc_nlp_put(ndlp);
1971			continue;
1972		}
1973
1974		if (ndlp->nlp_type & NLP_FABRIC)
1975			lpfc_disc_state_machine(vport, ndlp, NULL,
1976					NLP_EVT_DEVICE_RECOVERY);
1977
1978		lpfc_disc_state_machine(vport, ndlp, NULL,
1979					     NLP_EVT_DEVICE_RM);
1980
1981	}
1982
1983	/* At this point, ALL ndlp's should be gone
1984	 * because of the previous NLP_EVT_DEVICE_RM.
1985	 * Let's wait for this to happen, if needed.
1986	 */
1987	while (!list_empty(&vport->fc_nodes)) {
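		/* Give up after ~30 seconds (3000 iterations x 10 ms) */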
1988		if (i++ > 3000) {
1989			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1990				"0233 Nodelist not empty\n");
1991			list_for_each_entry_safe(ndlp, next_ndlp,
1992						&vport->fc_nodes, nlp_listp) {
1993				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1994						LOG_NODE,
1995						"0282 did:x%x ndlp:x%p "
1996						"usgmap:x%x refcnt:%d\n",
1997						ndlp->nlp_DID, (void *)ndlp,
1998						ndlp->nlp_usg_map,
1999						atomic_read(
2000							&ndlp->kref.refcount));
2001			}
2002			break;
2003		}
2004
2005		/* Wait for any activity on ndlps to settle */
2006		msleep(10);
2007	}
2008}
2009
2010/**
2011 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2012 * @vport: pointer to a virtual N_Port data structure.
2013 *
2014 * This routine stops all the timers associated with a @vport. This function
2015 * is invoked before disabling or deleting a @vport. Note that the physical
2016 * port is treated as @vport 0.
2017 **/
2018void
2019lpfc_stop_vport_timers(struct lpfc_vport *vport)
2020{
2021	del_timer_sync(&vport->els_tmofunc);
2022	del_timer_sync(&vport->fc_fdmitmo);
2023	lpfc_can_disctmo(vport);
2024	return;
2025}
2026
2027/**
2028 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2029 * @phba: pointer to lpfc hba data structure.
2030 *
2031 * This routine stops all the timers associated with a HBA. This function is
2032 * invoked before either putting a HBA offline or unloading the driver.
2033 **/
2034void
2035lpfc_stop_hba_timers(struct lpfc_hba *phba)
2036{
2037	lpfc_stop_vport_timers(phba->pport);
2038	del_timer_sync(&phba->sli.mbox_tmo);
2039	del_timer_sync(&phba->fabric_block_timer);
2040	del_timer_sync(&phba->eratt_poll);
2041	del_timer_sync(&phba->hb_tmofunc);
2042	phba->hb_outstanding = 0;
2043
2044	switch (phba->pci_dev_grp) {
2045	case LPFC_PCI_DEV_LP:
2046		/* Stop any LightPulse device specific driver timers */
2047		del_timer_sync(&phba->fcp_poll_timer);
2048		break;
2049	case LPFC_PCI_DEV_OC:
2050		/* Stop any OneConnect device specific driver timers */
2051		break;
2052	default:
2053		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2054				"0297 Invalid device group (x%x)\n",
2055				phba->pci_dev_grp);
2056		break;
2057	}
2058	return;
2059}
2060
2061/**
2062 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2063 * @phba: pointer to lpfc hba data structure.
2064 *
2065 * This routine marks a HBA's management interface as blocked. Once the HBA's
2066 * management interface is marked as blocked, all user space access to
2067 * the HBA, whether from the sysfs interface or the libdfc interface, will
2068 * be blocked. The HBA is set to block the management interface when the
2069 * driver prepares the HBA interface for online or offline.
2070 **/
2071static void
2072lpfc_block_mgmt_io(struct lpfc_hba * phba)
2073{
2074	unsigned long iflag;
2075
2076	spin_lock_irqsave(&phba->hbalock, iflag);
2077	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2078	spin_unlock_irqrestore(&phba->hbalock, iflag);
2079}
2080
2081/**
2082 * lpfc_online - Initialize and bring a HBA online
2083 * @phba: pointer to lpfc hba data structure.
2084 *
2085 * This routine initializes the HBA and brings a HBA online. During this
2086 * process, the management interface is blocked to prevent user space access
2087 * to the HBA interfering with the driver initialization.
2088 *
2089 * Return codes
2090 *   0 - successful
2091 *   1 - failed
2092 **/
2093int
2094lpfc_online(struct lpfc_hba *phba)
2095{
2096	struct lpfc_vport *vport;
2097	struct lpfc_vport **vports;
2098	int i;
2099
2100	if (!phba)
2101		return 0;
2102	vport = phba->pport;
2103
2104	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2105		return 0;
2106
2107	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2108			"0458 Bring Adapter online\n");
2109
2110	lpfc_block_mgmt_io(phba);
2111
2112	if (!lpfc_sli_queue_setup(phba)) {
2113		lpfc_unblock_mgmt_io(phba);
2114		return 1;
2115	}
2116
2117	if (phba->sli_rev == LPFC_SLI_REV4) {
2118		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2119			lpfc_unblock_mgmt_io(phba);
2120			return 1;
2121		}
2122	} else {
2123		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2124			lpfc_unblock_mgmt_io(phba);
2125			return 1;
2126		}
2127	}
2128
2129	vports = lpfc_create_vport_work_array(phba);
2130	if (vports != NULL)
2131		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2132			struct Scsi_Host *shost;
2133			shost = lpfc_shost_from_vport(vports[i]);
2134			spin_lock_irq(shost->host_lock);
2135			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2136			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2137				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2138			spin_unlock_irq(shost->host_lock);
2139		}
2140	lpfc_destroy_vport_work_array(phba, vports);
2141
2142	lpfc_unblock_mgmt_io(phba);
2143	return 0;
2144}
2145
2146/**
2147 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2148 * @phba: pointer to lpfc hba data structure.
2149 *
2150 * This routine marks a HBA's management interface as not blocked. Once the
2151 * HBA's management interface is marked as not blocked, all user space
2152 * access to the HBA, whether from the sysfs interface or the libdfc
2153 * interface, will be allowed. The HBA is set to block the management interface
2154 * when the driver prepares the HBA interface for online or offline and then
2155 * set to unblock the management interface afterwards.
2156 **/
2157void
2158lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2159{
2160	unsigned long iflag;
2161
2162	spin_lock_irqsave(&phba->hbalock, iflag);
2163	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2164	spin_unlock_irqrestore(&phba->hbalock, iflag);
2165}
2166
2167/**
2168 * lpfc_offline_prep - Prepare a HBA to be brought offline
2169 * @phba: pointer to lpfc hba data structure.
2170 *
2171 * This routine is invoked to prepare a HBA to be brought offline. It issues
2172 * an unreg_login to all the nodes on all vports and flushes the mailbox
2173 * queue to make it ready to be brought offline.
2174 **/
2175void
2176lpfc_offline_prep(struct lpfc_hba * phba)
2177{
2178	struct lpfc_vport *vport = phba->pport;
2179	struct lpfc_nodelist  *ndlp, *next_ndlp;
2180	struct lpfc_vport **vports;
2181	int i;
2182
2183	if (vport->fc_flag & FC_OFFLINE_MODE)
2184		return;
2185
2186	lpfc_block_mgmt_io(phba);
2187
2188	lpfc_linkdown(phba);
2189
2190	/* Issue an unreg_login to all nodes on all vports */
2191	vports = lpfc_create_vport_work_array(phba);
2192	if (vports != NULL) {
2193		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2194			struct Scsi_Host *shost;
2195
2196			if (vports[i]->load_flag & FC_UNLOADING)
2197				continue;
2198			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
2199			shost =	lpfc_shost_from_vport(vports[i]);
2200			list_for_each_entry_safe(ndlp, next_ndlp,
2201						 &vports[i]->fc_nodes,
2202						 nlp_listp) {
2203				if (!NLP_CHK_NODE_ACT(ndlp))
2204					continue;
2205				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2206					continue;
2207				if (ndlp->nlp_type & NLP_FABRIC) {
2208					lpfc_disc_state_machine(vports[i], ndlp,
2209						NULL, NLP_EVT_DEVICE_RECOVERY);
2210					lpfc_disc_state_machine(vports[i], ndlp,
2211						NULL, NLP_EVT_DEVICE_RM);
2212				}
2213				spin_lock_irq(shost->host_lock);
2214				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2215				spin_unlock_irq(shost->host_lock);
2216				lpfc_unreg_rpi(vports[i], ndlp);
2217			}
2218		}
2219	}
2220	lpfc_destroy_vport_work_array(phba, vports);
2221
2222	lpfc_sli_mbox_sys_shutdown(phba);
2223}
2224
2225/**
2226 * lpfc_offline - Bring a HBA offline
2227 * @phba: pointer to lpfc hba data structure.
2228 *
2229 * This routine actually brings a HBA offline. It stops all the timers
2230 * associated with the HBA, brings down the SLI layer, and eventually
2231 * marks the HBA as in offline state for the upper layer protocol.
2232 **/
2233void
2234lpfc_offline(struct lpfc_hba *phba)
2235{
2236	struct Scsi_Host  *shost;
2237	struct lpfc_vport **vports;
2238	int i;
2239
2240	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2241		return;
2242
2243	/* stop port and all timers associated with this hba */
2244	lpfc_stop_port(phba);
2245	vports = lpfc_create_vport_work_array(phba);
2246	if (vports != NULL)
2247		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2248			lpfc_stop_vport_timers(vports[i]);
2249	lpfc_destroy_vport_work_array(phba, vports);
2250	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2251			"0460 Bring Adapter offline\n");
2252	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2253	   now.  */
2254	lpfc_sli_hba_down(phba);
2255	spin_lock_irq(&phba->hbalock);
2256	phba->work_ha = 0;
2257	spin_unlock_irq(&phba->hbalock);
2258	vports = lpfc_create_vport_work_array(phba);
2259	if (vports != NULL)
2260		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2261			shost = lpfc_shost_from_vport(vports[i]);
2262			spin_lock_irq(shost->host_lock);
2263			vports[i]->work_port_events = 0;
2264			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2265			spin_unlock_irq(shost->host_lock);
2266		}
2267	lpfc_destroy_vport_work_array(phba, vports);
2268}
2269
2270/**
2271 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2272 * @phba: pointer to lpfc hba data structure.
2273 *
2274 * This routine is to free all the SCSI buffers and IOCBs from the driver
2275 * list back to the kernel. It is called from lpfc_pci_remove_one to free
2276 * the internal resources before the device is removed from the system.
2277 *
2278 * Return codes
2279 *   0 - successful (for now, it always returns 0)
2280 **/
2281static int
2282lpfc_scsi_free(struct lpfc_hba *phba)
2283{
2284	struct lpfc_scsi_buf *sb, *sb_next;
2285	struct lpfc_iocbq *io, *io_next;
2286
2287	spin_lock_irq(&phba->hbalock);
2288	/* Release all the lpfc_scsi_bufs maintained by this host. */
2289	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2290		list_del(&sb->list);
2291		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2292			      sb->dma_handle);
2293		kfree(sb);
2294		phba->total_scsi_bufs--;
2295	}
2296
2297	/* Release all the lpfc_iocbq entries maintained by this host. */
2298	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2299		list_del(&io->list);
2300		kfree(io);
2301		phba->total_iocbq_bufs--;
2302	}
2303
2304	spin_unlock_irq(&phba->hbalock);
2305
2306	return 0;
2307}
2308
2309/**
2310 * lpfc_create_port - Create an FC port
2311 * @phba: pointer to lpfc hba data structure.
2312 * @instance: a unique integer ID to this FC port.
2313 * @dev: pointer to the device data structure.
2314 *
2315 * This routine creates an FC port for the upper layer protocol. The FC port
2316 * can be created on top of either a physical port or a virtual port provided
2317 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2318 * and associates it with the FC port created, before adding the shost to the
2319 * SCSI layer.
2320 *
2321 * Return codes
2322 *   @vport - pointer to the virtual N_Port data structure.
2323 *   NULL - port create failed.
2324 **/
2325struct lpfc_vport *
2326lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2327{
2328	struct lpfc_vport *vport;
2329	struct Scsi_Host  *shost;
2330	int error = 0;
2331
2332	if (dev != &phba->pcidev->dev)
2333		shost = scsi_host_alloc(&lpfc_vport_template,
2334					sizeof(struct lpfc_vport));
2335	else
2336		shost = scsi_host_alloc(&lpfc_template,
2337					sizeof(struct lpfc_vport));
2338	if (!shost)
2339		goto out;
2340
2341	vport = (struct lpfc_vport *) shost->hostdata;
2342	vport->phba = phba;
2343	vport->load_flag |= FC_LOADING;
2344	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2345	vport->fc_rscn_flush = 0;
2346
2347	lpfc_get_vport_cfgparam(vport);
2348	shost->unique_id = instance;
2349	shost->max_id = LPFC_MAX_TARGET;
2350	shost->max_lun = vport->cfg_max_luns;
2351	shost->this_id = -1;
2352	shost->max_cmd_len = 16;
2353	if (phba->sli_rev == LPFC_SLI_REV4) {
2354		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2355		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2356	}
2357
2358	/*
2359	 * Set initial can_queue value since 0 is no longer supported and
2360	 * scsi_add_host will fail. This will be adjusted later based on the
2361	 * max xri value determined in hba setup.
2362	 */
2363	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2364	if (dev != &phba->pcidev->dev) {
2365		shost->transportt = lpfc_vport_transport_template;
2366		vport->port_type = LPFC_NPIV_PORT;
2367	} else {
2368		shost->transportt = lpfc_transport_template;
2369		vport->port_type = LPFC_PHYSICAL_PORT;
2370	}
2371
2372	/* Initialize all internally managed lists. */
2373	INIT_LIST_HEAD(&vport->fc_nodes);
2374	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2375	spin_lock_init(&vport->work_port_lock);
2376
2377	init_timer(&vport->fc_disctmo);
2378	vport->fc_disctmo.function = lpfc_disc_timeout;
2379	vport->fc_disctmo.data = (unsigned long)vport;
2380
2381	init_timer(&vport->fc_fdmitmo);
2382	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2383	vport->fc_fdmitmo.data = (unsigned long)vport;
2384
2385	init_timer(&vport->els_tmofunc);
2386	vport->els_tmofunc.function = lpfc_els_timeout;
2387	vport->els_tmofunc.data = (unsigned long)vport;
2388
2389	error = scsi_add_host(shost, dev);
2390	if (error)
2391		goto out_put_shost;
2392
2393	spin_lock_irq(&phba->hbalock);
2394	list_add_tail(&vport->listentry, &phba->port_list);
2395	spin_unlock_irq(&phba->hbalock);
2396	return vport;
2397
2398out_put_shost:
2399	scsi_host_put(shost);
2400out:
2401	return NULL;
2402}
2403
2404/**
2405 * destroy_port -  destroy an FC port
2406 * @vport: pointer to an lpfc virtual N_Port data structure.
2407 *
2408 * This routine destroys an FC port from the upper layer protocol. All the
2409 * resources associated with the port are released.
2410 **/
2411void
2412destroy_port(struct lpfc_vport *vport)
2413{
2414	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2415	struct lpfc_hba  *phba = vport->phba;
2416
2417	lpfc_debugfs_terminate(vport);
2418	fc_remove_host(shost);
2419	scsi_remove_host(shost);
2420
2421	spin_lock_irq(&phba->hbalock);
2422	list_del_init(&vport->listentry);
2423	spin_unlock_irq(&phba->hbalock);
2424
2425	lpfc_cleanup(vport);
2426	return;
2427}
2428
2429/**
2430 * lpfc_get_instance - Get a unique integer ID
2431 *
2432 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2433 * uses the kernel idr facility to perform the task.
2434 *
2435 * Return codes:
2436 *   instance - a unique integer ID allocated as the new instance.
2437 *   -1 - lpfc get instance failed.
2438 **/
2439int
2440lpfc_get_instance(void)
2441{
2442	int instance = 0;
2443
2444	/* Assign an unused number */
2445	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2446		return -1;
2447	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2448		return -1;
2449	return instance;
2450}
2451
2452/**
2453 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2454 * @shost: pointer to SCSI host data structure.
2455 * @time: elapsed time of the scan in jiffies.
2456 *
2457 * This routine is called by the SCSI layer with a SCSI host to determine
2458 * whether the scan host is finished.
2459 *
2460 * Note: there is no scan_start function as adapter initialization will have
2461 * asynchronously kicked off the link initialization.
2462 *
2463 * Return codes
2464 *   0 - SCSI host scan is not over yet.
2465 *   1 - SCSI host scan is over.
2466 **/
2467int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2468{
2469	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2470	struct lpfc_hba   *phba = vport->phba;
2471	int stat = 0;
2472
2473	spin_lock_irq(shost->host_lock);
2474
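	/*
	 * Completion heuristics: report done on unload, after 30 seconds of
	 * scanning, after 15 seconds with the link still down, or once
	 * discovery has quiesced.
	 */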
2475	if (vport->load_flag & FC_UNLOADING) {
2476		stat = 1;
2477		goto finished;
2478	}
2479	if (time >= 30 * HZ) {
2480		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2481				"0461 Scanning longer than 30 "
2482				"seconds.  Continuing initialization\n");
2483		stat = 1;
2484		goto finished;
2485	}
2486	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2487		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2488				"0465 Link down longer than 15 "
2489				"seconds.  Continuing initialization\n");
2490		stat = 1;
2491		goto finished;
2492	}
2493
2494	if (vport->port_state != LPFC_VPORT_READY)
2495		goto finished;
2496	if (vport->num_disc_nodes || vport->fc_prli_sent)
2497		goto finished;
2498	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2499		goto finished;
2500	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2501		goto finished;
2502
2503	stat = 1;
2504
2505finished:
2506	spin_unlock_irq(shost->host_lock);
2507	return stat;
2508}
2509
2510/**
2511 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2512 * @shost: pointer to SCSI host data structure.
2513 *
2514 * This routine initializes a given SCSI host's attributes on an FC port. The
2515 * SCSI host can be either on top of a physical port or a virtual port.
2516 **/
2517void lpfc_host_attrib_init(struct Scsi_Host *shost)
2518{
2519	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2520	struct lpfc_hba   *phba = vport->phba;
2521	/*
2522	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2523	 */
2524
2525	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2526	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2527	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2528
2529	memset(fc_host_supported_fc4s(shost), 0,
2530	       sizeof(fc_host_supported_fc4s(shost)));
2531	fc_host_supported_fc4s(shost)[2] = 1;
2532	fc_host_supported_fc4s(shost)[7] = 1;
2533
2534	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2535				 sizeof fc_host_symbolic_name(shost));
2536
2537	fc_host_supported_speeds(shost) = 0;
2538	if (phba->lmt & LMT_10Gb)
2539		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2540	if (phba->lmt & LMT_8Gb)
2541		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2542	if (phba->lmt & LMT_4Gb)
2543		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2544	if (phba->lmt & LMT_2Gb)
2545		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2546	if (phba->lmt & LMT_1Gb)
2547		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2548
2549	fc_host_maxframe_size(shost) =
2550		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2551		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2552
2553	/* This value is also unchanging */
2554	memset(fc_host_active_fc4s(shost), 0,
2555	       sizeof(fc_host_active_fc4s(shost)));
2556	fc_host_active_fc4s(shost)[2] = 1;
2557	fc_host_active_fc4s(shost)[7] = 1;
2558
2559	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2560	spin_lock_irq(shost->host_lock);
2561	vport->load_flag &= ~FC_LOADING;
2562	spin_unlock_irq(shost->host_lock);
2563}
2564
2565/**
2566 * lpfc_stop_port_s3 - Stop SLI3 device port
2567 * @phba: pointer to lpfc hba data structure.
2568 *
2569 * This routine is invoked to stop an SLI3 device port; it stops the device
2570 * from generating interrupts and stops the device driver's timers for the
2571 * device.
2572 **/
2573static void
2574lpfc_stop_port_s3(struct lpfc_hba *phba)
2575{
2576	/* Clear all interrupt enable conditions */
2577	writel(0, phba->HCregaddr);
2578	readl(phba->HCregaddr); /* flush */
2579	/* Clear all pending interrupts */
2580	writel(0xffffffff, phba->HAregaddr);
2581	readl(phba->HAregaddr); /* flush */
2582
2583	/* Reset some HBA SLI setup states */
2584	lpfc_stop_hba_timers(phba);
2585	phba->pport->work_port_events = 0;
2586}
2587
2588/**
2589 * lpfc_stop_port_s4 - Stop SLI4 device port
2590 * @phba: pointer to lpfc hba data structure.
2591 *
2592 * This routine is invoked to stop an SLI4 device port; it stops the device
2593 * from generating interrupts and stops the device driver's timers for the
2594 * device.
2595 **/
2596static void
2597lpfc_stop_port_s4(struct lpfc_hba *phba)
2598{
2599	/* Reset some HBA SLI4 setup states */
2600	lpfc_stop_hba_timers(phba);
2601	phba->pport->work_port_events = 0;
2602	phba->sli4_hba.intr_enable = 0;
2603	/* Hard-clear it for now; a more graceful wait can be added later */
2604	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2605}
2606
2607/**
2608 * lpfc_stop_port - Wrapper function for stopping hba port
2609 * @phba: Pointer to HBA context object.
2610 *
2611 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
2612 * the API jump table function pointer in the lpfc_hba struct.
2613 **/
2614void
2615lpfc_stop_port(struct lpfc_hba *phba)
2616{
2617	phba->lpfc_stop_port(phba);
2618}
2619
2620/**
2621 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2622 * @phba: pointer to lpfc hba data structure.
2623 *
2624 * This routine is invoked to remove the driver default fcf record from
2625 * the port.  This routine currently acts on FCF Index 0.
2626 *
2627 **/
2628void
2629lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2630{
2631	int rc = 0;
2632	LPFC_MBOXQ_t *mboxq;
2633	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2634	uint32_t mbox_tmo, req_len;
2635	uint32_t shdr_status, shdr_add_status;
2636
2637	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2638	if (!mboxq) {
2639		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2640			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2641		return;
2642	}
2643
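	/* Embedded mailbox payload length excludes the common cfg header */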
2644	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2645		  sizeof(struct lpfc_sli4_cfg_mhdr);
2646	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2647			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2648			      req_len, LPFC_SLI4_MBX_EMBED);
2649	/*
2650	 * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
2651	 * supports multiple FCF indices.
2652	 */
2653	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2654	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2655	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2656	       phba->fcf.fcf_indx);
2657
2658	if (!phba->sli4_hba.intr_enable)
2659		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2660	else {
2661		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2662		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2663	}
2664	/* The IOCTL status is embedded in the mailbox subheader. */
2665	shdr_status = bf_get(lpfc_mbox_hdr_status,
2666			     &del_fcf_record->header.cfg_shdr.response);
2667	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2668				 &del_fcf_record->header.cfg_shdr.response);
2669	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2670		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2671				"2516 DEL FCF of default FCF Index failed "
2672				"mbx status x%x, status x%x add_status x%x\n",
2673				rc, shdr_status, shdr_add_status);
2674	}
2675	if (rc != MBX_TIMEOUT)
2676		mempool_free(mboxq, phba->mbox_mem_pool);
2677}
2678
2679/**
2680 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2681 * @phba: pointer to lpfc hba data structure.
2682 * @acqe_link: pointer to the async link completion queue entry.
2683 *
2684 * This routine is to parse the SLI4 link-attention link fault code and
2685 * translate it into the base driver's read link attention mailbox command
2686 * status.
2687 *
2688 * Return: Link-attention status in terms of base driver's coding.
2689 **/
2690static uint16_t
2691lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2692			   struct lpfc_acqe_link *acqe_link)
2693{
2694	uint16_t latt_fault;
2695
2696	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2697	case LPFC_ASYNC_LINK_FAULT_NONE:
2698	case LPFC_ASYNC_LINK_FAULT_LOCAL:
2699	case LPFC_ASYNC_LINK_FAULT_REMOTE:
2700		latt_fault = 0;
2701		break;
2702	default:
2703		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2704				"0398 Invalid link fault code: x%x\n",
2705				bf_get(lpfc_acqe_link_fault, acqe_link));
2706		latt_fault = MBXERR_ERROR;
2707		break;
2708	}
2709	return latt_fault;
2710}
2711
2712/**
2713 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2714 * @phba: pointer to lpfc hba data structure.
2715 * @acqe_link: pointer to the async link completion queue entry.
2716 *
2717 * This routine is to parse the SLI4 link attention type and translate it
2718 * into the base driver's link attention type coding.
2719 *
2720 * Return: Link attention type in terms of base driver's coding.
2721 **/
2722static uint8_t
2723lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2724			  struct lpfc_acqe_link *acqe_link)
2725{
2726	uint8_t att_type;
2727
2728	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2729	case LPFC_ASYNC_LINK_STATUS_DOWN:
2730	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2731		att_type = AT_LINK_DOWN;
2732		break;
2733	case LPFC_ASYNC_LINK_STATUS_UP:
2734		/* Ignore physical link up events - wait for logical link up */
2735		att_type = AT_RESERVED;
2736		break;
2737	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2738		att_type = AT_LINK_UP;
2739		break;
2740	default:
2741		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2742				"0399 Invalid link attention type: x%x\n",
2743				bf_get(lpfc_acqe_link_status, acqe_link));
2744		att_type = AT_RESERVED;
2745		break;
2746	}
2747	return att_type;
2748}
2749
2750/**
2751 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2752 * @phba: pointer to lpfc hba data structure.
2753 * @acqe_link: pointer to the async link completion queue entry.
2754 *
2755 * This routine is to parse the SLI4 link-attention link speed and translate
2756 * it into the base driver's link-attention link speed coding.
2757 *
2758 * Return: Link-attention link speed in terms of base driver's coding.
2759 **/
2760static uint8_t
2761lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2762				struct lpfc_acqe_link *acqe_link)
2763{
2764	uint8_t link_speed;
2765
2766	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2767	case LPFC_ASYNC_LINK_SPEED_ZERO:
2768	case LPFC_ASYNC_LINK_SPEED_10MBPS:
2769	case LPFC_ASYNC_LINK_SPEED_100MBPS:
2770		link_speed = LA_UNKNW_LINK;
2771		break;
2776	case LPFC_ASYNC_LINK_SPEED_1GBPS:
2777		link_speed = LA_1GHZ_LINK;
2778		break;
2779	case LPFC_ASYNC_LINK_SPEED_10GBPS:
2780		link_speed = LA_10GHZ_LINK;
2781		break;
2782	default:
2783		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2784				"0483 Invalid link-attention link speed: x%x\n",
2785				bf_get(lpfc_acqe_link_speed, acqe_link));
2786		link_speed = LA_UNKNW_LINK;
2787		break;
2788	}
2789	return link_speed;
2790}
2791
2792/**
2793 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2794 * @phba: pointer to lpfc hba data structure.
2795 * @acqe_link: pointer to the async link completion queue entry.
2796 *
2797 * This routine is to handle the SLI4 asynchronous link event.
2798 **/
2799static void
2800lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2801			 struct lpfc_acqe_link *acqe_link)
2802{
2803	struct lpfc_dmabuf *mp;
2804	LPFC_MBOXQ_t *pmb;
2805	MAILBOX_t *mb;
2806	READ_LA_VAR *la;
2807	uint8_t att_type;
2808
2809	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2810	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2811		return;
2812	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2813	if (!pmb) {
2814		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2815				"0395 The mboxq allocation failed\n");
2816		return;
2817	}
2818	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2819	if (!mp) {
2820		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2821				"0396 The lpfc_dmabuf allocation failed\n");
2822		goto out_free_pmb;
2823	}
2824	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2825	if (!mp->virt) {
2826		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2827				"0397 The mbuf allocation failed\n");
2828		goto out_free_dmabuf;
2829	}
2830
2831	/* Cleanup any outstanding ELS commands */
2832	lpfc_els_flush_all_cmd(phba);
2833
2834	/* Block ELS IOCBs until we have done process link event */
2835	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2836
2837	/* Update link event statistics */
2838	phba->sli.slistat.link_event++;
2839
2840	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2841	lpfc_read_la(phba, pmb, mp);
2842	pmb->vport = phba->pport;
2843
2844	/* Parse and translate status field */
2845	mb = &pmb->u.mb;
2846	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2847
2848	/* Parse and translate link attention fields */
2849	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2850	la->eventTag = acqe_link->event_tag;
2851	la->attType = att_type;
2852	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2853
2854	/* Fake the following irrelevant fields */
2855	la->topology = TOPOLOGY_PT_PT;
2856	la->granted_AL_PA = 0;
2857	la->il = 0;
2858	la->pb = 0;
2859	la->fa = 0;
2860	la->mm = 0;
2861
2862	/* Keep the link status for extra SLI4 state machine reference */
2863	phba->sli4_hba.link_state.speed =
2864				bf_get(lpfc_acqe_link_speed, acqe_link);
2865	phba->sli4_hba.link_state.duplex =
2866				bf_get(lpfc_acqe_link_duplex, acqe_link);
2867	phba->sli4_hba.link_state.status =
2868				bf_get(lpfc_acqe_link_status, acqe_link);
2869	phba->sli4_hba.link_state.physical =
2870				bf_get(lpfc_acqe_link_physical, acqe_link);
2871	phba->sli4_hba.link_state.fault =
2872				bf_get(lpfc_acqe_link_fault, acqe_link);
2873
2874	/* Invoke the lpfc_handle_latt mailbox command callback function */
2875	lpfc_mbx_cmpl_read_la(phba, pmb);
2876
2877	return;
2878
2879out_free_dmabuf:
2880	kfree(mp);
2881out_free_pmb:
2882	mempool_free(pmb, phba->mbox_mem_pool);
2883}
2884
2885/**
2886 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2887 * @phba: pointer to lpfc hba data structure.
2888 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2889 *
2890 * This routine is to handle the SLI4 asynchronous fcoe event.
2891 **/
2892static void
2893lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2894			 struct lpfc_acqe_fcoe *acqe_fcoe)
2895{
2896	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2897	int rc;
2898
2899	switch (event_type) {
2900	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2901		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2902			"2546 New FCF found index 0x%x tag 0x%x\n",
2903			acqe_fcoe->fcf_index,
2904			acqe_fcoe->event_tag);
2905		/*
2906		 * If the current FCF is in discovered state,
2907		 * do nothing.
2908		 */
2909		spin_lock_irq(&phba->hbalock);
2910		if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2911			spin_unlock_irq(&phba->hbalock);
2912			break;
2913		}
2914		spin_unlock_irq(&phba->hbalock);
2915
2916		/* Read the FCF table and re-discover SAN. */
2917		rc = lpfc_sli4_read_fcf_record(phba,
2918			LPFC_FCOE_FCF_GET_FIRST);
2919		if (rc)
2920			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2921				"2547 Read FCF record failed 0x%x\n",
2922				rc);
2923		break;
2924
2925	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2926		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2927			"2548 FCF Table full count 0x%x tag 0x%x\n",
2928			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2929			acqe_fcoe->event_tag);
2930		break;
2931
2932	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2933		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2934			"2549 FCF disconnected from network index 0x%x"
2935			" tag 0x%x\n", acqe_fcoe->fcf_index,
2936			acqe_fcoe->event_tag);
2937		/* If the event is not for the currently used fcf, do nothing */
2938		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2939			break;
2940		/*
2941		 * Currently, the driver supports only one FCF - so treat this as
2942		 * a link down.
2943		 */
2944		lpfc_linkdown(phba);
2945		/* Unregister FCF if no devices connected to it */
2946		lpfc_unregister_unused_fcf(phba);
2947		break;
2948
2949	default:
2950		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2951			"0288 Unknown FCoE event type 0x%x event tag "
2952			"0x%x\n", event_type, acqe_fcoe->event_tag);
2953		break;
2954	}
2955}
2956
2957/**
2958 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2959 * @phba: pointer to lpfc hba data structure.
2960 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2961 *
2962 * This routine is to handle the SLI4 asynchronous dcbx event.
2963 **/
2964static void
2965lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2966			 struct lpfc_acqe_dcbx *acqe_dcbx)
2967{
2968	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2969			"0290 The SLI4 DCBX asynchronous event is not "
2970			"handled yet\n");
2971}
2972
2973/**
2974 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
2975 * @phba: pointer to lpfc hba data structure.
2976 *
2977 * This routine is invoked by the worker thread to process all the pending
2978 * SLI4 asynchronous events.
2979 **/
2980void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2981{
2982	struct lpfc_cq_event *cq_event;
2983
2984	/* First, declare the async event has been handled */
2985	spin_lock_irq(&phba->hbalock);
2986	phba->hba_flag &= ~ASYNC_EVENT;
2987	spin_unlock_irq(&phba->hbalock);
2988	/* Now, handle all the async events */
2989	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2990		/* Get the first event from the head of the event queue */
2991		spin_lock_irq(&phba->hbalock);
2992		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2993				 cq_event, struct lpfc_cq_event, list);
2994		spin_unlock_irq(&phba->hbalock);
2995		/* Process the asynchronous event */
2996		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2997		case LPFC_TRAILER_CODE_LINK:
2998			lpfc_sli4_async_link_evt(phba,
2999						 &cq_event->cqe.acqe_link);
3000			break;
3001		case LPFC_TRAILER_CODE_FCOE:
3002			lpfc_sli4_async_fcoe_evt(phba,
3003						 &cq_event->cqe.acqe_fcoe);
3004			break;
3005		case LPFC_TRAILER_CODE_DCBX:
3006			lpfc_sli4_async_dcbx_evt(phba,
3007						 &cq_event->cqe.acqe_dcbx);
3008			break;
3009		default:
3010			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3011					"1804 Invalid asynchronous event code: "
3012					"x%x\n", bf_get(lpfc_trailer_code,
3013					&cq_event->cqe.mcqe_cmpl));
3014			break;
3015		}
3016		/* Free the completion event processed to the free pool */
3017		lpfc_sli4_cq_event_release(phba, cq_event);
3018	}
3019}
3020
3021/**
3022 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3023 * @phba: pointer to lpfc hba data structure.
3024 * @dev_grp: The HBA PCI-Device group number.
3025 *
3026 * This routine is invoked to set up the per HBA PCI-Device group function
3027 * API jump table entries.
3028 *
3029 * Return: 0 if success, otherwise -ENODEV
3030 **/
3031int
3032lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3033{
3034	int rc;
3035
3036	/* Set up lpfc PCI-device group */
3037	phba->pci_dev_grp = dev_grp;
3038
3039	/* The LPFC_PCI_DEV_OC uses SLI4 */
3040	if (dev_grp == LPFC_PCI_DEV_OC)
3041		phba->sli_rev = LPFC_SLI_REV4;
3042
3043	/* Set up device INIT API function jump table */
3044	rc = lpfc_init_api_table_setup(phba, dev_grp);
3045	if (rc)
3046		return -ENODEV;
3047	/* Set up SCSI API function jump table */
3048	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3049	if (rc)
3050		return -ENODEV;
3051	/* Set up SLI API function jump table */
3052	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3053	if (rc)
3054		return -ENODEV;
3055	/* Set up MBOX API function jump table */
3056	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3057	if (rc)
3058		return -ENODEV;
3059
3060	return 0;
3061}
3062
3063/**
3064 * lpfc_log_intr_mode - Log the active interrupt mode
3065 * @phba: pointer to lpfc hba data structure.
3066 * @intr_mode: active interrupt mode adopted.
3067 *
3068 * This routine is invoked to log the interrupt mode currently in use by
3069 * the device.
3070 **/
3071static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3072{
3073	switch (intr_mode) {
3074	case 0:
3075				"0470 Enabled INTx interrupt mode.\n");
3076				"0470 Enable INTx interrupt mode.\n");
3077		break;
3078	case 1:
3079		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3080				"0481 Enabled MSI interrupt mode.\n");
3081		break;
3082	case 2:
3083		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3084				"0480 Enabled MSI-X interrupt mode.\n");
3085		break;
3086	default:
3087		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3088				"0482 Illegal interrupt mode.\n");
3089		break;
3090	}
3091	return;
3092}
3093
3094/**
3095 * lpfc_enable_pci_dev - Enable a generic PCI device.
3096 * @phba: pointer to lpfc hba data structure.
3097 *
3098 * This routine is invoked to perform the PCI device enabling that is
3099 * common to all PCI devices.
3100 *
3101 * Return codes
3102 * 	0 - successful
3103 * 	other values - error
3104 **/
3105static int
3106lpfc_enable_pci_dev(struct lpfc_hba *phba)
3107{
3108	struct pci_dev *pdev;
3109	int bars;
3110
3111	/* Obtain PCI device reference */
3112	if (!phba->pcidev)
3113		goto out_error;
3114	else
3115		pdev = phba->pcidev;
3116	/* Select PCI BARs */
3117	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3118	/* Enable PCI device */
3119	if (pci_enable_device_mem(pdev))
3120		goto out_error;
3121	/* Request PCI resource for the device */
3122	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3123		goto out_disable_device;
3124	/* Set up device as PCI master and save state for EEH */
3125	pci_set_master(pdev);
3126	pci_try_set_mwi(pdev);
3127	pci_save_state(pdev);
3128
3129	return 0;
3130
3131out_disable_device:
3132	pci_disable_device(pdev);
3133out_error:
3134	return -ENODEV;
3135}
3136
3137/**
3138 * lpfc_disable_pci_dev - Disable a generic PCI device.
3139 * @phba: pointer to lpfc hba data structure.
3140 *
3141 * This routine is invoked to perform the PCI device disabling that is
3142 * common to all PCI devices.
3143 **/
3144static void
3145lpfc_disable_pci_dev(struct lpfc_hba *phba)
3146{
3147	struct pci_dev *pdev;
3148	int bars;
3149
3150	/* Obtain PCI device reference */
3151	if (!phba->pcidev)
3152		return;
3153	else
3154		pdev = phba->pcidev;
3155	/* Select PCI BARs */
3156	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3157	/* Release PCI resource and disable PCI device */
3158	pci_release_selected_regions(pdev, bars);
3159	pci_disable_device(pdev);
3160	/* Null out PCI private reference to driver */
3161	pci_set_drvdata(pdev, NULL);
3162
3163	return;
3164}
3165
3166/**
3167 * lpfc_reset_hba - Reset a hba
3168 * @phba: pointer to lpfc hba data structure.
3169 *
3170 * This routine is invoked to reset a hba device. It brings the HBA
3171 * offline, performs a board restart, and then brings the board back
3172 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans
3173 * up any outstanding mailbox commands.
3174 **/
3175void
3176lpfc_reset_hba(struct lpfc_hba *phba)
3177{
3178	/* If resets are disabled then set error state and return. */
3179	if (!phba->cfg_enable_hba_reset) {
3180		phba->link_state = LPFC_HBA_ERROR;
3181		return;
3182	}
3183	lpfc_offline_prep(phba);
3184	lpfc_offline(phba);
3185	lpfc_sli_brdrestart(phba);
3186	lpfc_online(phba);
3187	lpfc_unblock_mgmt_io(phba);
3188}
3189
3190/**
3191 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3192 * @phba: pointer to lpfc hba data structure.
3193 *
3194 * This routine is invoked to set up the driver internal resources specific to
3195 * support the SLI-3 HBA device it is attached to.
3196 *
3197 * Return codes
3198 * 	0 - successful
3199 * 	other values - error
3200 **/
3201static int
3202lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3203{
3204	struct lpfc_sli *psli;
3205
3206	/*
3207	 * Initialize timers used by driver
3208	 */
3209
3210	/* Heartbeat timer */
3211	init_timer(&phba->hb_tmofunc);
3212	phba->hb_tmofunc.function = lpfc_hb_timeout;
3213	phba->hb_tmofunc.data = (unsigned long)phba;
3214
3215	psli = &phba->sli;
3216	/* MBOX heartbeat timer */
3217	init_timer(&psli->mbox_tmo);
3218	psli->mbox_tmo.function = lpfc_mbox_timeout;
3219	psli->mbox_tmo.data = (unsigned long) phba;
3220	/* FCP polling mode timer */
3221	init_timer(&phba->fcp_poll_timer);
3222	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3223	phba->fcp_poll_timer.data = (unsigned long) phba;
3224	/* Fabric block timer */
3225	init_timer(&phba->fabric_block_timer);
3226	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3227	phba->fabric_block_timer.data = (unsigned long) phba;
3228	/* EA polling mode timer */
3229	init_timer(&phba->eratt_poll);
3230	phba->eratt_poll.function = lpfc_poll_eratt;
3231	phba->eratt_poll.data = (unsigned long) phba;
3232
3233	/* Host attention work mask setup */
3234	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3235	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3236
3237	/* Get all the module params for configuring this host */
3238	lpfc_get_cfgparam(phba);
3239	/*
3240	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3241	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3242	 * 2 segments are added since the IOCB needs a command and response bde.
3243	 */
3244	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3245		sizeof(struct fcp_rsp) +
3246			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3247
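	/* BlockGuard (T10 DIF) needs room for protection-data BDEs as well */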
3248	if (phba->cfg_enable_bg) {
3249		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3250		phba->cfg_sg_dma_buf_size +=
3251			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3252	}
3253
3254	/* Also reinitialize the host templates with new values. */
3255	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3256	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257
3258	phba->max_vpi = LPFC_MAX_VPI;
3259	/* This will be set to correct value after config_port mbox */
3260	phba->max_vports = 0;
3261
3262	/*
3263	 * Initialize the SLI Layer to run with lpfc HBAs.
3264	 */
3265	lpfc_sli_setup(phba);
3266	lpfc_sli_queue_setup(phba);
3267
3268	/* Allocate device driver memory */
3269	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3270		return -ENOMEM;
3271
3272	return 0;
3273}
3274
3275/**
3276 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3277 * @phba: pointer to lpfc hba data structure.
3278 *
3279 * This routine is invoked to unset the driver internal resources set up
3280 * specific for supporting the SLI-3 HBA device it is attached to.
3281 **/
3282static void
3283lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3284{
3285	/* Free device driver memory allocated */
3286	lpfc_mem_free_all(phba);
3287
3288	return;
3289}
3290
3291/**
3292 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3293 * @phba: pointer to lpfc hba data structure.
3294 *
3295 * This routine is invoked to set up the driver internal resources specific to
3296 * support the SLI-4 HBA device it is attached to.
3297 *
3298 * Return codes
3299 * 	0 - successful
3300 * 	other values - error
3301 **/
3302static int
3303lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3304{
3305	struct lpfc_sli *psli;
3306	int rc;
3307	int i, hbq_count;
3308
3309	/* Before proceeding, wait for POST done and device ready */
3310	rc = lpfc_sli4_post_status_check(phba);
3311	if (rc)
3312		return -ENODEV;
3313
3314	/*
3315	 * Initialize timers used by driver
3316	 */
3317
3318	/* Heartbeat timer */
3319	init_timer(&phba->hb_tmofunc);
3320	phba->hb_tmofunc.function = lpfc_hb_timeout;
3321	phba->hb_tmofunc.data = (unsigned long)phba;
3322
3323	psli = &phba->sli;
3324	/* MBOX heartbeat timer */
3325	init_timer(&psli->mbox_tmo);
3326	psli->mbox_tmo.function = lpfc_mbox_timeout;
3327	psli->mbox_tmo.data = (unsigned long) phba;
3328	/* Fabric block timer */
3329	init_timer(&phba->fabric_block_timer);
3330	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3331	phba->fabric_block_timer.data = (unsigned long) phba;
3332	/* EA polling mode timer */
3333	init_timer(&phba->eratt_poll);
3334	phba->eratt_poll.function = lpfc_poll_eratt;
3335	phba->eratt_poll.data = (unsigned long) phba;
3336	/*
3337	 * We need to do a READ_CONFIG mailbox command here before
3338	 * calling lpfc_get_cfgparam. For VFs this will report the
3339	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3340	 * All of the resources allocated for this Port are tied to
3341	 * these values.
3342	 */
3343	/* Get all the module params for configuring this host */
3344	lpfc_get_cfgparam(phba);
3345	phba->max_vpi = LPFC_MAX_VPI;
3346	/* This will be set to correct value after the read_config mbox */
3347	phba->max_vports = 0;
3348
3349	/* Program the default value of vlan_id and fc_map */
3350	phba->valid_vlan = 0;
3351	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3352	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3353	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3354
3355	/*
3356	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3357	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3358	 * 2 segments are added since the IOCB needs a command and response bde.
3359	 * To ensure that the scsi sgl does not cross a 4k page boundary only
3360	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3361	 * Table of sgl sizes and seg_cnt:
3362	 * sgl size	sg_seg_cnt	total seg
3363	 * 1k		50		52
3364	 * 2k		114		116
3365	 * 4k		242		244
3366	 * 8k		498		500
3367	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3368	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3369	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3370	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3371	 */
3372	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3373		phba->cfg_sg_seg_cnt = 50;
3374	else if (phba->cfg_sg_seg_cnt <= 114)
3375		phba->cfg_sg_seg_cnt = 114;
3376	else if (phba->cfg_sg_seg_cnt <= 242)
3377		phba->cfg_sg_seg_cnt = 242;
3378	else
3379		phba->cfg_sg_seg_cnt = 498;
3380
3381	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3382					+ sizeof(struct fcp_rsp);
3383	phba->cfg_sg_dma_buf_size +=
3384		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3385
3386	/* Initialize buffer queue management fields */
3387	hbq_count = lpfc_sli_hbq_count();
3388	for (i = 0; i < hbq_count; ++i)
3389		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3390	INIT_LIST_HEAD(&phba->rb_pend_list);
3391	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3392	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3393
3394	/*
3395	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3396	 */
3397	/* Initialize the Abort scsi buffer list used by driver */
3398	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3399	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3400	/* This abort list is used by the worker thread */
3401	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3402
3403	/*
3404	 * Initialize driver internal slow-path work queues
3405	 */
3406
3407	/* Driver internal slow-path CQ Event pool */
3408	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3409	/* Response IOCB work queue list */
3410	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3411	/* Asynchronous event CQ Event work queue list */
3412	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3413	/* Fast-path XRI aborted CQ Event work queue list */
3414	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3415	/* Slow-path XRI aborted CQ Event work queue list */
3416	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3417	/* Receive queue CQ Event work queue list */
3418	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3419
3420	/* Initialize the driver internal SLI layer lists. */
3421	lpfc_sli_setup(phba);
3422	lpfc_sli_queue_setup(phba);
3423
3424	/* Allocate device driver memory */
3425	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3426	if (rc)
3427		return -ENOMEM;
3428
3429	/* Create the bootstrap mailbox command */
3430	rc = lpfc_create_bootstrap_mbox(phba);
3431	if (unlikely(rc))
3432		goto out_free_mem;
3433
3434	/* Set up the host's endian order with the device. */
3435	rc = lpfc_setup_endian_order(phba);
3436	if (unlikely(rc))
3437		goto out_free_bsmbx;
3438
3439	/* Set up the hba's configuration parameters. */
3440	rc = lpfc_sli4_read_config(phba);
3441	if (unlikely(rc))
3442		goto out_free_bsmbx;
3443
3444	/* Perform a function reset */
3445	rc = lpfc_pci_function_reset(phba);
3446	if (unlikely(rc))
3447		goto out_free_bsmbx;
3448
3449	/* Create all the SLI4 queues */
3450	rc = lpfc_sli4_queue_create(phba);
3451	if (rc)
3452		goto out_free_bsmbx;
3453
3454	/* Create driver internal CQE event pool */
3455	rc = lpfc_sli4_cq_event_pool_create(phba);
3456	if (rc)
3457		goto out_destroy_queue;
3458
3459	/* Initialize and populate the sgl list per host */
3460	rc = lpfc_init_sgl_list(phba);
3461	if (rc) {
3462		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3463				"1400 Failed to initialize sgl list.\n");
3464		goto out_destroy_cq_event_pool;
3465	}
3466	rc = lpfc_init_active_sgl_array(phba);
3467	if (rc) {
3468		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3469				"1430 Failed to initialize active sgl array.\n");
3470		goto out_free_sgl_list;
3471	}
3472
3473	rc = lpfc_sli4_init_rpi_hdrs(phba);
3474	if (rc) {
3475		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3476				"1432 Failed to initialize rpi headers.\n");
3477		goto out_free_active_sgl;
3478	}
3479
3480	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3481				    phba->cfg_fcp_eq_count), GFP_KERNEL);
3482	if (!phba->sli4_hba.fcp_eq_hdl) {
3483		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3484				"2572 Failed allocate memory for fast-path "
3485				"per-EQ handle array\n");
3486		goto out_remove_rpi_hdrs;
3487	}
3488
3489	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3490				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3491	if (!phba->sli4_hba.msix_entries) {
3492		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3493				"2573 Failed allocate memory for msi-x "
3494				"interrupt vector entries\n");
3495		goto out_free_fcp_eq_hdl;
3496	}
3497
3498	return rc;
3499
3500out_free_fcp_eq_hdl:
3501	kfree(phba->sli4_hba.fcp_eq_hdl);
3502out_remove_rpi_hdrs:
3503	lpfc_sli4_remove_rpi_hdrs(phba);
3504out_free_active_sgl:
3505	lpfc_free_active_sgl(phba);
3506out_free_sgl_list:
3507	lpfc_free_sgl_list(phba);
3508out_destroy_cq_event_pool:
3509	lpfc_sli4_cq_event_pool_destroy(phba);
3510out_destroy_queue:
3511	lpfc_sli4_queue_destroy(phba);
3512out_free_bsmbx:
3513	lpfc_destroy_bootstrap_mbox(phba);
3514out_free_mem:
3515	lpfc_mem_free(phba);
3516	return rc;
3517}
3518
3519/**
3520 * lpfc_sli4_driver_resource_unset - Unset driver internal resources for SLI4 dev
3521 * @phba: pointer to lpfc hba data structure.
3522 *
3523 * This routine is invoked to unset the driver internal resources set up
3524 * specifically for supporting the SLI-4 HBA device it is attached to.
3525 **/
3526static void
3527lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3528{
3529	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3530
3531	/* unregister default FCFI from the HBA */
3532	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3533
3534	/* Free the default FCF table */
3535	lpfc_sli_remove_dflt_fcf(phba);
3536
3537	/* Free memory allocated for msi-x interrupt vector entries */
3538	kfree(phba->sli4_hba.msix_entries);
3539
3540	/* Free memory allocated for fast-path work queue handles */
3541	kfree(phba->sli4_hba.fcp_eq_hdl);
3542
3543	/* Free the allocated rpi headers. */
3544	lpfc_sli4_remove_rpi_hdrs(phba);
3545
3546	/* Free the ELS sgl list */
3547	lpfc_free_active_sgl(phba);
3548	lpfc_free_sgl_list(phba);
3549
3550	/* Free the SCSI sgl management array */
3551	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3552
3553	/* Free the SLI4 queues */
3554	lpfc_sli4_queue_destroy(phba);
3555
3556	/* Free the completion queue EQ event pool */
3557	lpfc_sli4_cq_event_release_all(phba);
3558	lpfc_sli4_cq_event_pool_destroy(phba);
3559
3560	/* Reset SLI4 HBA FCoE function */
3561	lpfc_pci_function_reset(phba);
3562
3563	/* Free the bsmbx region. */
3564	lpfc_destroy_bootstrap_mbox(phba);
3565
3566	/* Free the SLI Layer memory with SLI4 HBAs */
3567	lpfc_mem_free_all(phba);
3568
3569	/* Free the current connect table */
3570	list_for_each_entry_safe(conn_entry, next_conn_entry,
3571		&phba->fcf_conn_rec_list, list)
3572		kfree(conn_entry);
3573
3574	return;
3575}
3576
3577/**
3578 * lpfc_init_api_table_setup - Set up init api function jump table
3579 * @phba: The hba struct for which this call is being executed.
3580 * @dev_grp: The HBA PCI-Device group number.
3581 *
3582 * This routine sets up the device INIT interface API function jump table
3583 * in @phba struct.
3584 *
3585 * Returns: 0 - success, -ENODEV - failure.
3586 **/
3587int
3588lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3589{
3590	switch (dev_grp) {
3591	case LPFC_PCI_DEV_LP:
3592		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3593		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3594		phba->lpfc_stop_port = lpfc_stop_port_s3;
3595		break;
3596	case LPFC_PCI_DEV_OC:
3597		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3598		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3599		phba->lpfc_stop_port = lpfc_stop_port_s4;
3600		break;
3601	default:
3602		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3603				"1431 Invalid HBA PCI-device group: 0x%x\n",
3604				dev_grp);
3605		return -ENODEV;
3607	}
3608	return 0;
3609}
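/*
 * Illustrative sketch (not part of the driver source): once the jump
 * table above is populated, callers invoke the SLI-revision-specific
 * handlers through the function pointers without any further checks,
 * for example:
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_hba_down_post(phba);
 *
 * dispatch to the _s3 or _s4 variant depending on the PCI device group.
 */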
3610
3611/**
3612 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3613 * @phba: pointer to lpfc hba data structure.
3614 *
3615 * This routine is invoked to set up the driver internal resources before the
3616 * device specific resource setup to support the HBA device it is attached to.
3617 *
3618 * Return codes
3619 *	0 - successful
3620 *	other values - error
3621 **/
3622static int
3623lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3624{
3625	/*
3626	 * Driver resources common to all SLI revisions
3627	 */
3628	atomic_set(&phba->fast_event_count, 0);
3629	spin_lock_init(&phba->hbalock);
3630
3631	/* Initialize ndlp management spinlock */
3632	spin_lock_init(&phba->ndlp_lock);
3633
3634	INIT_LIST_HEAD(&phba->port_list);
3635	INIT_LIST_HEAD(&phba->work_list);
3636	init_waitqueue_head(&phba->wait_4_mlo_m_q);
3637
3638	/* Initialize the wait queue head for the kernel thread */
3639	init_waitqueue_head(&phba->work_waitq);
3640
3641	/* Initialize the scsi buffer list used by driver for scsi IO */
3642	spin_lock_init(&phba->scsi_buf_list_lock);
3643	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3644
3645	/* Initialize the fabric iocb list */
3646	INIT_LIST_HEAD(&phba->fabric_iocb_list);
3647
3648	/* Initialize list to save ELS buffers */
3649	INIT_LIST_HEAD(&phba->elsbuf);
3650
3651	/* Initialize FCF connection rec list */
3652	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3653
3654	return 0;
3655}
3656
3657/**
3658 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3659 * @phba: pointer to lpfc hba data structure.
3660 *
3661 * This routine is invoked to set up the driver internal resources after the
3662 * device specific resource setup to support the HBA device it is attached to.
3663 *
3664 * Return codes
3665 * 	0 - successful
3666 * 	other values - error
3667 **/
3668static int
3669lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3670{
3671	int error;
3672
3673	/* Startup the kernel thread for this host adapter. */
3674	phba->worker_thread = kthread_run(lpfc_do_work, phba,
3675					  "lpfc_worker_%d", phba->brd_no);
3676	if (IS_ERR(phba->worker_thread)) {
3677		error = PTR_ERR(phba->worker_thread);
3678		return error;
3679	}
3680
3681	return 0;
3682}
3683
3684/**
3685 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3686 * @phba: pointer to lpfc hba data structure.
3687 *
3688 * This routine is invoked to unset the driver internal resources set up after
3689 * the device specific resource setup for supporting the HBA device it is
3690 * attached to.
3691 **/
3692static void
3693lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3694{
3695	/* Stop kernel worker thread */
3696	kthread_stop(phba->worker_thread);
3697}
3698
3699/**
3700 * lpfc_free_iocb_list - Free iocb list.
3701 * @phba: pointer to lpfc hba data structure.
3702 *
3703 * This routine is invoked to free the driver's IOCB list and memory.
3704 **/
3705static void
3706lpfc_free_iocb_list(struct lpfc_hba *phba)
3707{
3708	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3709
3710	spin_lock_irq(&phba->hbalock);
3711	list_for_each_entry_safe(iocbq_entry, iocbq_next,
3712				 &phba->lpfc_iocb_list, list) {
3713		list_del(&iocbq_entry->list);
3714		kfree(iocbq_entry);
3715		phba->total_iocbq_bufs--;
3716	}
3717	spin_unlock_irq(&phba->hbalock);
3718
3719	return;
3720}
3721
3722/**
3723 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3724 * @phba: pointer to lpfc hba data structure.
3725 *
3726 * This routine is invoked to allocate and initialize the driver's IOCB
3727 * list and set up the IOCB tag array accordingly.
3728 *
3729 * Return codes
3730 *	0 - successful
3731 *	other values - error
3732 **/
3733static int
3734lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3735{
3736	struct lpfc_iocbq *iocbq_entry = NULL;
3737	uint16_t iotag;
3738	int i;
3739
3740	/* Initialize and populate the iocb list per host.  */
3741	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3742	for (i = 0; i < iocb_count; i++) {
3743		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3744		if (iocbq_entry == NULL) {
3745			printk(KERN_ERR "%s: only allocated %d iocbs of "
3746				"expected %d count. Unloading driver.\n",
3747				__func__, i, iocb_count);
3748			goto out_free_iocbq;
3749		}
3750
3751		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3752		if (iotag == 0) {
3753			kfree(iocbq_entry);
3754			printk(KERN_ERR "%s: failed to allocate IOTAG. "
3755				"Unloading driver.\n", __func__);
3756			goto out_free_iocbq;
3757		}
3758		iocbq_entry->sli4_xritag = NO_XRI;
3759
3760		spin_lock_irq(&phba->hbalock);
3761		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3762		phba->total_iocbq_bufs++;
3763		spin_unlock_irq(&phba->hbalock);
3764	}
3765
3766	return 0;
3767
3768out_free_iocbq:
3769	lpfc_free_iocb_list(phba);
3770
3771	return -ENOMEM;
3772}
3773
3774/**
3775 * lpfc_free_sgl_list - Free sgl list.
3776 * @phba: pointer to lpfc hba data structure.
3777 *
3778 * This routine is invoked to free the driver's sgl list and memory.
3779 **/
3780static void
3781lpfc_free_sgl_list(struct lpfc_hba *phba)
3782{
3783	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3784	LIST_HEAD(sglq_list);
3785	int rc = 0;
3786
3787	spin_lock_irq(&phba->hbalock);
3788	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3789	spin_unlock_irq(&phba->hbalock);
3790
3791	list_for_each_entry_safe(sglq_entry, sglq_next,
3792				 &sglq_list, list) {
3793		list_del(&sglq_entry->list);
3794		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3795		kfree(sglq_entry);
3796		phba->sli4_hba.total_sglq_bufs--;
3797	}
3798	rc = lpfc_sli4_remove_all_sgl_pages(phba);
3799	if (rc) {
3800		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3801			"2005 Unable to deregister pages from HBA: %x", rc);
3802	}
3803	kfree(phba->sli4_hba.lpfc_els_sgl_array);
3804}
3805
3806/**
3807 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3808 * @phba: pointer to lpfc hba data structure.
3809 *
3810 * This routine is invoked to allocate the driver's active sgl memory.
3811 * This array will hold the sglq_entry's for active IOs.
3812 **/
3813static int
3814lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3815{
3816	int size;
3817	size = sizeof(struct lpfc_sglq *);
3818	size *= phba->sli4_hba.max_cfg_param.max_xri;
3819
3820	phba->sli4_hba.lpfc_sglq_active_list =
3821		kzalloc(size, GFP_KERNEL);
3822	if (!phba->sli4_hba.lpfc_sglq_active_list)
3823		return -ENOMEM;
3824	return 0;
3825}
3826
3827/**
3828 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3829 * @phba: pointer to lpfc hba data structure.
3830 *
3831 * This routine is invoked to walk through the array of active sglq entries
3832 * and free all of the resources.
3833 * This is just a placeholder for now.
3834 **/
3835static void
3836lpfc_free_active_sgl(struct lpfc_hba *phba)
3837{
3838	kfree(phba->sli4_hba.lpfc_sglq_active_list);
3839}
3840
3841/**
3842 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3843 * @phba: pointer to lpfc hba data structure.
3844 *
3845 * This routine is invoked to allocate and initialize the driver's sgl
3846 * list and set up the sgl xritag tag array accordingly.
3847 *
3848 * Return codes
3849 *	0 - successful
3850 *	other values - error
3851 **/
3852static int
3853lpfc_init_sgl_list(struct lpfc_hba *phba)
3854{
3855	struct lpfc_sglq *sglq_entry = NULL;
3856	int i;
3857	int els_xri_cnt;
3858
3859	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3860	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3861				"2400 lpfc_init_sgl_list els %d.\n",
3862				els_xri_cnt);
3863	/* Initialize and populate the sglq list per host/VF. */
3864	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3865	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3866
3867	/* Sanity check on XRI management */
3868	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3869		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3870				"2562 No room left for SCSI XRI allocation: "
3871				"max_xri=%d, els_xri=%d\n",
3872				phba->sli4_hba.max_cfg_param.max_xri,
3873				els_xri_cnt);
3874		return -ENOMEM;
3875	}
3876
3877	/* Allocate memory for the ELS XRI management array */
3878	phba->sli4_hba.lpfc_els_sgl_array =
3879			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3880			GFP_KERNEL);
3881
3882	if (!phba->sli4_hba.lpfc_els_sgl_array) {
3883		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3884				"2401 Failed to allocate memory for ELS "
3885				"XRI management array of size %d.\n",
3886				els_xri_cnt);
3887		return -ENOMEM;
3888	}
3889
3890	/* Reserve the remaining XRIs for the SCSI XRI management array */
3891	phba->sli4_hba.scsi_xri_max =
3892			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3893	phba->sli4_hba.scsi_xri_cnt = 0;
3894
3895	phba->sli4_hba.lpfc_scsi_psb_array =
3896			kzalloc((sizeof(struct lpfc_scsi_buf *) *
3897			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3898
3899	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3900		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3901				"2563 Failed to allocate memory for SCSI "
3902				"XRI management array of size %d.\n",
3903				phba->sli4_hba.scsi_xri_max);
3904		kfree(phba->sli4_hba.lpfc_els_sgl_array);
3905		return -ENOMEM;
3906	}
3907
3908	for (i = 0; i < els_xri_cnt; i++) {
3909		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3910		if (sglq_entry == NULL) {
3911			printk(KERN_ERR "%s: only allocated %d sgls of "
3912				"expected %d count. Unloading driver.\n",
3913				__func__, i, els_xri_cnt);
3914			goto out_free_mem;
3915		}
3916
3917		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3918		if (sglq_entry->sli4_xritag == NO_XRI) {
3919			kfree(sglq_entry);
3920			printk(KERN_ERR "%s: failed to allocate XRI. "
3921				"Unloading driver.\n", __func__);
3922			goto out_free_mem;
3923		}
3924		sglq_entry->buff_type = GEN_BUFF_TYPE;
3925		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3926		if (sglq_entry->virt == NULL) {
3927			kfree(sglq_entry);
3928			printk(KERN_ERR "%s: failed to allocate mbuf. "
3929				"Unloading driver.\n", __func__);
3930			goto out_free_mem;
3931		}
3932		sglq_entry->sgl = sglq_entry->virt;
3933		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3934
3935		/* The list order is used by later block SGL registration */
3936		spin_lock_irq(&phba->hbalock);
3937		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3938		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3939		phba->sli4_hba.total_sglq_bufs++;
3940		spin_unlock_irq(&phba->hbalock);
3941	}
3942	return 0;
3943
3944out_free_mem:
3945	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3946	lpfc_free_sgl_list(phba);
3947	return -ENOMEM;
3948}
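/*
 * Worked example of the XRI partitioning above (illustrative numbers
 * only): if the port reports max_xri = 1024 and
 * lpfc_sli4_get_els_iocb_cnt() returns 256, then
 * scsi_xri_max = 1024 - 256 = 768, so 256 sglq entries are pre-built
 * for ELS traffic and up to 768 XRIs remain available for SCSI buffers.
 */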
3949
3950/**
3951 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3952 * @phba: pointer to lpfc hba data structure.
3953 *
3954 * This routine is invoked to post rpi header templates to the
3955 * HBA consistent with the SLI-4 interface spec.  This routine
3956 * posts a PAGE_SIZE memory region to the port to hold up to
3957 * PAGE_SIZE / 64 rpi context headers.
3958 * No locks are held here because this is an initialization routine
3959 * called only from probe or lpfc_online when interrupts are not
3960 * enabled and the driver is reinitializing the device.
3961 *
3962 * Return codes
3963 * 	0 - successful
3964 * 	ENOMEM - No available memory
3965 *      EIO - The mailbox failed to complete successfully.
3966 **/
3967int
3968lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3969{
3970	int rc = 0;
3971	int longs;
3972	uint16_t rpi_count;
3973	struct lpfc_rpi_hdr *rpi_hdr;
3974
3975	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3976
3977	/*
3978	 * Provision an rpi bitmask range for discovery, sized to cover
3979	 * the full rpi range exposed by the port (rpi_base + max_rpi - 1).
3980	 */
3981	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3982		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
3983
3984	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3985	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3986					   GFP_KERNEL);
3987	if (!phba->sli4_hba.rpi_bmask)
3988		return -ENOMEM;
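	/*
	 * The expression above is an open-coded DIV_ROUND_UP(); e.g. with
	 * rpi_count = 4000 and BITS_PER_LONG = 64 (illustrative values),
	 * longs = (4000 + 63) / 64 = 63, i.e. a 504-byte bitmask.
	 */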
3989
3990	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3991	if (!rpi_hdr) {
3992		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3993				"0391 Error during rpi post operation\n");
3994		lpfc_sli4_remove_rpis(phba);
3995		rc = -ENODEV;
3996	}
3997
3998	return rc;
3999}
4000
4001/**
4002 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4003 * @phba: pointer to lpfc hba data structure.
4004 *
4005 * This routine is invoked to allocate a single 4KB memory region to
4006 * support rpis and stores them in the phba.  This single region
4007 * provides support for up to 64 rpis.  The region is used globally
4008 * by the device.
4009 *
4010 * Returns:
4011 *   A valid rpi hdr on success.
4012 *   A NULL pointer on any failure.
4013 **/
4014struct lpfc_rpi_hdr *
4015lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4016{
4017	uint16_t rpi_limit, curr_rpi_range;
4018	struct lpfc_dmabuf *dmabuf;
4019	struct lpfc_rpi_hdr *rpi_hdr;
4020
4021	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4022		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4023
4024	spin_lock_irq(&phba->hbalock);
4025	curr_rpi_range = phba->sli4_hba.next_rpi;
4026	spin_unlock_irq(&phba->hbalock);
4027
4028	/*
4029	 * The port has a limited number of rpis. The increment here
4030	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4031	 * and to allow the full max_rpi range per port.
4032	 */
4033	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4034		return NULL;
4035
4036	/*
4037	 * First allocate the protocol header region for the port.  The
4038	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4039	 */
4040	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4041	if (!dmabuf)
4042		return NULL;
4043
4044	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4045					  LPFC_HDR_TEMPLATE_SIZE,
4046					  &dmabuf->phys,
4047					  GFP_KERNEL);
4048	if (!dmabuf->virt) {
4049		rpi_hdr = NULL;
4050		goto err_free_dmabuf;
4051	}
4052
4053	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4054	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4055		rpi_hdr = NULL;
4056		goto err_free_coherent;
4057	}
4058
4059	/* Save the rpi header data for cleanup later. */
4060	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4061	if (!rpi_hdr)
4062		goto err_free_coherent;
4063
4064	rpi_hdr->dmabuf = dmabuf;
4065	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4066	rpi_hdr->page_count = 1;
4067	spin_lock_irq(&phba->hbalock);
4068	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4069	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4070
4071	/*
4072	 * The next_rpi stores the next modulo-64 rpi value to post
4073	 * in any subsequent rpi memory region postings.
4074	 */
4075	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4076	spin_unlock_irq(&phba->hbalock);
4077	return rpi_hdr;
4078
4079 err_free_coherent:
4080	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4081			  dmabuf->virt, dmabuf->phys);
4082 err_free_dmabuf:
4083	kfree(dmabuf);
4084	return NULL;
4085}
4086
4087/**
4088 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4089 * @phba: pointer to lpfc hba data structure.
4090 *
4091 * This routine is invoked to remove all memory resources allocated
4092 * to support rpis. This routine presumes the caller has released all
4093 * rpis consumed by fabric or port logins and is prepared to have
4094 * the header pages removed.
4095 **/
4096void
4097lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4098{
4099	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4100
4101	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4102				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4103		list_del(&rpi_hdr->list);
4104		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4105				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4106		kfree(rpi_hdr->dmabuf);
4107		kfree(rpi_hdr);
4108	}
4109
4110	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4111	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4112}
4113
4114/**
4115 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4116 * @pdev: pointer to pci device data structure.
4117 *
4118 * This routine is invoked to allocate the driver hba data structure for an
4119 * HBA device. If the allocation is successful, the phba reference to the
4120 * PCI device data structure is set.
4121 *
4122 * Return codes
4123 *      pointer to @phba - successful
4124 *      NULL - error
4125 **/
4126static struct lpfc_hba *
4127lpfc_hba_alloc(struct pci_dev *pdev)
4128{
4129	struct lpfc_hba *phba;
4130
4131	/* Allocate memory for HBA structure */
4132	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4133	if (!phba) {
4134		dev_err(&pdev->dev, "1417 Failed to allocate hba struct.\n");
4136		return NULL;
4137	}
4138
4139	/* Set reference to PCI device in HBA structure */
4140	phba->pcidev = pdev;
4141
4142	/* Assign an unused board number */
4143	phba->brd_no = lpfc_get_instance();
4144	if (phba->brd_no < 0) {
4145		kfree(phba);
4146		return NULL;
4147	}
4148
4149	return phba;
4150}
4151
4152/**
4153 * lpfc_hba_free - Free driver hba data structure with a device.
4154 * @phba: pointer to lpfc hba data structure.
4155 *
4156 * This routine is invoked to free the driver hba data structure with an
4157 * HBA device.
4158 **/
4159static void
4160lpfc_hba_free(struct lpfc_hba *phba)
4161{
4162	/* Release the driver assigned board number */
4163	idr_remove(&lpfc_hba_index, phba->brd_no);
4164
4165	kfree(phba);
4166	return;
4167}
4168
4169/**
4170 * lpfc_create_shost - Create hba physical port with associated scsi host.
4171 * @phba: pointer to lpfc hba data structure.
4172 *
4173 * This routine is invoked to create HBA physical port and associate a SCSI
4174 * host with it.
4175 *
4176 * Return codes
4177 *      0 - successful
4178 *      other values - error
4179 **/
4180static int
4181lpfc_create_shost(struct lpfc_hba *phba)
4182{
4183	struct lpfc_vport *vport;
4184	struct Scsi_Host  *shost;
4185
4186	/* Initialize HBA FC structure */
4187	phba->fc_edtov = FF_DEF_EDTOV;
4188	phba->fc_ratov = FF_DEF_RATOV;
4189	phba->fc_altov = FF_DEF_ALTOV;
4190	phba->fc_arbtov = FF_DEF_ARBTOV;
4191
4192	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4193	if (!vport)
4194		return -ENODEV;
4195
4196	shost = lpfc_shost_from_vport(vport);
4197	phba->pport = vport;
4198	lpfc_debugfs_initialize(vport);
4199	/* Put reference to SCSI host to driver's device private data */
4200	pci_set_drvdata(phba->pcidev, shost);
4201
4202	return 0;
4203}
4204
4205/**
4206 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4207 * @phba: pointer to lpfc hba data structure.
4208 *
4209 * This routine is invoked to destroy HBA physical port and the associated
4210 * SCSI host.
4211 **/
4212static void
4213lpfc_destroy_shost(struct lpfc_hba *phba)
4214{
4215	struct lpfc_vport *vport = phba->pport;
4216
4217	/* Destroy physical port that associated with the SCSI host */
4218	destroy_port(vport);
4219
4220	return;
4221}
4222
4223/**
4224 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4225 * @phba: pointer to lpfc hba data structure.
4226 * @shost: the shost to be used to detect Block guard settings.
4227 *
4228 * This routine sets up the local Block guard protocol settings for @shost.
4229 * This routine also allocates memory for debugging bg buffers.
4230 **/
4231static void
4232lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4233{
4234	int pagecnt = 10;
4235	if (lpfc_prot_mask && lpfc_prot_guard) {
4236		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4237				"1478 Registering BlockGuard with the "
4238				"SCSI layer\n");
4239		scsi_host_set_prot(shost, lpfc_prot_mask);
4240		scsi_host_set_guard(shost, lpfc_prot_guard);
4241	}
4242	if (!_dump_buf_data) {
4243		spin_lock_init(&_dump_buf_lock);
4244		while (pagecnt) {
4245			_dump_buf_data =
4246				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4247			if (_dump_buf_data) {
4248				printk(KERN_ERR "BLKGRD allocated %d pages for "
4249				       "_dump_buf_data at 0x%p\n",
4250				       (1 << pagecnt), _dump_buf_data);
4251				_dump_buf_data_order = pagecnt;
4252				memset(_dump_buf_data, 0,
4253				       ((1 << PAGE_SHIFT) << pagecnt));
4254				break;
4255			} else
4256				--pagecnt;
4257		}
4258		if (!_dump_buf_data_order)
4259			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4260			       "memory for hexdump\n");
4261	} else
4262		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4263		       "\n", _dump_buf_data);
4264	if (!_dump_buf_dif) {
4265		while (pagecnt) {
4266			_dump_buf_dif =
4267				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4268			if (_dump_buf_dif) {
4269				printk(KERN_ERR "BLKGRD allocated %d pages for "
4270				       "_dump_buf_dif at 0x%p\n",
4271				       (1 << pagecnt), _dump_buf_dif);
4272				_dump_buf_dif_order = pagecnt;
4273				memset(_dump_buf_dif, 0,
4274				       ((1 << PAGE_SHIFT) << pagecnt));
4275				break;
4276			} else
4277				--pagecnt;
4278		}
4279		if (!_dump_buf_dif_order)
4280			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4281			       "memory for hexdump\n");
4282	} else
4283		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4284		       _dump_buf_dif);
4285}
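/*
 * Note on the allocations above: the second argument of
 * __get_free_pages() is an allocation order, so the initial pagecnt of
 * 10 requests 2^10 = 1024 contiguous pages (4 MB with 4 KB pages), and
 * each failure retries with half that amount down to order 1 (2 pages).
 */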
4286
4287/**
4288 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4289 * @phba: pointer to lpfc hba data structure.
4290 *
4291 * This routine is invoked to perform all the necessary post initialization
4292 * setup for the device.
4293 **/
4294static void
4295lpfc_post_init_setup(struct lpfc_hba *phba)
4296{
4297	struct Scsi_Host  *shost;
4298	struct lpfc_adapter_event_header adapter_event;
4299
4300	/* Get the default values for Model Name and Description */
4301	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4302
4303	/*
4304	 * hba setup may have changed the hba_queue_depth so we need to
4305	 * adjust the value of can_queue.
4306	 */
4307	shost = pci_get_drvdata(phba->pcidev);
4308	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4309	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4310		lpfc_setup_bg(phba, shost);
4311
4312	lpfc_host_attrib_init(shost);
4313
4314	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4315		spin_lock_irq(shost->host_lock);
4316		lpfc_poll_start_timer(phba);
4317		spin_unlock_irq(shost->host_lock);
4318	}
4319
4320	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4321			"0428 Perform SCSI scan\n");
4322	/* Send board arrival event to upper layer */
4323	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4324	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4325	fc_host_post_vendor_event(shost, fc_get_event_number(),
4326				  sizeof(adapter_event),
4327				  (char *) &adapter_event,
4328				  LPFC_NL_VENDOR_ID);
4329	return;
4330}
4331
4332/**
4333 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4334 * @phba: pointer to lpfc hba data structure.
4335 *
4336 * This routine is invoked to set up the PCI device memory space for device
4337 * with SLI-3 interface spec.
4338 *
4339 * Return codes
4340 * 	0 - successful
4341 * 	other values - error
4342 **/
4343static int
4344lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4345{
4346	struct pci_dev *pdev;
4347	unsigned long bar0map_len, bar2map_len;
4348	int i, hbq_count;
4349	void *ptr;
4350	int error = -ENODEV;
4351
4352	/* Obtain PCI device reference */
4353	if (!phba->pcidev)
4354		return error;
4355	else
4356		pdev = phba->pcidev;
4357
4358	/* Set the device DMA mask size */
4359	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4360		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4361			return error;
4362
4363	/* Get the bus address of Bar0 and Bar2 and the number of bytes
4364	 * required by each mapping.
4365	 */
4366	phba->pci_bar0_map = pci_resource_start(pdev, 0);
4367	bar0map_len = pci_resource_len(pdev, 0);
4368
4369	phba->pci_bar2_map = pci_resource_start(pdev, 2);
4370	bar2map_len = pci_resource_len(pdev, 2);
4371
4372	/* Map HBA SLIM to a kernel virtual address. */
4373	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4374	if (!phba->slim_memmap_p) {
4375		dev_printk(KERN_ERR, &pdev->dev,
4376			   "ioremap failed for SLIM memory.\n");
4377		goto out;
4378	}
4379
4380	/* Map HBA Control Registers to a kernel virtual address. */
4381	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4382	if (!phba->ctrl_regs_memmap_p) {
4383		dev_printk(KERN_ERR, &pdev->dev,
4384			   "ioremap failed for HBA control registers.\n");
4385		goto out_iounmap_slim;
4386	}
4387
4388	/* Allocate memory for SLI-2 structures */
4389	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4390					       SLI2_SLIM_SIZE,
4391					       &phba->slim2p.phys,
4392					       GFP_KERNEL);
4393	if (!phba->slim2p.virt)
4394		goto out_iounmap;
4395
4396	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4397	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4398	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4399	phba->IOCBs = (phba->slim2p.virt +
4400		       offsetof(struct lpfc_sli2_slim, IOCBs));
4401
4402	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4403						 lpfc_sli_hbq_size(),
4404						 &phba->hbqslimp.phys,
4405						 GFP_KERNEL);
4406	if (!phba->hbqslimp.virt)
4407		goto out_free_slim;
4408
4409	hbq_count = lpfc_sli_hbq_count();
4410	ptr = phba->hbqslimp.virt;
4411	for (i = 0; i < hbq_count; ++i) {
4412		phba->hbqs[i].hbq_virt = ptr;
4413		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4414		ptr += (lpfc_hbq_defs[i]->entry_count *
4415			sizeof(struct lpfc_hbq_entry));
4416	}
4417	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4418	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4419
4420	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4421
4422	INIT_LIST_HEAD(&phba->rb_pend_list);
4423
4424	phba->MBslimaddr = phba->slim_memmap_p;
4425	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4426	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4427	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4428	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4429
4430	return 0;
4431
4432out_free_slim:
4433	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4434			  phba->slim2p.virt, phba->slim2p.phys);
4435out_iounmap:
4436	iounmap(phba->ctrl_regs_memmap_p);
4437out_iounmap_slim:
4438	iounmap(phba->slim_memmap_p);
4439out:
4440	return error;
4441}
4442
4443/**
4444 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4445 * @phba: pointer to lpfc hba data structure.
4446 *
4447 * This routine is invoked to unset the PCI device memory space for device
4448 * with SLI-3 interface spec.
4449 **/
4450static void
4451lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4452{
4453	struct pci_dev *pdev;
4454
4455	/* Obtain PCI device reference */
4456	if (!phba->pcidev)
4457		return;
4458	else
4459		pdev = phba->pcidev;
4460
4461	/* Free coherent DMA memory allocated */
4462	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4463			  phba->hbqslimp.virt, phba->hbqslimp.phys);
4464	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4465			  phba->slim2p.virt, phba->slim2p.phys);
4466
4467	/* I/O memory unmap */
4468	iounmap(phba->ctrl_regs_memmap_p);
4469	iounmap(phba->slim_memmap_p);
4470
4471	return;
4472}
4473
4474/**
4475 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4476 * @phba: pointer to lpfc hba data structure.
4477 *
4478 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4479 * done and check status.
4480 *
4481 * Return 0 if successful, otherwise -ENODEV.
4482 **/
4483int
4484lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4485{
4486	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4487	uint32_t onlnreg0, onlnreg1;
4488	int i, port_error = -ENODEV;
4489
4490	if (!phba->sli4_hba.STAregaddr)
4491		return -ENODEV;
4492
4493	/* On unrecoverable error, log the error message and return error */
4494	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4495	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4496	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4497		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4498		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4499		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4500			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4501					"1422 HBA Unrecoverable error: "
4502					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4503					"online0_reg=0x%x, online1_reg=0x%x\n",
4504					uerrlo_reg.word0, uerrhi_reg.word0,
4505					onlnreg0, onlnreg1);
4506		}
4507		return -ENODEV;
4508	}
4509
4510	/* Wait up to 30 seconds for the SLI Port POST to be done and ready */
4511	for (i = 0; i < 3000; i++) {
4512		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4513		/* Encounter fatal POST error, break out */
4514		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4515			port_error = -ENODEV;
4516			break;
4517		}
4518		if (LPFC_POST_STAGE_ARMFW_READY ==
4519		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4520			port_error = 0;
4521			break;
4522		}
4523		msleep(10);
4524	}
4525
4526	if (port_error)
4527		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4528			"1408 Failure HBA POST Status: sta_reg=0x%x, "
4529			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4530			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
4531			bf_get(lpfc_hst_state_perr, &sta_reg),
4532			bf_get(lpfc_hst_state_sfi, &sta_reg),
4533			bf_get(lpfc_hst_state_nip, &sta_reg),
4534			bf_get(lpfc_hst_state_ipc, &sta_reg),
4535			bf_get(lpfc_hst_state_xrom, &sta_reg),
4536			bf_get(lpfc_hst_state_dl, &sta_reg),
4537			bf_get(lpfc_hst_state_port_status, &sta_reg));
4538
4539	/* Log device information */
4540	scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
4541	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4542			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4543			"FeatureL1=0x%x, FeatureL2=0x%x\n",
4544			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4545			bf_get(lpfc_scratchpad_slirev, &scratchpad),
4546			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4547			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4548
4549	return port_error;
4550}
4551
4552/**
4553 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4554 * @phba: pointer to lpfc hba data structure.
4555 *
4556 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4557 * memory map.
4558 **/
4559static void
4560lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4561{
4562	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4563					LPFC_UERR_STATUS_LO;
4564	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4565					LPFC_UERR_STATUS_HI;
4566	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4567					LPFC_ONLINE0;
4568	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4569					LPFC_ONLINE1;
4570	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4571					LPFC_SCRATCHPAD;
4572}
4573
4574/**
4575 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4576 * @phba: pointer to lpfc hba data structure.
4577 *
4578 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4579 * memory map.
4580 **/
4581static void
4582lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4583{
4584
4585	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4586				    LPFC_HST_STATE;
4587	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4588				    LPFC_HST_ISR0;
4589	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4590				    LPFC_HST_IMR0;
4591	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4592				     LPFC_HST_ISCR0;
4593	return;
4594}
4595
4596/**
4597 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4598 * @phba: pointer to lpfc hba data structure.
4599 * @vf: virtual function number
4600 *
4601 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4602 * based on the given virtual function number, @vf.
4603 *
4604 * Return 0 if successful, otherwise -ENODEV.
4605 **/
4606static int
4607lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4608{
4609	if (vf > LPFC_VIR_FUNC_MAX)
4610		return -ENODEV;
4611
4612	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4613				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4614	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4615				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4616	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4617				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4618	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4619				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4620	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4621				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4622	return 0;
4623}
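/*
 * Worked example of the doorbell mapping above (illustrative; assumes
 * LPFC_VFR_PAGE_SIZE is 4096): each virtual function owns one page of
 * doorbell registers in BAR2, so for vf = 2 the RQ doorbell resolves to
 * drbl_regs_memmap_p + 2 * 4096 + LPFC_RQ_DOORBELL.
 */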
4624
4625/**
4626 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4627 * @phba: pointer to lpfc hba data structure.
4628 *
4629 * This routine is invoked to create the bootstrap mailbox
4630 * region consistent with the SLI-4 interface spec.  This
4631 * routine allocates all memory necessary to communicate
4632 * mailbox commands to the port and sets up all alignment
4633 * needs.  No locks are expected to be held when calling
4634 * this routine.
4635 *
4636 * Return codes
4637 * 	0 - successful
4638 * 	ENOMEM - could not allocate memory.
4639 **/
4640static int
4641lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4642{
4643	uint32_t bmbx_size;
4644	struct lpfc_dmabuf *dmabuf;
4645	struct dma_address *dma_address;
4646	uint32_t pa_addr;
4647	uint64_t phys_addr;
4648
4649	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4650	if (!dmabuf)
4651		return -ENOMEM;
4652
4653	/*
4654	 * The bootstrap mailbox region consists of 2 parts
4655	 * plus an alignment restriction of 16 bytes.
4656	 */
4657	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4658	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4659					  bmbx_size,
4660					  &dmabuf->phys,
4661					  GFP_KERNEL);
4662	if (!dmabuf->virt) {
4663		kfree(dmabuf);
4664		return -ENOMEM;
4665	}
4666	memset(dmabuf->virt, 0, bmbx_size);
4667
4668	/*
4669	 * Initialize the bootstrap mailbox pointers now so that the register
4670	 * operations are simple later.  The mailbox dma address is required
4671	 * to be 16-byte aligned.  Also align the virtual memory as each
4672	 * mailbox is copied into the bmbx mailbox region before issuing the
4673	 * command to the port.
4674	 */
4675	phba->sli4_hba.bmbx.dmabuf = dmabuf;
4676	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4677
4678	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4679					      LPFC_ALIGN_16_BYTE);
4680	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4681					      LPFC_ALIGN_16_BYTE);
4682
4683	/*
4684	 * Set the high and low physical addresses now.  The SLI4 alignment
4685	 * requirement is 16 bytes and the mailbox is posted to the port
4686	 * as two 30-bit addresses.  The other data is a bit marking whether
4687	 * the 30-bit address is the high or low address.
4688	 * Upcast bmbx aphys to 64bits so shift instruction compiles
4689	 * clean on 32 bit machines.
4690	 */
4691	dma_address = &phba->sli4_hba.bmbx.dma_address;
4692	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4693	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4694	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4695					   LPFC_BMBX_BIT1_ADDR_HI);
4696
4697	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4698	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4699					   LPFC_BMBX_BIT1_ADDR_LO);
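	/*
	 * Layout of the split above: for the 16-byte aligned physical
	 * address, bits 63:34 (shifted right by 34) become the 30-bit
	 * high portion and bits 33:4 (shifted right by 4) become the
	 * 30-bit low portion; each is shifted left by 2 to leave room
	 * for the high/low marker bit written into the register.
	 */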
4700	return 0;
4701}
4702
4703/**
4704 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4705 * @phba: pointer to lpfc hba data structure.
4706 *
4707 * This routine is invoked to teardown the bootstrap mailbox
4708 * region and release all host resources. This routine requires
4709 * the caller to ensure all mailbox commands recovered, no
4710 * additional mailbox comands are sent, and interrupts are disabled
4711 * before calling this routine.
4712 *
4713 **/
4714static void
4715lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4716{
4717	dma_free_coherent(&phba->pcidev->dev,
4718			  phba->sli4_hba.bmbx.bmbx_size,
4719			  phba->sli4_hba.bmbx.dmabuf->virt,
4720			  phba->sli4_hba.bmbx.dmabuf->phys);
4721
4722	kfree(phba->sli4_hba.bmbx.dmabuf);
4723	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4724}
4725
4726/**
4727 * lpfc_sli4_read_config - Get the config parameters.
4728 * @phba: pointer to lpfc hba data structure.
4729 *
4730 * This routine is invoked to read the configuration parameters from the HBA.
4731 * The configuration parameters are used to set the base and maximum values
4732 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4733 * allocation for the port.
4734 *
4735 * Return codes
4736 * 	0 - successful
4737 * 	ENOMEM - No available memory
4738 *      EIO - The mailbox failed to complete successfully.
4739 **/
4740static int
4741lpfc_sli4_read_config(struct lpfc_hba *phba)
4742{
4743	LPFC_MBOXQ_t *pmb;
4744	struct lpfc_mbx_read_config *rd_config;
4745	uint32_t rc = 0;
4746
4747	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4748	if (!pmb) {
4749		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4750				"2011 Unable to allocate memory for issuing "
4751				"SLI_CONFIG_SPECIAL mailbox command\n");
4752		return -ENOMEM;
4753	}
4754
4755	lpfc_read_config(phba, pmb);
4756
4757	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4758	if (rc != MBX_SUCCESS) {
4759		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4760			"2012 Mailbox failed, mbxCmd x%x "
4761			"READ_CONFIG, mbxStatus x%x\n",
4762			bf_get(lpfc_mqe_command, &pmb->u.mqe),
4763			bf_get(lpfc_mqe_status, &pmb->u.mqe));
4764		rc = -EIO;
4765	} else {
4766		rd_config = &pmb->u.mqe.un.rd_config;
4767		phba->sli4_hba.max_cfg_param.max_xri =
4768			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4769		phba->sli4_hba.max_cfg_param.xri_base =
4770			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4771		phba->sli4_hba.max_cfg_param.max_vpi =
4772			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4773		phba->sli4_hba.max_cfg_param.vpi_base =
4774			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4775		phba->sli4_hba.max_cfg_param.max_rpi =
4776			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4777		phba->sli4_hba.max_cfg_param.rpi_base =
4778			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4779		phba->sli4_hba.max_cfg_param.max_vfi =
4780			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4781		phba->sli4_hba.max_cfg_param.vfi_base =
4782			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4783		phba->sli4_hba.max_cfg_param.max_fcfi =
4784			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4785		phba->sli4_hba.max_cfg_param.fcfi_base =
4786			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4787		phba->sli4_hba.max_cfg_param.max_eq =
4788			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4789		phba->sli4_hba.max_cfg_param.max_rq =
4790			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4791		phba->sli4_hba.max_cfg_param.max_wq =
4792			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4793		phba->sli4_hba.max_cfg_param.max_cq =
4794			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4795		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4796		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4797		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4798		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4799		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4800		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4801		phba->max_vports = phba->max_vpi;
4802		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4803				"2003 cfg params XRI(B:%d M:%d), "
4804				"VPI(B:%d M:%d) "
4805				"VFI(B:%d M:%d) "
4806				"RPI(B:%d M:%d) "
4807				"FCFI(B:%d M:%d)\n",
4808				phba->sli4_hba.max_cfg_param.xri_base,
4809				phba->sli4_hba.max_cfg_param.max_xri,
4810				phba->sli4_hba.max_cfg_param.vpi_base,
4811				phba->sli4_hba.max_cfg_param.max_vpi,
4812				phba->sli4_hba.max_cfg_param.vfi_base,
4813				phba->sli4_hba.max_cfg_param.max_vfi,
4814				phba->sli4_hba.max_cfg_param.rpi_base,
4815				phba->sli4_hba.max_cfg_param.max_rpi,
4816				phba->sli4_hba.max_cfg_param.fcfi_base,
4817				phba->sli4_hba.max_cfg_param.max_fcfi);
4818	}
4819	mempool_free(pmb, phba->mbox_mem_pool);
4820
4821	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
4822	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4823		phba->cfg_hba_queue_depth =
4824				phba->sli4_hba.max_cfg_param.max_xri;
4825	return rc;
4826}
4827
4828/**
4829 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4830 * @phba: pointer to lpfc hba data structure.
4831 *
4832 * This routine is invoked to setup the host-side endian order to the
4833 * HBA consistent with the SLI-4 interface spec.
4834 *
4835 * Return codes
4836 * 	0 - successful
4837 * 	ENOMEM - No available memory
4838 *      EIO - The mailbox failed to complete successfully.
4839 **/
4840static int
4841lpfc_setup_endian_order(struct lpfc_hba *phba)
4842{
4843	LPFC_MBOXQ_t *mboxq;
4844	uint32_t rc = 0;
4845	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4846				      HOST_ENDIAN_HIGH_WORD1};
4847
4848	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4849	if (!mboxq) {
4850		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4851				"0492 Unable to allocate memory for issuing "
4852				"SLI_CONFIG_SPECIAL mailbox command\n");
4853		return -ENOMEM;
4854	}
4855
4856	/*
4857	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4858	 * words to contain special data values and no other data.
4859	 */
4860	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4861	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4862	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4863	if (rc != MBX_SUCCESS) {
4864		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4865				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
4866				"status x%x\n",
4867				rc);
4868		rc = -EIO;
4869	}
4870
4871	mempool_free(mboxq, phba->mbox_mem_pool);
4872	return rc;
4873}
4874
4875/**
4876 * lpfc_sli4_queue_create - Create all the SLI4 queues
4877 * @phba: pointer to lpfc hba data structure.
4878 *
4879 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4880 * operation. For each SLI4 queue type, the parameters such as queue entry
4881 * count (queue depth) shall be taken from the module parameter. For now,
4882 * we just use some constant number as a placeholder.
4883 *
4884 * Return codes
4885 *      0 - successful
4886 *      ENOMEM - No available memory
4887 *      EIO - The mailbox failed to complete successfully.
4888 **/
4889static int
4890lpfc_sli4_queue_create(struct lpfc_hba *phba)
4891{
4892	struct lpfc_queue *qdesc;
4893	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4894	int cfg_fcp_wq_count;
4895	int cfg_fcp_eq_count;
4896
4897	/*
4898	 * Sanity check for configured queue parameters against the run-time
4899	 * device parameters
4900	 */
4901
4902	/* Sanity check on FCP fast-path WQ parameters */
4903	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4904	if (cfg_fcp_wq_count >
4905	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4906		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4907				   LPFC_SP_WQN_DEF;
4908		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4909			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4910					"2581 Not enough WQs (%d) from "
4911					"the pci function for supporting "
4912					"FCP WQs (%d)\n",
4913					phba->sli4_hba.max_cfg_param.max_wq,
4914					phba->cfg_fcp_wq_count);
4915			goto out_error;
4916		}
4917		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4918				"2582 Not enough WQs (%d) from the pci "
4919				"function for supporting the requested "
4920				"FCP WQs (%d), the actual FCP WQs can "
4921				"be supported: %d\n",
4922				phba->sli4_hba.max_cfg_param.max_wq,
4923				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4924	}
4925	/* The actual number of FCP work queues adopted */
4926	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
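	/*
	 * Worked example of the clamp above (illustrative values): if the
	 * PCI function exposes max_wq = 5 while LPFC_SP_WQN_DEF slow-path
	 * WQs (assume 1) are reserved, a request for 8 FCP WQs is reduced
	 * to 5 - 1 = 4 and the "2582" warning reports the adjustment.
	 */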
4927
4928	/* Sanity check on FCP fast-path EQ parameters */
4929	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4930	if (cfg_fcp_eq_count >
4931	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4932		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4933				   LPFC_SP_EQN_DEF;
4934		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4935			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4936					"2574 Not enough EQs (%d) from the "
4937					"pci function for supporting FCP "
4938					"EQs (%d)\n",
4939					phba->sli4_hba.max_cfg_param.max_eq,
4940					phba->cfg_fcp_eq_count);
4941			goto out_error;
4942		}
4943		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4944				"2575 Not enough EQs (%d) from the pci "
4945				"function for supporting the requested "
4946				"FCP EQs (%d), the actual FCP EQs can "
4947				"be supported: %d\n",
4948				phba->sli4_hba.max_cfg_param.max_eq,
4949				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4950	}
4951	/* It does not make sense to have more EQs than WQs */
4952	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4953		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4954				"2593 The number of FCP EQs (%d) is more "
4955				"than the number of FCP WQs (%d), limiting "
4956				"the number of FCP EQs to the number of "
4957				"WQs (%d)\n", cfg_fcp_eq_count,
4958				phba->cfg_fcp_wq_count,
4959				phba->cfg_fcp_wq_count);
4960		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4961	}
4962	/* The actual number of FCP event queues adopted */
4963	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4964	/* The overall number of event queues used */
4965	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4966
4967	/*
4968	 * Create Event Queues (EQs)
4969	 */
4970
4971	/* Get EQ depth from module parameter, fake the default for now */
4972	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4973	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4974
4975	/* Create slow path event queue */
4976	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4977				      phba->sli4_hba.eq_ecount);
4978	if (!qdesc) {
4979		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4980				"0496 Failed allocate slow-path EQ\n");
4981		goto out_error;
4982	}
4983	phba->sli4_hba.sp_eq = qdesc;
4984
4985	/* Create fast-path FCP Event Queue(s) */
4986	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4987			       phba->cfg_fcp_eq_count), GFP_KERNEL);
4988	if (!phba->sli4_hba.fp_eq) {
4989		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4990				"2576 Failed allocate memory for fast-path "
4991				"EQ record array\n");
4992		goto out_free_sp_eq;
4993	}
4994	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4995		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4996					      phba->sli4_hba.eq_ecount);
4997		if (!qdesc) {
4998			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4999					"0497 Failed allocate fast-path EQ\n");
5000			goto out_free_fp_eq;
5001		}
5002		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5003	}
5004
5005	/*
5006	 * Create Complete Queues (CQs)
5007	 */
5008
5009	/* Get CQ depth from module parameter, fake the default for now */
5010	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5011	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5012
5013	/* Create slow-path Mailbox Command Complete Queue */
5014	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5015				      phba->sli4_hba.cq_ecount);
5016	if (!qdesc) {
5017		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5018				"0500 Failed allocate slow-path mailbox CQ\n");
5019		goto out_free_fp_eq;
5020	}
5021	phba->sli4_hba.mbx_cq = qdesc;
5022
5023	/* Create slow-path ELS Complete Queue */
5024	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5025				      phba->sli4_hba.cq_ecount);
5026	if (!qdesc) {
5027		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5028				"0501 Failed allocate slow-path ELS CQ\n");
5029		goto out_free_mbx_cq;
5030	}
5031	phba->sli4_hba.els_cq = qdesc;
5032
5033	/* Create slow-path Unsolicited Receive Complete Queue */
5034	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5035				      phba->sli4_hba.cq_ecount);
5036	if (!qdesc) {
5037		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5038				"0502 Failed allocate slow-path USOL RX CQ\n");
5039		goto out_free_els_cq;
5040	}
5041	phba->sli4_hba.rxq_cq = qdesc;
5042
5043	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5044	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5045				phba->cfg_fcp_eq_count), GFP_KERNEL);
5046	if (!phba->sli4_hba.fcp_cq) {
5047		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5048				"2577 Failed allocate memory for fast-path "
5049				"CQ record array\n");
5050		goto out_free_rxq_cq;
5051	}
5052	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5053		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5054					      phba->sli4_hba.cq_ecount);
5055		if (!qdesc) {
5056			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5057					"0499 Failed allocate fast-path FCP "
5058					"CQ (%d)\n", fcp_cqidx);
5059			goto out_free_fcp_cq;
5060		}
5061		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5062	}
5063
5064	/* Create Mailbox Command Queue */
5065	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5066	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5067
5068	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5069				      phba->sli4_hba.mq_ecount);
5070	if (!qdesc) {
5071		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5072				"0505 Failed allocate slow-path MQ\n");
5073		goto out_free_fcp_cq;
5074	}
5075	phba->sli4_hba.mbx_wq = qdesc;
5076
5077	/*
5078	 * Create all the Work Queues (WQs)
5079	 */
5080	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5081	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5082
5083	/* Create slow-path ELS Work Queue */
5084	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5085				      phba->sli4_hba.wq_ecount);
5086	if (!qdesc) {
5087		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5088				"0504 Failed allocate slow-path ELS WQ\n");
5089		goto out_free_mbx_wq;
5090	}
5091	phba->sli4_hba.els_wq = qdesc;
5092
5093	/* Create fast-path FCP Work Queue(s) */
5094	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5095				phba->cfg_fcp_wq_count), GFP_KERNEL);
5096	if (!phba->sli4_hba.fcp_wq) {
5097		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5098				"2578 Failed allocate memory for fast-path "
5099				"WQ record array\n");
5100		goto out_free_els_wq;
5101	}
5102	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5103		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5104					      phba->sli4_hba.wq_ecount);
5105		if (!qdesc) {
5106			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5107					"0503 Failed allocate fast-path FCP "
5108					"WQ (%d)\n", fcp_wqidx);
5109			goto out_free_fcp_wq;
5110		}
5111		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5112	}
5113
5114	/*
5115	 * Create Receive Queue (RQ)
5116	 */
5117	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5118	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5119
5120	/* Create Receive Queue for header */
5121	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5122				      phba->sli4_hba.rq_ecount);
5123	if (!qdesc) {
5124		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5125				"0506 Failed allocate receive HRQ\n");
5126		goto out_free_fcp_wq;
5127	}
5128	phba->sli4_hba.hdr_rq = qdesc;
5129
5130	/* Create Receive Queue for data */
5131	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5132				      phba->sli4_hba.rq_ecount);
5133	if (!qdesc) {
5134		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5135				"0507 Failed allocate receive DRQ\n");
5136		goto out_free_hdr_rq;
5137	}
5138	phba->sli4_hba.dat_rq = qdesc;
5139
5140	return 0;
5141
5142out_free_hdr_rq:
5143	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5144	phba->sli4_hba.hdr_rq = NULL;
5145out_free_fcp_wq:
5146	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5147		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5148		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5149	}
5150	kfree(phba->sli4_hba.fcp_wq);
5151out_free_els_wq:
5152	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5153	phba->sli4_hba.els_wq = NULL;
5154out_free_mbx_wq:
5155	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5156	phba->sli4_hba.mbx_wq = NULL;
5157out_free_fcp_cq:
5158	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5159		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5160		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5161	}
5162	kfree(phba->sli4_hba.fcp_cq);
5163out_free_rxq_cq:
5164	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5165	phba->sli4_hba.rxq_cq = NULL;
5166out_free_els_cq:
5167	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5168	phba->sli4_hba.els_cq = NULL;
5169out_free_mbx_cq:
5170	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5171	phba->sli4_hba.mbx_cq = NULL;
5172out_free_fp_eq:
5173	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5174		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5175		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5176	}
5177	kfree(phba->sli4_hba.fp_eq);
5178out_free_sp_eq:
5179	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5180	phba->sli4_hba.sp_eq = NULL;
5181out_error:
5182	return -ENOMEM;
5183}
5184
5185/**
5186 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5187 * @phba: pointer to lpfc hba data structure.
5188 *
5189 * This routine is invoked to release all the SLI4 queues allocated for the
5190 * FCoE HBA operation.
5196 **/
5197static void
5198lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5199{
5200	int fcp_qidx;
5201
5202	/* Release mailbox command work queue */
5203	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5204	phba->sli4_hba.mbx_wq = NULL;
5205
5206	/* Release ELS work queue */
5207	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5208	phba->sli4_hba.els_wq = NULL;
5209
5210	/* Release FCP work queue */
5211	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5212		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5213	kfree(phba->sli4_hba.fcp_wq);
5214	phba->sli4_hba.fcp_wq = NULL;
5215
5216	/* Release unsolicited receive queue */
5217	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5218	phba->sli4_hba.hdr_rq = NULL;
5219	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5220	phba->sli4_hba.dat_rq = NULL;
5221
5222	/* Release unsolicited receive complete queue */
5223	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5224	phba->sli4_hba.rxq_cq = NULL;
5225
5226	/* Release ELS complete queue */
5227	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5228	phba->sli4_hba.els_cq = NULL;
5229
5230	/* Release mailbox command complete queue */
5231	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5232	phba->sli4_hba.mbx_cq = NULL;
5233
5234	/* Release FCP response complete queue */
5235	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5236		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5237	kfree(phba->sli4_hba.fcp_cq);
5238	phba->sli4_hba.fcp_cq = NULL;
5239
5240	/* Release fast-path event queue */
5241	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5242		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5243	kfree(phba->sli4_hba.fp_eq);
5244	phba->sli4_hba.fp_eq = NULL;
5245
5246	/* Release slow-path event queue */
5247	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5248	phba->sli4_hba.sp_eq = NULL;
5249
5250	return;
5251}
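
/*
 * Usage note (an illustrative sketch, not an additional driver path):
 * the create/destroy pair above manages host memory for the queues,
 * while lpfc_sli4_queue_setup()/lpfc_sli4_queue_unset() below post and
 * un-post those queues on the port. A typical caller flow, with
 * hypothetical error labels, looks like:
 *
 *	rc = lpfc_sli4_queue_create(phba);	// allocate queue memory
 *	if (rc)
 *		return rc;
 *	rc = lpfc_sli4_queue_setup(phba);	// post queues to the port
 *	if (rc)
 *		goto out_queue_destroy;
 *	...
 *	lpfc_sli4_queue_unset(phba);		// un-post queues
 * out_queue_destroy:
 *	lpfc_sli4_queue_destroy(phba);		// free queue memory
 */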
5252
5253/**
5254 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5255 * @phba: pointer to lpfc hba data structure.
5256 *
5257 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5258 * operation.
5259 *
5260 * Return codes
5261 *      0 - successful
5262 *      -ENOMEM - No available memory
5263 *      -EIO - The mailbox failed to complete successfully.
5264 **/
5265int
5266lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5267{
5268	int rc = -ENOMEM;
5269	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5270	int fcp_cq_index = 0;
5271
5272	/*
5273	 * Set up Event Queues (EQs)
5274	 */
5275
5276	/* Set up slow-path event queue */
5277	if (!phba->sli4_hba.sp_eq) {
5278		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5279				"0520 Slow-path EQ not allocated\n");
5280		goto out_error;
5281	}
5282	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5283			    LPFC_SP_DEF_IMAX);
5284	if (rc) {
5285		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5286				"0521 Failed setup of slow-path EQ: "
5287				"rc = 0x%x\n", rc);
5288		goto out_error;
5289	}
5290	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5291			"2583 Slow-path EQ setup: queue-id=%d\n",
5292			phba->sli4_hba.sp_eq->queue_id);
5293
5294	/* Set up fast-path event queue */
5295	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5296		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5297			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5298					"0522 Fast-path EQ (%d) not "
5299					"allocated\n", fcp_eqidx);
5300			goto out_destroy_fp_eq;
5301		}
5302		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5303				    phba->cfg_fcp_imax);
5304		if (rc) {
5305			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5306					"0523 Failed setup of fast-path EQ "
5307					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5308			goto out_destroy_fp_eq;
5309		}
5310		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5311				"2584 Fast-path EQ setup: "
5312				"queue[%d]-id=%d\n", fcp_eqidx,
5313				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5314	}
5315
5316	/*
5317	 * Set up Complete Queues (CQs)
5318	 */
5319
5320	/* Set up slow-path MBOX Complete Queue as the first CQ */
5321	if (!phba->sli4_hba.mbx_cq) {
5322		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5323				"0528 Mailbox CQ not allocated\n");
5324		goto out_destroy_fp_eq;
5325	}
5326	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5327			    LPFC_MCQ, LPFC_MBOX);
5328	if (rc) {
5329		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5330				"0529 Failed setup of slow-path mailbox CQ: "
5331				"rc = 0x%x\n", rc);
5332		goto out_destroy_fp_eq;
5333	}
5334	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5335			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5336			phba->sli4_hba.mbx_cq->queue_id,
5337			phba->sli4_hba.sp_eq->queue_id);
5338
5339	/* Set up slow-path ELS Complete Queue */
5340	if (!phba->sli4_hba.els_cq) {
5341		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5342				"0530 ELS CQ not allocated\n");
5343		goto out_destroy_mbx_cq;
5344	}
5345	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5346			    LPFC_WCQ, LPFC_ELS);
5347	if (rc) {
5348		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5349				"0531 Failed setup of slow-path ELS CQ: "
5350				"rc = 0x%x\n", rc);
5351		goto out_destroy_mbx_cq;
5352	}
5353	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5354			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5355			phba->sli4_hba.els_cq->queue_id,
5356			phba->sli4_hba.sp_eq->queue_id);
5357
5358	/* Set up slow-path Unsolicited Receive Complete Queue */
5359	if (!phba->sli4_hba.rxq_cq) {
5360		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5361				"0532 USOL RX CQ not allocated\n");
5362		goto out_destroy_els_cq;
5363	}
5364	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5365			    LPFC_RCQ, LPFC_USOL);
5366	if (rc) {
5367		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5368				"0533 Failed setup of slow-path USOL RX CQ: "
5369				"rc = 0x%x\n", rc);
5370		goto out_destroy_els_cq;
5371	}
5372	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5373			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5374			phba->sli4_hba.rxq_cq->queue_id,
5375			phba->sli4_hba.sp_eq->queue_id);
5376
5377	/* Set up fast-path FCP Response Complete Queue */
5378	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5379		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5380			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5381					"0526 Fast-path FCP CQ (%d) not "
5382					"allocated\n", fcp_cqidx);
5383			goto out_destroy_fcp_cq;
5384		}
5385		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5386				    phba->sli4_hba.fp_eq[fcp_cqidx],
5387				    LPFC_WCQ, LPFC_FCP);
5388		if (rc) {
5389			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5390					"0527 Failed setup of fast-path FCP "
5391					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5392			goto out_destroy_fcp_cq;
5393		}
5394		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5395				"2588 FCP CQ setup: cq[%d]-id=%d, "
5396				"parent eq[%d]-id=%d\n",
5397				fcp_cqidx,
5398				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5399				fcp_cqidx,
5400				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5401	}
5402
5403	/*
5404	 * Set up all the Work Queues (WQs)
5405	 */
5406
5407	/* Set up Mailbox Command Queue */
5408	if (!phba->sli4_hba.mbx_wq) {
5409		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5410				"0538 Slow-path MQ not allocated\n");
5411		goto out_destroy_fcp_cq;
5412	}
5413	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5414			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
5415	if (rc) {
5416		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5417				"0539 Failed setup of slow-path MQ: "
5418				"rc = 0x%x\n", rc);
5419		goto out_destroy_fcp_cq;
5420	}
5421	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5422			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5423			phba->sli4_hba.mbx_wq->queue_id,
5424			phba->sli4_hba.mbx_cq->queue_id);
5425
5426	/* Set up slow-path ELS Work Queue */
5427	if (!phba->sli4_hba.els_wq) {
5428		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5429				"0536 Slow-path ELS WQ not allocated\n");
5430		goto out_destroy_mbx_wq;
5431	}
5432	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5433			    phba->sli4_hba.els_cq, LPFC_ELS);
5434	if (rc) {
5435		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5436				"0537 Failed setup of slow-path ELS WQ: "
5437				"rc = 0x%x\n", rc);
5438		goto out_destroy_mbx_wq;
5439	}
5440	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5441			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5442			phba->sli4_hba.els_wq->queue_id,
5443			phba->sli4_hba.els_cq->queue_id);
5444
5445	/* Set up fast-path FCP Work Queue */
5446	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5447		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5448			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5449					"0534 Fast-path FCP WQ (%d) not "
5450					"allocated\n", fcp_wqidx);
5451			goto out_destroy_fcp_wq;
5452		}
5453		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5454				    phba->sli4_hba.fcp_cq[fcp_cq_index],
5455				    LPFC_FCP);
5456		if (rc) {
5457			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5458					"0535 Failed setup of fast-path FCP "
5459					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5460			goto out_destroy_fcp_wq;
5461		}
5462		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5463				"2591 FCP WQ setup: wq[%d]-id=%d, "
5464				"parent cq[%d]-id=%d\n",
5465				fcp_wqidx,
5466				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5467				fcp_cq_index,
5468				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5469		/* Round robin FCP Work Queue's Completion Queue assignment */
5470		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5471	}
5472
5473	/*
5474	 * Set up Receive Queue (RQ)
5475	 */
5476	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5477		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5478				"0540 Receive Queue not allocated\n");
5479		goto out_destroy_fcp_wq;
5480	}
5481	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5482			    phba->sli4_hba.rxq_cq, LPFC_USOL);
5483	if (rc) {
5484		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5485				"0541 Failed setup of Receive Queue: "
5486				"rc = 0x%x\n", rc);
5487		goto out_destroy_fcp_wq;
5488	}
5489	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5490			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5491			"parent cq-id=%d\n",
5492			phba->sli4_hba.hdr_rq->queue_id,
5493			phba->sli4_hba.dat_rq->queue_id,
5494			phba->sli4_hba.rxq_cq->queue_id);
5495	return 0;
5496
5497out_destroy_fcp_wq:
5498	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5499		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5500	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5501out_destroy_mbx_wq:
5502	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5503out_destroy_fcp_cq:
5504	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5505		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5506	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5507out_destroy_els_cq:
5508	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5509out_destroy_mbx_cq:
5510	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5511out_destroy_fp_eq:
5512	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5513		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5514	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5515out_error:
5516	return rc;
5517}
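
/*
 * Worked example of the round-robin WQ-to-CQ assignment above (assumed
 * configuration, for illustration only): with cfg_fcp_wq_count = 4 and
 * cfg_fcp_eq_count = 2, fcp_cq_index cycles 0, 1, 0, 1, so FCP WQs 0
 * and 2 complete on FCP CQ 0 while FCP WQs 1 and 3 complete on FCP CQ 1.
 */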
5518
5519/**
5520 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5521 * @phba: pointer to lpfc hba data structure.
5522 *
5523 * This routine is invoked to unset (destroy on the HBA side) all the SLI4
5524 * queues set up for the FCoE HBA operation.
5530 **/
5531void
5532lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5533{
5534	int fcp_qidx;
5535
5536	/* Unset mailbox command work queue */
5537	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5538	/* Unset ELS work queue */
5539	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5540	/* Unset unsolicited receive queue */
5541	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5542	/* Unset FCP work queue */
5543	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5544		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5545	/* Unset mailbox command complete queue */
5546	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5547	/* Unset ELS complete queue */
5548	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5549	/* Unset unsolicited receive complete queue */
5550	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5551	/* Unset FCP response complete queue */
5552	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5553		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5554	/* Unset fast-path event queue */
5555	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5556		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5557	/* Unset slow-path event queue */
5558	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5559}
5560
5561/**
5562 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5563 * @phba: pointer to lpfc hba data structure.
5564 *
5565 * This routine is invoked to allocate and set up a pool of completion queue
5566 * events. The body of the completion queue event is a completion queue entry
5567 * (CQE). For now, this pool is used by the interrupt service routine to queue
5568 * the following HBA completion queue events for the worker thread to process:
5569 *   - Mailbox asynchronous events
5570 *   - Receive queue completion unsolicited events
5571 * Later, this can be used for all the slow-path events.
5572 *
5573 * Return codes
5574 *      0 - successful
5575 *      -ENOMEM - No available memory
5576 **/
5577static int
5578lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5579{
5580	struct lpfc_cq_event *cq_event;
5581	int i;
5582
5583	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5584		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5585		if (!cq_event)
5586			goto out_pool_create_fail;
5587		list_add_tail(&cq_event->list,
5588			      &phba->sli4_hba.sp_cqe_event_pool);
5589	}
5590	return 0;
5591
5592out_pool_create_fail:
5593	lpfc_sli4_cq_event_pool_destroy(phba);
5594	return -ENOMEM;
5595}
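
/*
 * Sizing note: the loop above preallocates 4 * cq_ecount events, i.e.
 * enough to absorb four full completion queues' worth of slow-path
 * events before __lpfc_sli4_cq_event_alloc() starts returning NULL.
 */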
5596
5597/**
5598 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5599 * @phba: pointer to lpfc hba data structure.
5600 *
5601 * This routine is invoked to free the pool of completion queue events at
5602 * driver unload time. Note that it is the responsibility of the driver
5603 * cleanup routine to free all the outstanding completion-queue events
5604 * allocated from this pool back into the pool before invoking this routine
5605 * to destroy the pool.
5606 **/
5607static void
5608lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5609{
5610	struct lpfc_cq_event *cq_event, *next_cq_event;
5611
5612	list_for_each_entry_safe(cq_event, next_cq_event,
5613				 &phba->sli4_hba.sp_cqe_event_pool, list) {
5614		list_del(&cq_event->list);
5615		kfree(cq_event);
5616	}
5617}
5618
5619/**
5620 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5621 * @phba: pointer to lpfc hba data structure.
5622 *
5623 * This routine is the lock-free version of the API invoked to allocate a
5624 * completion-queue event from the free pool.
5625 *
5626 * Return: Pointer to the newly allocated completion-queue event if successful
5627 *         NULL otherwise.
5628 **/
5629struct lpfc_cq_event *
5630__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5631{
5632	struct lpfc_cq_event *cq_event = NULL;
5633
5634	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5635			 struct lpfc_cq_event, list);
5636	return cq_event;
5637}
5638
5639/**
5640 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5641 * @phba: pointer to lpfc hba data structure.
5642 *
5643 * This routine is the locked version of the API invoked to allocate a
5644 * completion-queue event from the free pool.
5645 *
5646 * Return: Pointer to the newly allocated completion-queue event if successful
5647 *         NULL otherwise.
5648 **/
5649struct lpfc_cq_event *
5650lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5651{
5652	struct lpfc_cq_event *cq_event;
5653	unsigned long iflags;
5654
5655	spin_lock_irqsave(&phba->hbalock, iflags);
5656	cq_event = __lpfc_sli4_cq_event_alloc(phba);
5657	spin_unlock_irqrestore(&phba->hbalock, iflags);
5658	return cq_event;
5659}
5660
5661/**
5662 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5663 * @phba: pointer to lpfc hba data structure.
5664 * @cq_event: pointer to the completion queue event to be freed.
5665 *
5666 * This routine is the lock-free version of the API invoked to release a
5667 * completion-queue event back into the free pool.
5668 **/
5669void
5670__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5671			     struct lpfc_cq_event *cq_event)
5672{
5673	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5674}
5675
5676/**
5677 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5678 * @phba: pointer to lpfc hba data structure.
5679 * @cq_event: pointer to the completion queue event to be freed.
5680 *
5681 * This routine is the locked version of the API invoked to release a
5682 * completion-queue event back into the free pool.
5683 **/
5684void
5685lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5686			   struct lpfc_cq_event *cq_event)
5687{
5688	unsigned long iflags;
5689	spin_lock_irqsave(&phba->hbalock, iflags);
5690	__lpfc_sli4_cq_event_release(phba, cq_event);
5691	spin_unlock_irqrestore(&phba->hbalock, iflags);
5692}
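
/*
 * Typical use of the cq-event pool (a sketch; apart from the
 * lpfc_sli4_cq_event_* calls, the helper names here are hypothetical):
 * the interrupt service routine allocates an event, stashes the CQE
 * payload in it, and queues it for the worker thread, which releases
 * the event after processing:
 *
 *	// interrupt context
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return;				// pool exhausted
 *	save_cqe_payload(cq_event, cqe);	// hypothetical helper
 *	queue_for_worker(phba, cq_event);	// hypothetical helper
 *
 *	// worker thread, after processing the event
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */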
5693
5694/**
5695 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5696 * @phba: pointer to lpfc hba data structure.
5697 *
5698 * This routine frees all the pending completion-queue events back into
5699 * the free pool for device reset.
5700 **/
5701static void
5702lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5703{
5704	LIST_HEAD(cqelist);
5705	struct lpfc_cq_event *cqe;
5706	unsigned long iflags;
5707
5708	/* Retrieve all the pending WCQEs from pending WCQE lists */
5709	spin_lock_irqsave(&phba->hbalock, iflags);
5710	/* Pending FCP XRI abort events */
5711	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5712			 &cqelist);
5713	/* Pending ELS XRI abort events */
5714	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5715			 &cqelist);
5716	/* Pending async events */
5717	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5718			 &cqelist);
5719	spin_unlock_irqrestore(&phba->hbalock, iflags);
5720
5721	while (!list_empty(&cqelist)) {
5722		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5723		lpfc_sli4_cq_event_release(phba, cqe);
5724	}
5725}
5726
5727/**
5728 * lpfc_pci_function_reset - Reset pci function.
5729 * @phba: pointer to lpfc hba data structure.
5730 *
5731 * This routine is invoked to request a PCI function reset. It destroys
5732 * all resources assigned to the PCI function that originates this request.
5733 *
5734 * Return codes
5735 *      0 - successful
5736 *      -ENOMEM - No available memory
5737 *      -ENXIO - The mailbox failed to complete successfully.
5738 **/
5739int
5740lpfc_pci_function_reset(struct lpfc_hba *phba)
5741{
5742	LPFC_MBOXQ_t *mboxq;
5743	int rc = 0;
5744	uint32_t shdr_status, shdr_add_status;
5745	union lpfc_sli4_cfg_shdr *shdr;
5746
5747	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5748	if (!mboxq) {
5749		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5750				"0494 Unable to allocate memory for issuing "
5751				"SLI_FUNCTION_RESET mailbox command\n");
5752		return -ENOMEM;
5753	}
5754
5755	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5756	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5757			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5758			 LPFC_SLI4_MBX_EMBED);
5759	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5760	shdr = (union lpfc_sli4_cfg_shdr *)
5761		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5762	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5763	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5764	if (rc != MBX_TIMEOUT)
5765		mempool_free(mboxq, phba->mbox_mem_pool);
5766	if (shdr_status || shdr_add_status || rc) {
5767		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5768				"0495 SLI_FUNCTION_RESET mailbox failed with "
5769				"status x%x add_status x%x, mbx status x%x\n",
5770				shdr_status, shdr_add_status, rc);
5771		rc = -ENXIO;
5772	}
5773	return rc;
5774}
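
/*
 * The two-level status check above is the common pattern for embedded
 * SLI4_CONFIG mailbox commands in this file: the transport return code
 * (rc) can be MBX_SUCCESS while the common-code header in the response
 * still reports a failure, so shdr_status and shdr_add_status must be
 * checked in addition to rc. A minimal sketch of the pattern, assuming
 * an already-completed mboxq:
 *
 *	shdr = (union lpfc_sli4_cfg_shdr *)
 *		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
 *	if (bf_get(lpfc_mbox_hdr_status, &shdr->response))
 *		// command failed in firmware despite rc == MBX_SUCCESS
 */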
5775
5776/**
5777 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5778 * @phba: pointer to lpfc hba data structure.
5779 * @cnt: number of nop mailbox commands to send.
5780 *
5781 * This routine is invoked to send @cnt NOP mailbox commands and wait for
5782 * each command to complete.
5783 *
5784 * Return: the number of NOP mailbox commands completed.
5785 **/
5786static int
5787lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5788{
5789	LPFC_MBOXQ_t *mboxq;
5790	int length, cmdsent;
5791	uint32_t mbox_tmo;
5792	uint32_t rc = 0;
5793	uint32_t shdr_status, shdr_add_status;
5794	union lpfc_sli4_cfg_shdr *shdr;
5795
5796	if (cnt == 0) {
5797		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5798				"2518 Requested to send 0 NOP mailbox cmd\n");
5799		return cnt;
5800	}
5801
5802	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5803	if (!mboxq) {
5804		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5805				"2519 Unable to allocate memory for issuing "
5806				"NOP mailbox command\n");
5807		return 0;
5808	}
5809
5810	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5811	length = (sizeof(struct lpfc_mbx_nop) -
5812		  sizeof(struct lpfc_sli4_cfg_mhdr));
5813	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5814			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5815
5816	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5817	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5818		if (!phba->sli4_hba.intr_enable)
5819			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5820		else
5821			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5822		if (rc == MBX_TIMEOUT)
5823			break;
5824		/* Check return status */
5825		shdr = (union lpfc_sli4_cfg_shdr *)
5826			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5827		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5828		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5829					 &shdr->response);
5830		if (shdr_status || shdr_add_status || rc) {
5831			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5832					"2520 NOP mailbox command failed "
5833					"status x%x add_status x%x mbx "
5834					"status x%x\n", shdr_status,
5835					shdr_add_status, rc);
5836			break;
5837		}
5838	}
5839
5840	if (rc != MBX_TIMEOUT)
5841		mempool_free(mboxq, phba->mbox_mem_pool);
5842
5843	return cmdsent;
5844}
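
/*
 * Example use of the NOP exerciser above (illustrative only; the count
 * of 4 is an arbitrary assumption): since the routine returns the
 * number of NOPs that completed, a caller can verify the mailbox and
 * interrupt paths with:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, 4) != 4)
 *		// mailbox path unhealthy; fail or fall back
 */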
5845
5846/**
5847 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
5848 * @phba: pointer to lpfc hba data structure.
5849 * @fcfi: fcf index.
5850 *
5851 * This routine is invoked to unregister an FCFI from the device.
5852 **/
5853void
5854lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5855{
5856	LPFC_MBOXQ_t *mbox;
5857	uint32_t mbox_tmo;
5858	int rc;
5859	unsigned long flags;
5860
5861	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5862
5863	if (!mbox)
5864		return;
5865
5866	lpfc_unreg_fcfi(mbox, fcfi);
5867
5868	if (!phba->sli4_hba.intr_enable)
5869		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5870	else {
5871		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5872		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5873	}
5874	if (rc != MBX_TIMEOUT)
5875		mempool_free(mbox, phba->mbox_mem_pool);
5876	if (rc != MBX_SUCCESS)
5877		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5878				"2517 Unregister FCFI command failed "
5879				"status %d, mbxStatus x%x\n", rc,
5880				bf_get(lpfc_mqe_status, &mbox->u.mqe));
5881	else {
5882		spin_lock_irqsave(&phba->hbalock, flags);
5883		/* Mark the FCFI as no longer registered */
5884		phba->fcf.fcf_flag &=
5885			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5886		spin_unlock_irqrestore(&phba->hbalock, flags);
5887	}
5888}
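
/*
 * Note on the mailbox issue pattern above, repeated throughout this
 * file: when device interrupts are not yet enabled the command is
 * polled (MBX_POLL); otherwise it is issued and waited on with the
 * SLI4_CONFIG timeout. On MBX_TIMEOUT the command is still outstanding,
 * so the mailbox is freed only when rc is not MBX_TIMEOUT.
 */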
5889
5890/**
5891 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5892 * @phba: pointer to lpfc hba data structure.
5893 *
5894 * This routine is invoked to set up the PCI device memory space for device
5895 * with SLI-4 interface spec.
5896 *
5897 * Return codes
5898 * 	0 - successful
5899 * 	other values - error
5900 **/
5901static int
5902lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5903{
5904	struct pci_dev *pdev;
5905	unsigned long bar0map_len, bar1map_len, bar2map_len;
5906	int error = -ENODEV;
5907
5908	/* Obtain PCI device reference */
5909	if (!phba->pcidev)
5910		return error;
5911	else
5912		pdev = phba->pcidev;
5913
5914	/* Set the device DMA mask size */
5915	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5916		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5917			return error;
5918
5919	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5920	 * number of bytes required by each mapping. These actually map to
5921	 * the PCI BAR regions 1, 2, and 4 on the SLI4 device.
5922	 */
5923	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5924	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5925
5926	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5927	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5928
5929	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5930	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5931
5932	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5933	phba->sli4_hba.conf_regs_memmap_p =
5934				ioremap(phba->pci_bar0_map, bar0map_len);
5935	if (!phba->sli4_hba.conf_regs_memmap_p) {
5936		dev_printk(KERN_ERR, &pdev->dev,
5937			   "ioremap failed for SLI4 PCI config registers.\n");
5938		goto out;
5939	}
5940
5941	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
5942	phba->sli4_hba.ctrl_regs_memmap_p =
5943				ioremap(phba->pci_bar1_map, bar1map_len);
5944	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5945		dev_printk(KERN_ERR, &pdev->dev,
5946			   "ioremap failed for SLI4 HBA control registers.\n");
5947		goto out_iounmap_conf;
5948	}
5949
5950	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5951	phba->sli4_hba.drbl_regs_memmap_p =
5952				ioremap(phba->pci_bar2_map, bar2map_len);
5953	if (!phba->sli4_hba.drbl_regs_memmap_p) {
5954		dev_printk(KERN_ERR, &pdev->dev,
5955			   "ioremap failed for SLI4 HBA doorbell registers.\n");
5956		goto out_iounmap_ctrl;
5957	}
5958
5959	/* Set up BAR0 PCI config space register memory map */
5960	lpfc_sli4_bar0_register_memmap(phba);
5961
5962	/* Set up BAR1 register memory map */
5963	lpfc_sli4_bar1_register_memmap(phba);
5964
5965	/* Set up BAR2 register memory map */
5966	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5967	if (error)
5968		goto out_iounmap_all;
5969
5970	return 0;
5971
5972out_iounmap_all:
5973	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5974out_iounmap_ctrl:
5975	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5976out_iounmap_conf:
5977	iounmap(phba->sli4_hba.conf_regs_memmap_p);
5978out:
5979	return error;
5980}
5981
5982/**
5983 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5984 * @phba: pointer to lpfc hba data structure.
5985 *
5986 * This routine is invoked to unset the PCI device memory space for device
5987 * with SLI-4 interface spec.
5988 **/
5989static void
5990lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5991{
5992	struct pci_dev *pdev;
5993
5994	/* Obtain PCI device reference */
5995	if (!phba->pcidev)
5996		return;
5997	else
5998		pdev = phba->pcidev;
5999
6000	/* Free coherent DMA memory allocated */
6001
6002	/* Unmap I/O memory space */
6003	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6004	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6005	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6006
6007	return;
6008}
6009
6010/**
6011 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6012 * @phba: pointer to lpfc hba data structure.
6013 *
6014 * This routine is invoked to enable the MSI-X interrupt vectors to device
6015 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6016 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6017 * invoked, enables either all or nothing, depending on the current
6018 * availability of PCI vector resources. The device driver is responsible
6019 * for calling the individual request_irq() to register each MSI-X vector
6020 * with an interrupt handler, which is done in this function. Note that
6021 * later when device is unloading, the driver should always call free_irq()
6022 * on all MSI-X vectors it has done request_irq() on before calling
6023 * pci_disable_msix(). Failure to do so results in a BUG_ON() and leaves
6024 * the device with MSI-X enabled, leaking its vectors.
6025 *
6026 * Return codes
6027 *   0 - successful
6028 *   other values - error
6029 **/
6030static int
6031lpfc_sli_enable_msix(struct lpfc_hba *phba)
6032{
6033	int rc, i;
6034	LPFC_MBOXQ_t *pmb;
6035
6036	/* Set up MSI-X multi-message vectors */
6037	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6038		phba->msix_entries[i].entry = i;
6039
6040	/* Configure MSI-X capability structure */
6041	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6042				ARRAY_SIZE(phba->msix_entries));
6043	if (rc) {
6044		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6045				"0420 PCI enable MSI-X failed (%d)\n", rc);
6046		goto msi_fail_out;
6047	}
6048	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6049		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6050				"0477 MSI-X entry[%d]: vector=x%x "
6051				"message=%d\n", i,
6052				phba->msix_entries[i].vector,
6053				phba->msix_entries[i].entry);
6054	/*
6055	 * Assign MSI-X vectors to interrupt handlers
6056	 */
6057
6058	/* vector-0 is associated to slow-path handler */
6059	rc = request_irq(phba->msix_entries[0].vector,
6060			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6061			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6062	if (rc) {
6063		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6064				"0421 MSI-X slow-path request_irq failed "
6065				"(%d)\n", rc);
6066		goto msi_fail_out;
6067	}
6068
6069	/* vector-1 is associated to fast-path handler */
6070	rc = request_irq(phba->msix_entries[1].vector,
6071			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6072			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6073
6074	if (rc) {
6075		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6076				"0429 MSI-X fast-path request_irq failed "
6077				"(%d)\n", rc);
6078		goto irq_fail_out;
6079	}
6080
6081	/*
6082	 * Configure HBA MSI-X attention conditions to messages
6083	 */
6084	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6085
6086	if (!pmb) {
6087		rc = -ENOMEM;
6088		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6089				"0474 Unable to allocate memory for issuing "
6090				"MBOX_CONFIG_MSI command\n");
6091		goto mem_fail_out;
6092	}
6093	rc = lpfc_config_msi(phba, pmb);
6094	if (rc)
6095		goto mbx_fail_out;
6096	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6097	if (rc != MBX_SUCCESS) {
6098		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6099				"0351 Config MSI mailbox command failed, "
6100				"mbxCmd x%x, mbxStatus x%x\n",
6101				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6102		goto mbx_fail_out;
6103	}
6104
6105	/* Free memory allocated for mailbox command */
6106	mempool_free(pmb, phba->mbox_mem_pool);
6107	return rc;
6108
6109mbx_fail_out:
6110	/* Free memory allocated for mailbox command */
6111	mempool_free(pmb, phba->mbox_mem_pool);
6112
6113mem_fail_out:
6114	/* free the irq already requested */
6115	free_irq(phba->msix_entries[1].vector, phba);
6116
6117irq_fail_out:
6118	/* free the irq already requested */
6119	free_irq(phba->msix_entries[0].vector, phba);
6120
6121msi_fail_out:
6122	/* Unconfigure MSI-X capability structure */
6123	pci_disable_msix(phba->pcidev);
6124	return rc;
6125}
6126
6127/**
6128 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6129 * @phba: pointer to lpfc hba data structure.
6130 *
6131 * This routine is invoked to release the MSI-X vectors and then disable the
6132 * MSI-X interrupt mode to device with SLI-3 interface spec.
6133 **/
6134static void
6135lpfc_sli_disable_msix(struct lpfc_hba *phba)
6136{
6137	int i;
6138
6139	/* Free up MSI-X multi-message vectors */
6140	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6141		free_irq(phba->msix_entries[i].vector, phba);
6142	/* Disable MSI-X */
6143	pci_disable_msix(phba->pcidev);
6144
6145	return;
6146}
6147
6148/**
6149 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6150 * @phba: pointer to lpfc hba data structure.
6151 *
6152 * This routine is invoked to enable the MSI interrupt mode to device with
6153 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6154 * enable the MSI vector. The device driver is responsible for calling
6155 * request_irq() to register the MSI vector with an interrupt handler, which
6156 * is done in this function.
6157 *
6158 * Return codes
6159 * 	0 - successful
6160 * 	other values - error
6161 */
6162static int
6163lpfc_sli_enable_msi(struct lpfc_hba *phba)
6164{
6165	int rc;
6166
6167	rc = pci_enable_msi(phba->pcidev);
6168	if (!rc)
6169		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6170				"0462 PCI enable MSI mode success.\n");
6171	else {
6172		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6173				"0471 PCI enable MSI mode failed (%d)\n", rc);
6174		return rc;
6175	}
6176
6177	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6178			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6179	if (rc) {
6180		pci_disable_msi(phba->pcidev);
6181		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6182				"0478 MSI request_irq failed (%d)\n", rc);
6183	}
6184	return rc;
6185}
6186
6187/**
6188 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6189 * @phba: pointer to lpfc hba data structure.
6190 *
6191 * This routine is invoked to disable the MSI interrupt mode to device with
6192 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector on
6193 * which it has done request_irq() before calling pci_disable_msi(). Failure
6194 * to do so results in a BUG_ON() and leaves the device with MSI enabled,
6195 * leaking its vector.
6196 */
6197static void
6198lpfc_sli_disable_msi(struct lpfc_hba *phba)
6199{
6200	free_irq(phba->pcidev->irq, phba);
6201	pci_disable_msi(phba->pcidev);
6202	return;
6203}
6204
6205/**
6206 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6207 * @phba: pointer to lpfc hba data structure.
6208 *
6209 * This routine is invoked to enable device interrupt and associate driver's
6210 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6211 * spec. Depending on the interrupt mode configured for the driver, the
6212 * driver will try to fall back from the configured interrupt mode to an
6213 * interrupt mode supported by the platform, kernel, and device, in the
6214 * order of:
6215 * MSI-X -> MSI -> IRQ.
6216 *
6217 * Return codes
6218 *   0 - successful
6219 *   other values - error
6220 **/
6221static uint32_t
6222lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6223{
6224	uint32_t intr_mode = LPFC_INTR_ERROR;
6225	int retval;
6226
6227	if (cfg_mode == 2) {
6228		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6229		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6230		if (!retval) {
6231			/* Now, try to enable MSI-X interrupt mode */
6232			retval = lpfc_sli_enable_msix(phba);
6233			if (!retval) {
6234				/* Indicate initialization to MSI-X mode */
6235				phba->intr_type = MSIX;
6236				intr_mode = 2;
6237			}
6238		}
6239	}
6240
6241	/* Fall back to MSI if MSI-X initialization failed */
6242	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6243		retval = lpfc_sli_enable_msi(phba);
6244		if (!retval) {
6245			/* Indicate initialization to MSI mode */
6246			phba->intr_type = MSI;
6247			intr_mode = 1;
6248		}
6249	}
6250
6251	/* Fall back to INTx if both MSI-X/MSI initialization failed */
6252	if (phba->intr_type == NONE) {
6253		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6254				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6255		if (!retval) {
6256			/* Indicate initialization to INTx mode */
6257			phba->intr_type = INTx;
6258			intr_mode = 0;
6259		}
6260	}
6261	return intr_mode;
6262}
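
/*
 * Interrupt mode encoding used by lpfc_sli_enable_intr() above and its
 * SLI-4 counterpart below: cfg_mode/intr_mode 2 = MSI-X, 1 = MSI,
 * 0 = INTx, with LPFC_INTR_ERROR meaning no mode could be enabled.
 * A cfg_mode of 2 thus permits the full MSI-X -> MSI -> INTx fallback,
 * 1 permits MSI -> INTx, and 0 goes straight to INTx.
 */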
6263
6264/**
6265 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6266 * @phba: pointer to lpfc hba data structure.
6267 *
6268 * This routine is invoked to disable device interrupt and disassociate the
6269 * driver's interrupt handler(s) from interrupt vector(s) to device with
6270 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6271 * release the interrupt vector(s) for the message signaled interrupt.
6272 **/
6273static void
6274lpfc_sli_disable_intr(struct lpfc_hba *phba)
6275{
6276	/* Disable the currently initialized interrupt mode */
6277	if (phba->intr_type == MSIX)
6278		lpfc_sli_disable_msix(phba);
6279	else if (phba->intr_type == MSI)
6280		lpfc_sli_disable_msi(phba);
6281	else if (phba->intr_type == INTx)
6282		free_irq(phba->pcidev->irq, phba);
6283
6284	/* Reset interrupt management states */
6285	phba->intr_type = NONE;
6286	phba->sli.slistat.sli_intr = 0;
6287
6288	return;
6289}
6290
6291/**
6292 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6293 * @phba: pointer to lpfc hba data structure.
6294 *
6295 * This routine is invoked to enable the MSI-X interrupt vectors to device
6296 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6297 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6298 * enables either all or nothing, depending on the current availability of
6299 * PCI vector resources. The device driver is responsible for calling the
6300 * individual request_irq() to register each MSI-X vector with an interrupt
6301 * handler, which is done in this function. Note that later when device is
6302 * unloading, the driver should always call free_irq() on all MSI-X vectors
6303 * it has done request_irq() on before calling pci_disable_msix(). Failure
6304 * to do so results in a BUG_ON() and leaves the device with MSI-X
6305 * enabled, leaking its vectors.
6306 *
6307 * Return codes
6308 * 0 - successful
6309 * other values - error
6310 **/
6311static int
6312lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6313{
6314	int rc, index;
6315
6316	/* Set up MSI-X multi-message vectors */
6317	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6318		phba->sli4_hba.msix_entries[index].entry = index;
6319
6320	/* Configure MSI-X capability structure */
6321	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6322			     phba->sli4_hba.cfg_eqn);
6323	if (rc) {
6324		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6325				"0484 PCI enable MSI-X failed (%d)\n", rc);
6326		goto msi_fail_out;
6327	}
6328	/* Log MSI-X vector assignment */
6329	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6330		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6331				"0489 MSI-X entry[%d]: vector=x%x "
6332				"message=%d\n", index,
6333				phba->sli4_hba.msix_entries[index].vector,
6334				phba->sli4_hba.msix_entries[index].entry);
6335	/*
6336	 * Assign MSI-X vectors to interrupt handlers
6337	 */
6338
6339	/* The first vector must be associated with slow-path handler for MQ */
6340	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6341			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6342			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6343	if (rc) {
6344		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6345				"0485 MSI-X slow-path request_irq failed "
6346				"(%d)\n", rc);
6347		goto msi_fail_out;
6348	}
6349
6350	/* The rest of the vector(s) are associated to fast-path handler(s) */
6351	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6352		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6353		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6354		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6355				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6356				 LPFC_FP_DRIVER_HANDLER_NAME,
6357				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6358		if (rc) {
6359			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6360					"0486 MSI-X fast-path (%d) "
6361					"request_irq failed (%d)\n", index, rc);
6362			goto cfg_fail_out;
6363		}
6364	}
6365
6366	return rc;
6367
6368cfg_fail_out:
6369	/* free the irq already requested */
6370	for (--index; index >= 1; index--)
6371		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6372			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6373
6374	/* free the irq already requested */
6375	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6376
6377msi_fail_out:
6378	/* Unconfigure MSI-X capability structure */
6379	pci_disable_msix(phba->pcidev);
6380	return rc;
6381}
6382
6383/**
6384 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6385 * @phba: pointer to lpfc hba data structure.
6386 *
6387 * This routine is invoked to release the MSI-X vectors and then disable the
6388 * MSI-X interrupt mode to device with SLI-4 interface spec.
6389 **/
6390static void
6391lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6392{
6393	int index;
6394
6395	/* Free up MSI-X multi-message vectors */
6396	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6397
6398	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6399		free_irq(phba->sli4_hba.msix_entries[index].vector,
6400			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6401	/* Disable MSI-X */
6402	pci_disable_msix(phba->pcidev);
6403
6404	return;
6405}
6406
6407/**
6408 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6409 * @phba: pointer to lpfc hba data structure.
6410 *
6411 * This routine is invoked to enable the MSI interrupt mode to device with
6412 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6413 * to enable the MSI vector. The device driver is responsible for calling
6414 * request_irq() to register the MSI vector with an interrupt handler,
6415 * which is done in this function.
6416 *
6417 * Return codes
6418 * 	0 - successful
6419 * 	other values - error
6420 **/
6421static int
6422lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6423{
6424	int rc, index;
6425
6426	rc = pci_enable_msi(phba->pcidev);
6427	if (!rc)
6428		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6429				"0487 PCI enable MSI mode success.\n");
6430	else {
6431		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6432				"0488 PCI enable MSI mode failed (%d)\n", rc);
6433		return rc;
6434	}
6435
6436	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6437			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6438	if (rc) {
6439		pci_disable_msi(phba->pcidev);
6440		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6441				"0490 MSI request_irq failed (%d)\n", rc);
		/* Do not fall through to EQ handle setup on failure */
		return rc;
6442	}
6443
6444	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6445		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6446		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6447	}
6448
6449	return rc;
6450}
6451
6452/**
6453 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6454 * @phba: pointer to lpfc hba data structure.
6455 *
6456 * This routine is invoked to disable the MSI interrupt mode to device with
6457 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector on
6458 * which it has done request_irq() before calling pci_disable_msi(). Failure
6459 * to do so results in a BUG_ON() and leaves the device with MSI enabled,
6460 * leaking its vector.
6461 **/
6462static void
6463lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6464{
6465	free_irq(phba->pcidev->irq, phba);
6466	pci_disable_msi(phba->pcidev);
6467	return;
6468}
6469
6470/**
6471 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6472 * @phba: pointer to lpfc hba data structure.
6473 *
6474 * This routine is invoked to enable device interrupt and associate driver's
6475 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
6476 * interface spec. Depending on the interrupt mode configured for the
6477 * driver, the driver will try to fall back from the configured interrupt
6478 * mode to an interrupt mode supported by the platform, kernel, and device,
6479 * in the order of:
6480 * MSI-X -> MSI -> IRQ.
6481 *
6482 * Return codes
6483 * 	0 - successful
6484 * 	other values - error
6485 **/
6486static uint32_t
6487lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6488{
6489	uint32_t intr_mode = LPFC_INTR_ERROR;
6490	int retval, index;
6491
6492	if (cfg_mode == 2) {
6493		/* Preparation before conf_msi mbox cmd */
6494		retval = 0;
6495		if (!retval) {
6496			/* Now, try to enable MSI-X interrupt mode */
6497			retval = lpfc_sli4_enable_msix(phba);
6498			if (!retval) {
6499				/* Indicate initialization to MSI-X mode */
6500				phba->intr_type = MSIX;
6501				intr_mode = 2;
6502			}
6503		}
6504	}
6505
6506	/* Fall back to MSI if MSI-X initialization failed */
6507	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6508		retval = lpfc_sli4_enable_msi(phba);
6509		if (!retval) {
6510			/* Indicate initialization to MSI mode */
6511			phba->intr_type = MSI;
6512			intr_mode = 1;
6513		}
6514	}
6515
6516	/* Fall back to INTx if both MSI-X/MSI initialization failed */
6517	if (phba->intr_type == NONE) {
6518		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6519				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6520		if (!retval) {
6521			/* Indicate initialization to INTx mode */
6522			phba->intr_type = INTx;
6523			intr_mode = 0;
6524			for (index = 0; index < phba->cfg_fcp_eq_count;
6525			     index++) {
6526				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6527				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6528			}
6529		}
6530	}
6531	return intr_mode;
6532}
6533
6534/**
6535 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6536 * @phba: pointer to lpfc hba data structure.
6537 *
6538 * This routine is invoked to disable device interrupt and disassociate
6539 * the driver's interrupt handler(s) from interrupt vector(s) to device
6540 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6541 * will release the interrupt vector(s) for the message signaled interrupt.
6542 **/
6543static void
6544lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6545{
6546	/* Disable the currently initialized interrupt mode */
6547	if (phba->intr_type == MSIX)
6548		lpfc_sli4_disable_msix(phba);
6549	else if (phba->intr_type == MSI)
6550		lpfc_sli4_disable_msi(phba);
6551	else if (phba->intr_type == INTx)
6552		free_irq(phba->pcidev->irq, phba);
6553
6554	/* Reset interrupt management states */
6555	phba->intr_type = NONE;
6556	phba->sli.slistat.sli_intr = 0;
6557
6558	return;
6559}
6560
6561/**
6562 * lpfc_unset_hba - Unset SLI3 hba device initialization
6563 * @phba: pointer to lpfc hba data structure.
6564 *
6565 * This routine is invoked to unset the HBA device initialization steps to
6566 * a device with SLI-3 interface spec.
6567 **/
6568static void
6569lpfc_unset_hba(struct lpfc_hba *phba)
6570{
6571	struct lpfc_vport *vport = phba->pport;
6572	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6573
6574	spin_lock_irq(shost->host_lock);
6575	vport->load_flag |= FC_UNLOADING;
6576	spin_unlock_irq(shost->host_lock);
6577
6578	lpfc_stop_hba_timers(phba);
6579
6580	phba->pport->work_port_events = 0;
6581
6582	lpfc_sli_hba_down(phba);
6583
6584	lpfc_sli_brdrestart(phba);
6585
6586	lpfc_sli_disable_intr(phba);
6587
6588	return;
6589}
6590
6591/**
6592 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
6593 * @phba: pointer to lpfc hba data structure.
6594 *
6595 * This routine is invoked to unset the HBA device initialization steps to
6596 * a device with SLI-4 interface spec.
6597 **/
6598static void
6599lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6600{
6601	struct lpfc_vport *vport = phba->pport;
6602	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6603
6604	spin_lock_irq(shost->host_lock);
6605	vport->load_flag |= FC_UNLOADING;
6606	spin_unlock_irq(shost->host_lock);
6607
6608	phba->pport->work_port_events = 0;
6609
6610	lpfc_sli4_hba_down(phba);
6611
6612	lpfc_sli4_disable_intr(phba);
6613
6614	return;
6615}
6616
6617/**
6618 * lpfc_sli4_hba_unset - Unset the fcoe hba
6619 * @phba: Pointer to HBA context object.
6620 *
6621 * This function is called in the SLI4 code path to reset the HBA's FCoE
6622 * function. The caller is not required to hold any lock. This routine
6623 * issues PCI function reset mailbox command to reset the FCoE function.
6624 * At the end of the function, it calls lpfc_hba_down_post function to
6625 * free any pending commands.
6626 **/
6627static void
6628lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6629{
6630	int wait_cnt = 0;
6631	LPFC_MBOXQ_t *mboxq;
6632
6633	lpfc_stop_hba_timers(phba);
6634	phba->sli4_hba.intr_enable = 0;
6635
6636	/*
6637	 * Gracefully wait out any potentially outstanding asynchronous
6638	 * mailbox command.
6639	 */
6640
6641	/* First, block any pending async mailbox command from being posted */
6642	spin_lock_irq(&phba->hbalock);
6643	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6644	spin_unlock_irq(&phba->hbalock);
6645	/* Now, try to wait it out if we can */
6646	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6647		msleep(10);
6648		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6649			break;
6650	}
6651	/* Forcefully release the outstanding mailbox command if timed out */
6652	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6653		spin_lock_irq(&phba->hbalock);
6654		mboxq = phba->sli.mbox_active;
6655		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6656		__lpfc_mbox_cmpl_put(phba, mboxq);
6657		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6658		phba->sli.mbox_active = NULL;
6659		spin_unlock_irq(&phba->hbalock);
6660	}
6661
6662	/* Tear down the queues in the HBA */
6663	lpfc_sli4_queue_unset(phba);
6664
6665	/* Disable PCI subsystem interrupt */
6666	lpfc_sli4_disable_intr(phba);
6667
6668	/* The kthread_stop signal will trigger work_done one more time */
6669	kthread_stop(phba->worker_thread);
6670
6671	/* Stop the SLI4 device port */
6672	phba->pport->work_port_events = 0;
6673}
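
/*
 * Timing note for the graceful wait above: the loop polls in 10 ms
 * steps, so an outstanding mailbox command is given roughly
 * LPFC_ACTIVE_MBOX_WAIT_CNT * 10 ms to complete before it is forcefully
 * completed with MBX_NOT_FINISHED.
 */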
6674
6675/**
6676 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6677 * @pdev: pointer to PCI device
6678 * @pid: pointer to PCI device identifier
6679 *
6680 * This routine is to be called to attach a device with SLI-3 interface spec
6681 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6682 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6683 * information of the device and driver to see if the driver states that it can
6684 * support this kind of device. If the match is successful, the driver core
6685 * invokes this routine. If this routine determines it can claim the HBA, it
6686 * does all the initialization that it needs to do to handle the HBA properly.
6687 *
6688 * Return code
6689 * 	0 - driver can claim the device
6690 * 	negative value - driver can not claim the device
6691 **/
6692static int __devinit
6693lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6694{
6695	struct lpfc_hba   *phba;
6696	struct lpfc_vport *vport = NULL;
6697	int error;
6698	uint32_t cfg_mode, intr_mode;
6699
6700	/* Allocate memory for HBA structure */
6701	phba = lpfc_hba_alloc(pdev);
6702	if (!phba)
6703		return -ENOMEM;
6704
6705	/* Perform generic PCI device enabling operation */
6706	error = lpfc_enable_pci_dev(phba);
6707	if (error) {
6708		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6709				"1401 Failed to enable pci device.\n");
6710		goto out_free_phba;
6711	}
6712
6713	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
6714	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
6715	if (error)
6716		goto out_disable_pci_dev;
6717
6718	/* Set up SLI-3 specific device PCI memory space */
6719	error = lpfc_sli_pci_mem_setup(phba);
6720	if (error) {
6721		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6722				"1402 Failed to set up pci memory space.\n");
6723		goto out_disable_pci_dev;
6724	}
6725
6726	/* Set up phase-1 common device driver resources */
6727	error = lpfc_setup_driver_resource_phase1(phba);
6728	if (error) {
6729		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6730				"1403 Failed to set up driver resource.\n");
6731		goto out_unset_pci_mem_s3;
6732	}
6733
6734	/* Set up SLI-3 specific device driver resources */
6735	error = lpfc_sli_driver_resource_setup(phba);
6736	if (error) {
6737		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6738				"1404 Failed to set up driver resource.\n");
6739		goto out_unset_pci_mem_s3;
6740	}
6741
6742	/* Initialize and populate the iocb list per host */
6743	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
6744	if (error) {
6745		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6746				"1405 Failed to initialize iocb list.\n");
6747		goto out_unset_driver_resource_s3;
6748	}
6749
6750	/* Set up common device driver resources */
6751	error = lpfc_setup_driver_resource_phase2(phba);
6752	if (error) {
6753		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6754				"1406 Failed to set up driver resource.\n");
6755		goto out_free_iocb_list;
6756	}
6757
6758	/* Create SCSI host to the physical port */
6759	error = lpfc_create_shost(phba);
6760	if (error) {
6761		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6762				"1407 Failed to create scsi host.\n");
6763		goto out_unset_driver_resource;
6764	}
6765
6766	/* Configure sysfs attributes */
6767	vport = phba->pport;
6768	error = lpfc_alloc_sysfs_attr(vport);
6769	if (error) {
6770		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6771				"1476 Failed to allocate sysfs attr\n");
6772		goto out_destroy_shost;
6773	}
6774
6775	/* Now, try to enable interrupts and bring up the device */
6776	cfg_mode = phba->cfg_use_msi;
6777	while (true) {
6778		/* Put device to a known state before enabling interrupt */
6779		lpfc_stop_port(phba);
6780		/* Configure and enable interrupt */
6781		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
6782		if (intr_mode == LPFC_INTR_ERROR) {
6783			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6784					"0431 Failed to enable interrupt.\n");
6785			error = -ENODEV;
6786			goto out_free_sysfs_attr;
6787		}
6788		/* SLI-3 HBA setup */
6789		if (lpfc_sli_hba_setup(phba)) {
6790			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6791					"1477 Failed to set up hba\n");
6792			error = -ENODEV;
6793			goto out_remove_device;
6794		}
6795
6796		/* Wait 50ms for the interrupts of previous mailbox commands */
6797		msleep(50);
6798	/* Check active interrupt count for message signaled interrupts */
6799		if (intr_mode == 0 ||
6800		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6801			/* Log the current active interrupt mode */
6802			phba->intr_mode = intr_mode;
6803			lpfc_log_intr_mode(phba, intr_mode);
6804			break;
6805		} else {
6806			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6807					"0447 Configure interrupt mode (%d) "
6808					"failed active interrupt test.\n",
6809					intr_mode);
6810			/* Disable the current interrupt mode */
6811			lpfc_sli_disable_intr(phba);
6812			/* Try next level of interrupt mode */
6813			cfg_mode = --intr_mode;
6814		}
6815	}
6816
6817	/* Perform post initialization setup */
6818	lpfc_post_init_setup(phba);
6819
6820	/* Check if there are static vports to be created. */
6821	lpfc_create_static_vport(phba);
6822
6823	return 0;
6824
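	/*
	 * Error path: each label below releases what was set up before the
	 * corresponding failure, unwinding in reverse order of acquisition.
	 */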
6825out_remove_device:
6826	lpfc_unset_hba(phba);
6827out_free_sysfs_attr:
6828	lpfc_free_sysfs_attr(vport);
6829out_destroy_shost:
6830	lpfc_destroy_shost(phba);
6831out_unset_driver_resource:
6832	lpfc_unset_driver_resource_phase2(phba);
6833out_free_iocb_list:
6834	lpfc_free_iocb_list(phba);
6835out_unset_driver_resource_s3:
6836	lpfc_sli_driver_resource_unset(phba);
6837out_unset_pci_mem_s3:
6838	lpfc_sli_pci_mem_unset(phba);
6839out_disable_pci_dev:
6840	lpfc_disable_pci_dev(phba);
6841out_free_phba:
6842	lpfc_hba_free(phba);
6843	return error;
6844}
6845
6846/**
6847 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6848 * @pdev: pointer to PCI device
6849 *
6850 * This routine is to be called to detach a device with SLI-3 interface
6851 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6852 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6853 * device to be removed from the PCI subsystem properly.
6854 **/
6855static void __devexit
6856lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6857{
6858	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
6859	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6860	struct lpfc_vport **vports;
6861	struct lpfc_hba   *phba = vport->phba;
6862	int i;
6863	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6864
6865	spin_lock_irq(&phba->hbalock);
6866	vport->load_flag |= FC_UNLOADING;
6867	spin_unlock_irq(&phba->hbalock);
6868
6869	lpfc_free_sysfs_attr(vport);
6870
6871	/* Release all the vports against this physical port */
6872	vports = lpfc_create_vport_work_array(phba);
6873	if (vports != NULL)
6874		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6875			fc_vport_terminate(vports[i]->fc_vport);
6876	lpfc_destroy_vport_work_array(phba, vports);
6877
6878	/* Remove FC host and then SCSI host with the physical port */
6879	fc_remove_host(shost);
6880	scsi_remove_host(shost);
6881	lpfc_cleanup(vport);
6882
6883	/*
6884	 * Bring down the SLI Layer. This step disables all interrupts,
6885	 * clears the rings, discards all mailbox commands, and resets
6886	 * the HBA.
6887	 */
6888
6889	/* HBA interrupt will be disabled after this call */
6890	lpfc_sli_hba_down(phba);
6891	/* The kthread_stop signal triggers work_done one final time */
6892	kthread_stop(phba->worker_thread);
6893	/* Final cleanup of txcmplq and reset the HBA */
6894	lpfc_sli_brdrestart(phba);
6895
6896	lpfc_stop_hba_timers(phba);
6897	spin_lock_irq(&phba->hbalock);
6898	list_del_init(&vport->listentry);
6899	spin_unlock_irq(&phba->hbalock);
6900
6901	lpfc_debugfs_terminate(vport);
6902
6903	/* Disable interrupt */
6904	lpfc_sli_disable_intr(phba);
6905
6906	pci_set_drvdata(pdev, NULL);
6907	scsi_host_put(shost);
6908
6909	/*
6910	 * Call scsi_free before mem_free since scsi bufs are released to their
6911	 * corresponding pools here.
6912	 */
6913	lpfc_scsi_free(phba);
6914	lpfc_mem_free_all(phba);
6915
6916	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6917			  phba->hbqslimp.virt, phba->hbqslimp.phys);
6918
6919	/* Free resources associated with SLI2 interface */
6920	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6921			  phba->slim2p.virt, phba->slim2p.phys);
6922
6923	/* unmap adapter SLIM and Control Registers */
6924	iounmap(phba->ctrl_regs_memmap_p);
6925	iounmap(phba->slim_memmap_p);
6926
6927	lpfc_hba_free(phba);
6928
6929	pci_release_selected_regions(pdev, bars);
6930	pci_disable_device(pdev);
6931}
6932
6933/**
6934 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
6935 * @pdev: pointer to PCI device
6936 * @msg: power management message
6937 *
6938 * This routine is to be called from the kernel's PCI subsystem to support
6939 * system Power Management (PM) to device with SLI-3 interface spec. When
6940 * PM invokes this method, it quiesces the device by stopping the driver's
6941 * worker thread for the device, turning off device's interrupt and DMA,
6942 * and bring the device offline. Note that as the driver implements the
6943 * minimum PM requirements to a power-aware driver's PM support for the
6944 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
6945 * to the suspend() method call will be treated as SUSPEND and the driver will
6946 * fully reinitialize its device during resume() method call, the driver will
6947 * set device to PCI_D3hot state in PCI config space instead of setting it
6948 * according to the @msg provided by the PM.
6949 *
6950 * Return code
6951 * 	0 - driver suspended the device
6952 * 	Error otherwise
6953 **/
6954static int
6955lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
6956{
6957	struct Scsi_Host *shost = pci_get_drvdata(pdev);
6958	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
6959
6960	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6961			"0473 PCI device Power Management suspend.\n");
6962
6963	/* Bring down the device */
6964	lpfc_offline_prep(phba);
6965	lpfc_offline(phba);
6966	kthread_stop(phba->worker_thread);
6967
6968	/* Disable interrupt from device */
6969	lpfc_sli_disable_intr(phba);
6970
6971	/* Save device state to PCI config space */
6972	pci_save_state(pdev);
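	/* Enter D3hot; config space was saved above while still in D0 */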
6973	pci_set_power_state(pdev, PCI_D3hot);
6974
6975	return 0;
6976}
6977
6978/**
6979 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
6980 * @pdev: pointer to PCI device
6981 *
6982 * This routine is to be called from the kernel's PCI subsystem to support
6983 * system Power Management (PM) to device with SLI-3 interface spec. When PM
6984 * invokes this method, it restores the device's PCI config space state and
6985 * fully reinitializes the device and brings it online. Note that as the
6986 * driver implements the minimum PM requirements to a power-aware driver's
6987 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
6988 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
6989 * driver will fully reinitialize its device during resume() method call,
6990 * the device will be set to PCI_D0 directly in PCI config space before
6991 * restoring the state.
6992 *
6993 * Return code
6994 * 	0 - driver resumed the device
6995 * 	Error otherwise
6996 **/
6997static int
6998lpfc_pci_resume_one_s3(struct pci_dev *pdev)
6999{
7000	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7001	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7002	uint32_t intr_mode;
7003	int error;
7004
7005	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7006			"0452 PCI device Power Management resume.\n");
7007
7008	/* Restore device state from PCI config space */
7009	pci_set_power_state(pdev, PCI_D0);
7010	pci_restore_state(pdev);
7011	if (pdev->is_busmaster)
7012		pci_set_master(pdev);
7013
7014	/* Startup the kernel thread for this host adapter. */
7015	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7016					"lpfc_worker_%d", phba->brd_no);
7017	if (IS_ERR(phba->worker_thread)) {
7018		error = PTR_ERR(phba->worker_thread);
7019		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7020				"0434 PM resume failed to start worker "
7021				"thread: error=x%x.\n", error);
7022		return error;
7023	}
7024
7025	/* Configure and enable interrupt */
7026	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7027	if (intr_mode == LPFC_INTR_ERROR) {
7028		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7029				"0430 PM resume Failed to enable interrupt\n");
7030		return -EIO;
7031	} else
7032		phba->intr_mode = intr_mode;
7033
7034	/* Restart HBA and bring it online */
7035	lpfc_sli_brdrestart(phba);
7036	lpfc_online(phba);
7037
7038	/* Log the current active interrupt mode */
7039	lpfc_log_intr_mode(phba, phba->intr_mode);
7040
7041	return 0;
7042}
7043
7044/**
7045 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7046 * @pdev: pointer to PCI device.
7047 * @state: the current PCI connection state.
7048 *
7049 * This routine is called from the PCI subsystem for I/O error handling to
7050 * a device with SLI-3 interface spec. This function is called by the PCI
7051 * subsystem after a PCI bus error affecting this device has been detected.
7052 * When this function is invoked, it will need to stop all the I/Os and
7053 * interrupt(s) to the device. Once that is done, it will return
7054 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7055 * as desired.
7056 *
7057 * Return codes
7058 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7059 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7060 **/
7061static pci_ers_result_t
7062lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7063{
7064	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7065	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7066	struct lpfc_sli *psli = &phba->sli;
7067	struct lpfc_sli_ring  *pring;
7068
7069	if (state == pci_channel_io_perm_failure) {
7070		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7071				"0472 PCI channel I/O permanent failure\n");
7072		/* Block all SCSI devices' I/Os on the host */
7073		lpfc_scsi_dev_block(phba);
7074		/* Clean up all driver's outstanding SCSI I/Os */
7075		lpfc_sli_flush_fcp_rings(phba);
7076		return PCI_ERS_RESULT_DISCONNECT;
7077	}
7078
7079	pci_disable_device(pdev);
7080	/*
7081	 * There may be I/Os dropped by the firmware.
7082	 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
7083	 * retry them after re-establishing the link.
7084	 */
7085	pring = &psli->ring[psli->fcp_ring];
7086	lpfc_sli_abort_iocb_ring(phba, pring);
7087
7088	/* Disable interrupt */
7089	lpfc_sli_disable_intr(phba);
7090
7091	/* Request a slot reset. */
7092	return PCI_ERS_RESULT_NEED_RESET;
7093}
7094
7095/**
7096 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7097 * @pdev: pointer to PCI device.
7098 *
7099 * This routine is called from the PCI subsystem for error handling to
7100 * a device with SLI-3 interface spec. This is called after PCI bus has been
7101 * reset to restart the PCI card from scratch, as if from a cold-boot.
7102 * During the PCI subsystem error recovery, after driver returns
7103 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7104 * recovery and then call this routine before calling the .resume method
7105 * to recover the device. This function will initialize the HBA device,
7106 * enable the interrupt, but it will just put the HBA to offline state
7107 * without passing any I/O traffic.
7108 *
7109 * Return codes
7110 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7111 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7112 */
7113static pci_ers_result_t
7114lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7115{
7116	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7117	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7118	struct lpfc_sli *psli = &phba->sli;
7119	uint32_t intr_mode;
7120
7121	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7122	if (pci_enable_device_mem(pdev)) {
7123		printk(KERN_ERR "lpfc: Cannot re-enable "
7124			"PCI device after reset.\n");
7125		return PCI_ERS_RESULT_DISCONNECT;
7126	}
7127
7128	pci_restore_state(pdev);
7129	if (pdev->is_busmaster)
7130		pci_set_master(pdev);
7131
7132	spin_lock_irq(&phba->hbalock);
7133	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7134	spin_unlock_irq(&phba->hbalock);
7135
7136	/* Configure and enable interrupt */
7137	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7138	if (intr_mode == LPFC_INTR_ERROR) {
7139		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7140				"0427 Cannot re-enable interrupt after "
7141				"slot reset.\n");
7142		return PCI_ERS_RESULT_DISCONNECT;
7143	} else
7144		phba->intr_mode = intr_mode;
7145
7146	/* Take device offline; this will perform cleanup */
7147	lpfc_offline(phba);
7148	lpfc_sli_brdrestart(phba);
7149
7150	/* Log the current active interrupt mode */
7151	lpfc_log_intr_mode(phba, phba->intr_mode);
7152
7153	return PCI_ERS_RESULT_RECOVERED;
7154}
7155
7156/**
7157 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7158 * @pdev: pointer to PCI device
7159 *
7160 * This routine is called from the PCI subsystem for error handling to a device
7161 * with SLI-3 interface spec. It is called when kernel error recovery tells
7162 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7163 * error recovery. After this call, traffic can start to flow from this device
7164 * again.
7165 */
7166static void
7167lpfc_io_resume_s3(struct pci_dev *pdev)
7168{
7169	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7170	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7171
7172	lpfc_online(phba);
7173}
7174
7175/**
7176 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7177 * @phba: pointer to lpfc hba data structure.
7178 *
7179 * returns the number of ELS/CT IOCBs to reserve
7180 **/
7181int
7182lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7183{
7184	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7185
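	/*
	 * Reservation scales with the number of XRIs configured; e.g. with
	 * max_xri = 300 the <= 512 tier applies and 16 IOCBs are reserved.
	 */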
7186	if (max_xri <= 100)
7187		return 4;
7188	else if (max_xri <= 256)
7189		return 8;
7190	else if (max_xri <= 512)
7191		return 16;
7192	else if (max_xri <= 1024)
7193		return 32;
7194	else
7195		return 48;
7196}
7197
7198/**
7199 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7200 * @pdev: pointer to PCI device
7201 * @pid: pointer to PCI device identifier
7202 *
7203 * This routine is called from the kernel's PCI subsystem to attach a device with
7204 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7205 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7206 * information of the device and driver to see if the driver state that it
7207 * can support this kind of device. If the match is successful, the driver
7208 * core invokes this routine. If this routine determines it can claim the HBA,
7209 * it does all the initialization that it needs to do to handle the HBA
7210 * properly.
7211 *
7212 * Return code
7213 * 	0 - driver can claim the device
7214 * 	negative value - driver cannot claim the device
7215 **/
7216static int __devinit
7217lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7218{
7219	struct lpfc_hba   *phba;
7220	struct lpfc_vport *vport = NULL;
7221	int error;
7222	uint32_t cfg_mode, intr_mode;
7223	int mcnt;
7224
7225	/* Allocate memory for HBA structure */
7226	phba = lpfc_hba_alloc(pdev);
7227	if (!phba)
7228		return -ENOMEM;
7229
7230	/* Perform generic PCI device enabling operation */
7231	error = lpfc_enable_pci_dev(phba);
7232	if (error) {
7233		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7234				"1409 Failed to enable pci device.\n");
7235		goto out_free_phba;
7236	}
7237
7238	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
7239	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7240	if (error)
7241		goto out_disable_pci_dev;
7242
7243	/* Set up SLI-4 specific device PCI memory space */
7244	error = lpfc_sli4_pci_mem_setup(phba);
7245	if (error) {
7246		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7247				"1410 Failed to set up pci memory space.\n");
7248		goto out_disable_pci_dev;
7249	}
7250
7251	/* Set up phase-1 common device driver resources */
7252	error = lpfc_setup_driver_resource_phase1(phba);
7253	if (error) {
7254		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7255				"1411 Failed to set up driver resource.\n");
7256		goto out_unset_pci_mem_s4;
7257	}
7258
7259	/* Set up SLI-4 Specific device driver resources */
7260	error = lpfc_sli4_driver_resource_setup(phba);
7261	if (error) {
7262		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7263				"1412 Failed to set up driver resource.\n");
7264		goto out_unset_pci_mem_s4;
7265	}
7266
7267	/* Initialize and populate the iocb list per host */
7268	error = lpfc_init_iocb_list(phba,
7269			phba->sli4_hba.max_cfg_param.max_xri);
7270	if (error) {
7271		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7272				"1413 Failed to initialize iocb list.\n");
7273		goto out_unset_driver_resource_s4;
7274	}
7275
7276	/* Set up common device driver resources */
7277	error = lpfc_setup_driver_resource_phase2(phba);
7278	if (error) {
7279		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7280				"1414 Failed to set up driver resource.\n");
7281		goto out_free_iocb_list;
7282	}
7283
7284	/* Create SCSI host to the physical port */
7285	error = lpfc_create_shost(phba);
7286	if (error) {
7287		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7288				"1415 Failed to create scsi host.\n");
7289		goto out_unset_driver_resource;
7290	}
7291
7292	/* Configure sysfs attributes */
7293	vport = phba->pport;
7294	error = lpfc_alloc_sysfs_attr(vport);
7295	if (error) {
7296		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7297				"1416 Failed to allocate sysfs attr\n");
7298		goto out_destroy_shost;
7299	}
7300
7301	/* Now try to enable interrupts and bring up the device */
7302	cfg_mode = phba->cfg_use_msi;
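	/*
	 * Same interrupt-mode fallback ladder as the SLI-3 probe, except that
	 * for MSI/MSI-X the driver first issues NOP mailbox commands to
	 * generate a known number of interrupts for the test below.
	 */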
7303	while (true) {
7304		/* Put device to a known state before enabling interrupt */
7305		lpfc_stop_port(phba);
7306		/* Configure and enable interrupt */
7307		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7308		if (intr_mode == LPFC_INTR_ERROR) {
7309			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7310					"0426 Failed to enable interrupt.\n");
7311			error = -ENODEV;
7312			goto out_free_sysfs_attr;
7313		}
7314		/* Set up SLI-4 HBA */
7315		if (lpfc_sli4_hba_setup(phba)) {
7316			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7317					"1421 Failed to set up hba\n");
7318			error = -ENODEV;
7319			goto out_disable_intr;
7320		}
7321
7322		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
7323		if (intr_mode != 0)
7324			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7325							    LPFC_ACT_INTR_CNT);
7326
7327		/* Check active interrupts received only for MSI/MSI-X */
7328		if (intr_mode == 0 ||
7329		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7330			/* Log the current active interrupt mode */
7331			phba->intr_mode = intr_mode;
7332			lpfc_log_intr_mode(phba, intr_mode);
7333			break;
7334		}
7335		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7336				"0451 Configure interrupt mode (%d) "
7337				"failed active interrupt test.\n",
7338				intr_mode);
7339		/* Unset the previous SLI-4 HBA setup */
7340		lpfc_sli4_unset_hba(phba);
7341		/* Try next level of interrupt mode */
7342		cfg_mode = --intr_mode;
7343	}
7344
7345	/* Perform post initialization setup */
7346	lpfc_post_init_setup(phba);
7347
7348	return 0;
7349
7350out_disable_intr:
7351	lpfc_sli4_disable_intr(phba);
7352out_free_sysfs_attr:
7353	lpfc_free_sysfs_attr(vport);
7354out_destroy_shost:
7355	lpfc_destroy_shost(phba);
7356out_unset_driver_resource:
7357	lpfc_unset_driver_resource_phase2(phba);
7358out_free_iocb_list:
7359	lpfc_free_iocb_list(phba);
7360out_unset_driver_resource_s4:
7361	lpfc_sli4_driver_resource_unset(phba);
7362out_unset_pci_mem_s4:
7363	lpfc_sli4_pci_mem_unset(phba);
7364out_disable_pci_dev:
7365	lpfc_disable_pci_dev(phba);
7366out_free_phba:
7367	lpfc_hba_free(phba);
7368	return error;
7369}
7370
7371/**
7372 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7373 * @pdev: pointer to PCI device
7374 *
7375 * This routine is called from the kernel's PCI subsystem to detach a device with
7376 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7377 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7378 * device to be removed from the PCI subsystem properly.
7379 **/
7380static void __devexit
7381lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7382{
7383	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7384	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7385	struct lpfc_vport **vports;
7386	struct lpfc_hba *phba = vport->phba;
7387	int i;
7388
7389	/* Mark the device unloading flag */
7390	spin_lock_irq(&phba->hbalock);
7391	vport->load_flag |= FC_UNLOADING;
7392	spin_unlock_irq(&phba->hbalock);
7393
7394	/* Free the HBA sysfs attributes */
7395	lpfc_free_sysfs_attr(vport);
7396
7397	/* Release all the vports against this physical port */
7398	vports = lpfc_create_vport_work_array(phba);
7399	if (vports != NULL)
7400		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7401			fc_vport_terminate(vports[i]->fc_vport);
7402	lpfc_destroy_vport_work_array(phba, vports);
7403
7404	/* Remove FC host and then SCSI host with the physical port */
7405	fc_remove_host(shost);
7406	scsi_remove_host(shost);
7407
7408	/* Perform cleanup on the physical port */
7409	lpfc_cleanup(vport);
7410
7411	/*
7412	 * Bring down the SLI Layer. This step disables all interrupts,
7413	 * clears the rings, discards all mailbox commands, and resets
7414	 * the HBA FCoE function.
7415	 */
7416	lpfc_debugfs_terminate(vport);
7417	lpfc_sli4_hba_unset(phba);
7418
7419	spin_lock_irq(&phba->hbalock);
7420	list_del_init(&vport->listentry);
7421	spin_unlock_irq(&phba->hbalock);
7422
7423	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7424	 * buffers are released to their corresponding pools here.
7425	 */
7426	lpfc_scsi_free(phba);
7427	lpfc_sli4_driver_resource_unset(phba);
7428
7429	/* Unmap adapter Control and Doorbell registers */
7430	lpfc_sli4_pci_mem_unset(phba);
7431
7432	/* Release PCI resources and disable device's PCI function */
7433	scsi_host_put(shost);
7434	lpfc_disable_pci_dev(phba);
7435
7436	/* Finally, free the driver's device data structure */
7437	lpfc_hba_free(phba);
7438
7439	return;
7440}
7441
7442/**
7443 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7444 * @pdev: pointer to PCI device
7445 * @msg: power management message
7446 *
7447 * This routine is called from the kernel's PCI subsystem to support system
7448 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7449 * this method, it quiesces the device by stopping the driver's worker
7450 * thread for the device, turning off the device's interrupt and DMA, and
7451 * bringing the device offline. Note that because the driver implements
7452 * only the minimum PM requirements for a power-aware driver -- all
7453 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7454 * method call are treated as SUSPEND and the driver fully reinitializes
7455 * its device during the resume() method call -- the driver sets the
7456 * device to PCI_D3hot state in PCI config space instead of setting it
7457 * according to the @msg provided by the PM.
7458 *
7459 * Return code
7460 * 	0 - driver suspended the device
7461 * 	Error otherwise
7462 **/
7463static int
7464lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
7465{
7466	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7467	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7468
7469	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7470			"0298 PCI device Power Management suspend.\n");
7471
7472	/* Bring down the device */
7473	lpfc_offline_prep(phba);
7474	lpfc_offline(phba);
7475	kthread_stop(phba->worker_thread);
7476
7477	/* Disable interrupt from device */
7478	lpfc_sli4_disable_intr(phba);
7479
7480	/* Save device state to PCI config space */
7481	pci_save_state(pdev);
7482	pci_set_power_state(pdev, PCI_D3hot);
7483
7484	return 0;
7485}
7486
7487/**
7488 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7489 * @pdev: pointer to PCI device
7490 *
7491 * This routine is called from the kernel's PCI subsystem to support system
7492 * Power Management (PM) for a device with SLI-4 interface spec. When PM
7493 * invokes this method, it restores the device's PCI config space state and
7494 * fully reinitializes the device and brings it online. Note that because
7495 * the driver implements only the minimum PM requirements for a power-aware
7496 * driver -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
7497 * suspend() method call are treated as SUSPEND and the driver fully
7498 * reinitializes its device during the resume() method call -- the device
7499 * is set to PCI_D0 directly in PCI config space before restoring the
7500 * state.
7501 *
7502 * Return code
7503 * 	0 - driver resumed the device
7504 * 	Error otherwise
7505 **/
7506static int
7507lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7508{
7509	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7510	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7511	uint32_t intr_mode;
7512	int error;
7513
7514	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7515			"0292 PCI device Power Management resume.\n");
7516
7517	/* Restore device state from PCI config space */
7518	pci_set_power_state(pdev, PCI_D0);
7519	pci_restore_state(pdev);
7520	if (pdev->is_busmaster)
7521		pci_set_master(pdev);
7522
7523	/* Startup the kernel thread for this host adapter. */
7524	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7525					"lpfc_worker_%d", phba->brd_no);
7526	if (IS_ERR(phba->worker_thread)) {
7527		error = PTR_ERR(phba->worker_thread);
7528		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7529				"0293 PM resume failed to start worker "
7530				"thread: error=x%x.\n", error);
7531		return error;
7532	}
7533
7534	/* Configure and enable interrupt */
7535	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7536	if (intr_mode == LPFC_INTR_ERROR) {
7537		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7538				"0294 PM resume Failed to enable interrupt\n");
7539		return -EIO;
7540	} else
7541		phba->intr_mode = intr_mode;
7542
7543	/* Restart HBA and bring it online */
7544	lpfc_sli_brdrestart(phba);
7545	lpfc_online(phba);
7546
7547	/* Log the current active interrupt mode */
7548	lpfc_log_intr_mode(phba, phba->intr_mode);
7549
7550	return 0;
7551}
7552
7553/**
7554 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7555 * @pdev: pointer to PCI device.
7556 * @state: the current PCI connection state.
7557 *
7558 * This routine is called from the PCI subsystem for error handling to a device
7559 * with SLI-4 interface spec. This function is called by the PCI subsystem
7560 * after a PCI bus error affecting this device has been detected. When this
7561 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7562 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7563 * for the PCI subsystem to perform proper recovery as desired.
7564 *
7565 * Return codes
7566 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7567 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7568 **/
7569static pci_ers_result_t
7570lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7571{
7572	return PCI_ERS_RESULT_NEED_RESET;
7573}
7574
7575/**
7576 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7577 * @pdev: pointer to PCI device.
7578 *
7579 * This routine is called from the PCI subsystem for error handling to a device
7580 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7581 * restart the PCI card from scratch, as if from a cold-boot. During the
7582 * PCI subsystem error recovery, after the driver returns
7583 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7584 * recovery and then call this routine before calling the .resume method to
7585 * recover the device. This function will initialize the HBA device, enable
7586 * the interrupt, but it will just put the HBA to offline state without
7587 * passing any I/O traffic.
7588 *
7589 * Return codes
7590 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7591 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7592 */
7593static pci_ers_result_t
7594lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7595{
7596	return PCI_ERS_RESULT_RECOVERED;
7597}
7598
7599/**
7600 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7601 * @pdev: pointer to PCI device
7602 *
7603 * This routine is called from the PCI subsystem for error handling to a device
7604 * with SLI-4 interface spec. It is called when kernel error recovery tells
7605 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7606 * error recovery. After this call, traffic can start to flow from this device
7607 * again.
7608 **/
7609static void
7610lpfc_io_resume_s4(struct pci_dev *pdev)
7611{
7612	return;
7613}
7614
7615/**
7616 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7617 * @pdev: pointer to PCI device
7618 * @pid: pointer to PCI device identifier
7619 *
7620 * This routine is to be registered to the kernel's PCI subsystem. When an
7621 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7622 * at PCI device-specific information of the device and driver to see
7623 * whether the driver can support this kind of device. If the match is
7624 * successful, the driver core invokes this routine. This routine dispatches
7625 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7626 * do all the initialization that it needs to do to handle the HBA device
7627 * properly.
7628 *
7629 * Return code
7630 * 	0 - driver can claim the device
7631 * 	negative value - driver cannot claim the device
7632 **/
7633static int __devinit
7634lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7635{
7636	int rc;
7637	uint16_t dev_id;
7638
7639	if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7640		return -ENODEV;
7641
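	/* Tigershark (FCoE) parts take the SLI-4 path; all others are SLI-3 */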
7642	switch (dev_id) {
7643	case PCI_DEVICE_ID_TIGERSHARK:
7644	case PCI_DEVICE_ID_TIGERSHARK_S:
7645		rc = lpfc_pci_probe_one_s4(pdev, pid);
7646		break;
7647	default:
7648		rc = lpfc_pci_probe_one_s3(pdev, pid);
7649		break;
7650	}
7651	return rc;
7652}
7653
7654/**
7655 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7656 * @pdev: pointer to PCI device
7657 *
7658 * This routine is to be registered to the kernel's PCI subsystem. When an
7659 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7660 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7661 * remove routine, which will perform all the necessary cleanup for the
7662 * device to be removed from the PCI subsystem properly.
7663 **/
7664static void __devexit
7665lpfc_pci_remove_one(struct pci_dev *pdev)
7666{
7667	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7668	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7669
7670	switch (phba->pci_dev_grp) {
7671	case LPFC_PCI_DEV_LP:
7672		lpfc_pci_remove_one_s3(pdev);
7673		break;
7674	case LPFC_PCI_DEV_OC:
7675		lpfc_pci_remove_one_s4(pdev);
7676		break;
7677	default:
7678		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7679				"1424 Invalid PCI device group: 0x%x\n",
7680				phba->pci_dev_grp);
7681		break;
7682	}
7683	return;
7684}
7685
7686/**
7687 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7688 * @pdev: pointer to PCI device
7689 * @msg: power management message
7690 *
7691 * This routine is to be registered to the kernel's PCI subsystem to support
7692 * system Power Management (PM). When PM invokes this method, it dispatches
7693 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7694 * suspend the device.
7695 *
7696 * Return code
7697 * 	0 - driver suspended the device
7698 * 	Error otherwise
7699 **/
7700static int
7701lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7702{
7703	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7704	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7705	int rc = -ENODEV;
7706
7707	switch (phba->pci_dev_grp) {
7708	case LPFC_PCI_DEV_LP:
7709		rc = lpfc_pci_suspend_one_s3(pdev, msg);
7710		break;
7711	case LPFC_PCI_DEV_OC:
7712		rc = lpfc_pci_suspend_one_s4(pdev, msg);
7713		break;
7714	default:
7715		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7716				"1425 Invalid PCI device group: 0x%x\n",
7717				phba->pci_dev_grp);
7718		break;
7719	}
7720	return rc;
7721}
7722
7723/**
7724 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7725 * @pdev: pointer to PCI device
7726 *
7727 * This routine is to be registered to the kernel's PCI subsystem to support
7728 * system Power Management (PM). When PM invokes this method, it dispatches
7729 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7730 * resume the device.
7731 *
7732 * Return code
7733 * 	0 - driver resumed the device
7734 * 	Error otherwise
7735 **/
7736static int
7737lpfc_pci_resume_one(struct pci_dev *pdev)
7738{
7739	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7740	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7741	int rc = -ENODEV;
7742
7743	switch (phba->pci_dev_grp) {
7744	case LPFC_PCI_DEV_LP:
7745		rc = lpfc_pci_resume_one_s3(pdev);
7746		break;
7747	case LPFC_PCI_DEV_OC:
7748		rc = lpfc_pci_resume_one_s4(pdev);
7749		break;
7750	default:
7751		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7752				"1426 Invalid PCI device group: 0x%x\n",
7753				phba->pci_dev_grp);
7754		break;
7755	}
7756	return rc;
7757}
7758
7759/**
7760 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7761 * @pdev: pointer to PCI device.
7762 * @state: the current PCI connection state.
7763 *
7764 * This routine is registered to the PCI subsystem for error handling. This
7765 * function is called by the PCI subsystem after a PCI bus error affecting
7766 * this device has been detected. When this routine is invoked, it dispatches
7767 * the action to the proper SLI-3 or SLI-4 device error detected handling
7768 * routine, which will perform the proper error detected operation.
7769 *
7770 * Return codes
7771 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7772 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7773 **/
7774static pci_ers_result_t
7775lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7776{
7777	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7778	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7779	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7780
7781	switch (phba->pci_dev_grp) {
7782	case LPFC_PCI_DEV_LP:
7783		rc = lpfc_io_error_detected_s3(pdev, state);
7784		break;
7785	case LPFC_PCI_DEV_OC:
7786		rc = lpfc_io_error_detected_s4(pdev, state);
7787		break;
7788	default:
7789		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7790				"1427 Invalid PCI device group: 0x%x\n",
7791				phba->pci_dev_grp);
7792		break;
7793	}
7794	return rc;
7795}
7796
7797/**
7798 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7799 * @pdev: pointer to PCI device.
7800 *
7801 * This routine is registered to the PCI subsystem for error handling. This
7802 * function is called after PCI bus has been reset to restart the PCI card
7803 * from scratch, as if from a cold-boot. When this routine is invoked, it
7804 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7805 * routine, which will perform the proper device reset.
7806 *
7807 * Return codes
7808 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7809 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7810 **/
7811static pci_ers_result_t
7812lpfc_io_slot_reset(struct pci_dev *pdev)
7813{
7814	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7815	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7816	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7817
7818	switch (phba->pci_dev_grp) {
7819	case LPFC_PCI_DEV_LP:
7820		rc = lpfc_io_slot_reset_s3(pdev);
7821		break;
7822	case LPFC_PCI_DEV_OC:
7823		rc = lpfc_io_slot_reset_s4(pdev);
7824		break;
7825	default:
7826		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7827				"1428 Invalid PCI device group: 0x%x\n",
7828				phba->pci_dev_grp);
7829		break;
7830	}
7831	return rc;
7832}
7833
7834/**
7835 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7836 * @pdev: pointer to PCI device
7837 *
7838 * This routine is registered to the PCI subsystem for error handling. It
7839 * is called when kernel error recovery tells the lpfc driver that it is
7840 * OK to resume normal PCI operation after PCI bus error recovery. When
7841 * this routine is invoked, it dispatches the action to the proper SLI-3
7842 * or SLI-4 device io_resume routine, which will resume the device operation.
7843 **/
7844static void
7845lpfc_io_resume(struct pci_dev *pdev)
7846{
7847	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7848	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7849
7850	switch (phba->pci_dev_grp) {
7851	case LPFC_PCI_DEV_LP:
7852		lpfc_io_resume_s3(pdev);
7853		break;
7854	case LPFC_PCI_DEV_OC:
7855		lpfc_io_resume_s4(pdev);
7856		break;
7857	default:
7858		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7859				"1429 Invalid PCI device group: 0x%x\n",
7860				phba->pci_dev_grp);
7861		break;
7862	}
7863	return;
7864}
7865
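/*
 * PCI device IDs claimed by this driver.  PCI_ANY_ID wildcards the
 * subsystem vendor and device fields; the zeroed entry terminates the
 * table.
 */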
7866static struct pci_device_id lpfc_id_table[] = {
7867	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
7868		PCI_ANY_ID, PCI_ANY_ID, },
7869	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
7870		PCI_ANY_ID, PCI_ANY_ID, },
7871	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
7872		PCI_ANY_ID, PCI_ANY_ID, },
7873	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
7874		PCI_ANY_ID, PCI_ANY_ID, },
7875	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
7876		PCI_ANY_ID, PCI_ANY_ID, },
7877	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
7878		PCI_ANY_ID, PCI_ANY_ID, },
7879	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
7880		PCI_ANY_ID, PCI_ANY_ID, },
7881	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
7882		PCI_ANY_ID, PCI_ANY_ID, },
7883	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
7884		PCI_ANY_ID, PCI_ANY_ID, },
7885	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
7886		PCI_ANY_ID, PCI_ANY_ID, },
7887	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
7888		PCI_ANY_ID, PCI_ANY_ID, },
7889	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
7890		PCI_ANY_ID, PCI_ANY_ID, },
7891	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
7892		PCI_ANY_ID, PCI_ANY_ID, },
7893	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
7894		PCI_ANY_ID, PCI_ANY_ID, },
7895	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
7896		PCI_ANY_ID, PCI_ANY_ID, },
7897	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
7898		PCI_ANY_ID, PCI_ANY_ID, },
7899	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
7900		PCI_ANY_ID, PCI_ANY_ID, },
7901	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
7902		PCI_ANY_ID, PCI_ANY_ID, },
7903	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
7904		PCI_ANY_ID, PCI_ANY_ID, },
7905	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
7906		PCI_ANY_ID, PCI_ANY_ID, },
7907	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
7908		PCI_ANY_ID, PCI_ANY_ID, },
7909	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
7910		PCI_ANY_ID, PCI_ANY_ID, },
7911	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
7912		PCI_ANY_ID, PCI_ANY_ID, },
7913	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
7914		PCI_ANY_ID, PCI_ANY_ID, },
7915	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
7916		PCI_ANY_ID, PCI_ANY_ID, },
7917	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
7918		PCI_ANY_ID, PCI_ANY_ID, },
7919	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
7920		PCI_ANY_ID, PCI_ANY_ID, },
7921	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
7922		PCI_ANY_ID, PCI_ANY_ID, },
7923	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
7924		PCI_ANY_ID, PCI_ANY_ID, },
7925	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
7926		PCI_ANY_ID, PCI_ANY_ID, },
7927	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
7928		PCI_ANY_ID, PCI_ANY_ID, },
7929	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
7930		PCI_ANY_ID, PCI_ANY_ID, },
7931	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
7932		PCI_ANY_ID, PCI_ANY_ID, },
7933	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
7934		PCI_ANY_ID, PCI_ANY_ID, },
7935	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
7936		PCI_ANY_ID, PCI_ANY_ID, },
7937	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
7938		PCI_ANY_ID, PCI_ANY_ID, },
7939	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
7940		PCI_ANY_ID, PCI_ANY_ID, },
7941	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7942		PCI_ANY_ID, PCI_ANY_ID, },
7943	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7944		PCI_ANY_ID, PCI_ANY_ID, },
7945	{ 0 }
7946};
7947
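/*
 * Export the ID table in the module image so hotplug/udev can match
 * devices and autoload this module.
 */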
7948MODULE_DEVICE_TABLE(pci, lpfc_id_table);
7949
7950static struct pci_error_handlers lpfc_err_handler = {
7951	.error_detected = lpfc_io_error_detected,
7952	.slot_reset = lpfc_io_slot_reset,
7953	.resume = lpfc_io_resume,
7954};
7955
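/*
 * Top-level PCI entry points.  Each callback dispatches to the SLI-3
 * (_s3) or SLI-4 (_s4) variant: by PCI device ID at probe time, and by
 * phba->pci_dev_grp thereafter.
 */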
7956static struct pci_driver lpfc_driver = {
7957	.name		= LPFC_DRIVER_NAME,
7958	.id_table	= lpfc_id_table,
7959	.probe		= lpfc_pci_probe_one,
7960	.remove		= __devexit_p(lpfc_pci_remove_one),
7961	.suspend        = lpfc_pci_suspend_one,
7962	.resume		= lpfc_pci_resume_one,
7963	.err_handler    = &lpfc_err_handler,
7964};
7965
7966/**
7967 * lpfc_init - lpfc module initialization routine
7968 *
7969 * This routine is to be invoked when the lpfc module is loaded into the
7970 * kernel. The special kernel macro module_init() is used to indicate the
7971 * role of this routine to the kernel as lpfc module entry point.
7972 *
7973 * Return codes
7974 *   0 - successful
7975 *   -ENOMEM - FC attach transport failed
7976 *   all others - failed
7977 */
7978static int __init
7979lpfc_init(void)
7980{
7981	int error = 0;
7982
7983	printk(LPFC_MODULE_DESC "\n");
7984	printk(LPFC_COPYRIGHT "\n");
7985
7986	if (lpfc_enable_npiv) {
7987		lpfc_transport_functions.vport_create = lpfc_vport_create;
7988		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
7989	}
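	/*
	 * The FC transport template(s) must be attached before the PCI
	 * driver is registered below, as probe-time SCSI host creation
	 * uses them.
	 */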
7990	lpfc_transport_template =
7991				fc_attach_transport(&lpfc_transport_functions);
7992	if (lpfc_transport_template == NULL)
7993		return -ENOMEM;
7994	if (lpfc_enable_npiv) {
7995		lpfc_vport_transport_template =
7996			fc_attach_transport(&lpfc_vport_transport_functions);
7997		if (lpfc_vport_transport_template == NULL) {
7998			fc_release_transport(lpfc_transport_template);
7999			return -ENOMEM;
8000		}
8001	}
8002	error = pci_register_driver(&lpfc_driver);
8003	if (error) {
8004		fc_release_transport(lpfc_transport_template);
8005		if (lpfc_enable_npiv)
8006			fc_release_transport(lpfc_vport_transport_template);
8007	}
8008
8009	return error;
8010}
8011
8012/**
8013 * lpfc_exit - lpfc module removal routine
8014 *
8015 * This routine is invoked when the lpfc module is removed from the kernel.
8016 * The special kernel macro module_exit() is used to indicate the role of
8017 * this routine to the kernel as lpfc module exit point.
8018 */
8019static void __exit
8020lpfc_exit(void)
8021{
8022	pci_unregister_driver(&lpfc_driver);
8023	fc_release_transport(lpfc_transport_template);
8024	if (lpfc_enable_npiv)
8025		fc_release_transport(lpfc_vport_transport_template);
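	/* Free the BLKGRD (BlockGuard) debug dump buffers, if allocated */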
8026	if (_dump_buf_data) {
8027		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
8028				"at 0x%p\n",
8029				(1L << _dump_buf_data_order), _dump_buf_data);
8030		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8031	}
8032
8033	if (_dump_buf_dif) {
8034		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
8035				"at 0x%p\n",
8036				(1L << _dump_buf_dif_order), _dump_buf_dif);
8037		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8038	}
8039}
8040
8041module_init(lpfc_init);
8042module_exit(lpfc_exit);
8043MODULE_LICENSE("GPL");
8044MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8045MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8046MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
8047