lpfc_init.c revision 32b9793fe6ff09a85f36b8bd7d6ff214653a7497
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30#include <linux/ctype.h>
31
32#include <scsi/scsi.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include "lpfc_hw4.h"
38#include "lpfc_hw.h"
39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
41#include "lpfc_nl.h"
42#include "lpfc_disc.h"
43#include "lpfc_scsi.h"
44#include "lpfc.h"
45#include "lpfc_logmsg.h"
46#include "lpfc_crtn.h"
47#include "lpfc_vport.h"
48#include "lpfc_version.h"
49
50char *_dump_buf_data;
51unsigned long _dump_buf_data_order;
52char *_dump_buf_dif;
53unsigned long _dump_buf_dif_order;
54spinlock_t _dump_buf_lock;
55
56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
73
74static struct scsi_transport_template *lpfc_transport_template = NULL;
75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
76static DEFINE_IDR(lpfc_hba_index);
77
78/**
79 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
80 * @phba: pointer to lpfc hba data structure.
81 *
82 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
83 * mailbox command. It retrieves the revision information from the HBA and
84 * collects the Vital Product Data (VPD) about the HBA for preparing the
85 * configuration of the HBA.
86 *
87 * Return codes:
88 *   0 - success.
89 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
90 *   Any other value - indicates an error.
91 **/
92int
93lpfc_config_port_prep(struct lpfc_hba *phba)
94{
95	lpfc_vpd_t *vp = &phba->vpd;
96	int i = 0, rc;
97	LPFC_MBOXQ_t *pmb;
98	MAILBOX_t *mb;
99	char *lpfc_vpd_data = NULL;
100	uint16_t offset = 0;
101	static char licensed[56] =
102		    "key unlock for use with gnu public licensed code only\0";
103	static int init_key = 1;
104
105	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
106	if (!pmb) {
107		phba->link_state = LPFC_HBA_ERROR;
108		return -ENOMEM;
109	}
110
111	mb = &pmb->u.mb;
112	phba->link_state = LPFC_INIT_MBX_CMDS;
113
114	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
115		if (init_key) {
116			uint32_t *ptext = (uint32_t *) licensed;
117
118			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
119				*ptext = cpu_to_be32(*ptext);
120			init_key = 0;
121		}
122
123		lpfc_read_nv(phba, pmb);
124		memset((char*)mb->un.varRDnvp.rsvd3, 0,
125			sizeof (mb->un.varRDnvp.rsvd3));
126		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
127			 sizeof (licensed));
128
129		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
130
131		if (rc != MBX_SUCCESS) {
132			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
133					"0324 Config Port initialization "
134					"error, mbxCmd x%x READ_NVPARM, "
135					"mbxStatus x%x\n",
136					mb->mbxCommand, mb->mbxStatus);
137			mempool_free(pmb, phba->mbox_mem_pool);
138			return -ERESTART;
139		}
140		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
141		       sizeof(phba->wwnn));
142		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
143		       sizeof(phba->wwpn));
144	}
145
146	phba->sli3_options = 0x0;
147
148	/* Setup and issue mailbox READ REV command */
149	lpfc_read_rev(phba, pmb);
150	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
151	if (rc != MBX_SUCCESS) {
152		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
153				"0439 Adapter failed to init, mbxCmd x%x "
154				"READ_REV, mbxStatus x%x\n",
155				mb->mbxCommand, mb->mbxStatus);
156		mempool_free( pmb, phba->mbox_mem_pool);
157		return -ERESTART;
158	}
159
160
161	/*
162	 * The value of rr must be 1 since the driver set the cv field to 1.
163	 * This setting requires the FW to set all revision fields.
164	 */
165	if (mb->un.varRdRev.rr == 0) {
166		vp->rev.rBit = 0;
167		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
168				"0440 Adapter failed to init, READ_REV has "
169				"missing revision information.\n");
170		mempool_free(pmb, phba->mbox_mem_pool);
171		return -ERESTART;
172	}
173
174	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
175		mempool_free(pmb, phba->mbox_mem_pool);
176		return -EINVAL;
177	}
178
179	/* Save information as VPD data */
180	vp->rev.rBit = 1;
181	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
182	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
183	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
184	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
185	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
186	vp->rev.biuRev = mb->un.varRdRev.biuRev;
187	vp->rev.smRev = mb->un.varRdRev.smRev;
188	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
189	vp->rev.endecRev = mb->un.varRdRev.endecRev;
190	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
191	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
192	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
193	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
194	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
195	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
196
197	/* If the sli feature level is less then 9, we must
198	 * tear down all RPIs and VPIs on link down if NPIV
199	 * is enabled.
200	 */
201	if (vp->rev.feaLevelHigh < 9)
202		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
203
204	if (lpfc_is_LC_HBA(phba->pcidev->device))
205		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
206						sizeof (phba->RandomData));
207
208	/* Get adapter VPD information */
209	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
210	if (!lpfc_vpd_data)
211		goto out_free_mbox;
212
213	do {
214		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
215		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
216
217		if (rc != MBX_SUCCESS) {
218			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
219					"0441 VPD not present on adapter, "
220					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
221					mb->mbxCommand, mb->mbxStatus);
222			mb->un.varDmp.word_cnt = 0;
223		}
224		/* dump mem may return a zero when finished or we got a
225		 * mailbox error, either way we are done.
226		 */
227		if (mb->un.varDmp.word_cnt == 0)
228			break;
229		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
230			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
231		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
232				      lpfc_vpd_data + offset,
233				      mb->un.varDmp.word_cnt);
234		offset += mb->un.varDmp.word_cnt;
235	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
236	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
237
238	kfree(lpfc_vpd_data);
239out_free_mbox:
240	mempool_free(pmb, phba->mbox_mem_pool);
241	return 0;
242}
243
244/**
245 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
246 * @phba: pointer to lpfc hba data structure.
247 * @pmboxq: pointer to the driver internal queue element for mailbox command.
248 *
249 * This is the completion handler for driver's configuring asynchronous event
250 * mailbox command to the device. If the mailbox command returns successfully,
251 * it will set internal async event support flag to 1; otherwise, it will
252 * set internal async event support flag to 0.
253 **/
254static void
255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
256{
257	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
258		phba->temp_sensor_support = 1;
259	else
260		phba->temp_sensor_support = 0;
261	mempool_free(pmboxq, phba->mbox_mem_pool);
262	return;
263}
264
265/**
266 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
267 * @phba: pointer to lpfc hba data structure.
268 * @pmboxq: pointer to the driver internal queue element for mailbox command.
269 *
270 * This is the completion handler for dump mailbox command for getting
271 * wake up parameters. When this command complete, the response contain
272 * Option rom version of the HBA. This function translate the version number
273 * into a human readable string and store it in OptionROMVersion.
274 **/
275static void
276lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
277{
278	struct prog_id *prg;
279	uint32_t prog_id_word;
280	char dist = ' ';
281	/* character array used for decoding dist type. */
282	char dist_char[] = "nabx";
283
284	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
285		mempool_free(pmboxq, phba->mbox_mem_pool);
286		return;
287	}
288
289	prg = (struct prog_id *) &prog_id_word;
290
291	/* word 7 contain option rom version */
292	prog_id_word = pmboxq->u.mb.un.varWords[7];
293
294	/* Decode the Option rom version word to a readable string */
295	if (prg->dist < 4)
296		dist = dist_char[prg->dist];
297
298	if ((prg->dist == 3) && (prg->num == 0))
299		sprintf(phba->OptionROMVersion, "%d.%d%d",
300			prg->ver, prg->rev, prg->lev);
301	else
302		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
303			prg->ver, prg->rev, prg->lev,
304			dist, prg->num);
305	mempool_free(pmboxq, phba->mbox_mem_pool);
306	return;
307}
308
309/**
310 * lpfc_config_port_post - Perform lpfc initialization after config port
311 * @phba: pointer to lpfc hba data structure.
312 *
313 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
314 * command call. It performs all internal resource and state setups on the
315 * port: post IOCB buffers, enable appropriate host interrupt attentions,
316 * ELS ring timers, etc.
317 *
318 * Return codes
319 *   0 - success.
320 *   Any other value - error.
321 **/
322int
323lpfc_config_port_post(struct lpfc_hba *phba)
324{
325	struct lpfc_vport *vport = phba->pport;
326	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
327	LPFC_MBOXQ_t *pmb;
328	MAILBOX_t *mb;
329	struct lpfc_dmabuf *mp;
330	struct lpfc_sli *psli = &phba->sli;
331	uint32_t status, timeout;
332	int i, j;
333	int rc;
334
335	spin_lock_irq(&phba->hbalock);
336	/*
337	 * If the Config port completed correctly the HBA is not
338	 * over heated any more.
339	 */
340	if (phba->over_temp_state == HBA_OVER_TEMP)
341		phba->over_temp_state = HBA_NORMAL_TEMP;
342	spin_unlock_irq(&phba->hbalock);
343
344	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
345	if (!pmb) {
346		phba->link_state = LPFC_HBA_ERROR;
347		return -ENOMEM;
348	}
349	mb = &pmb->u.mb;
350
351	/* Get login parameters for NID.  */
352	lpfc_read_sparam(phba, pmb, 0);
353	pmb->vport = vport;
354	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
355		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
356				"0448 Adapter failed init, mbxCmd x%x "
357				"READ_SPARM mbxStatus x%x\n",
358				mb->mbxCommand, mb->mbxStatus);
359		phba->link_state = LPFC_HBA_ERROR;
360		mp = (struct lpfc_dmabuf *) pmb->context1;
361		mempool_free( pmb, phba->mbox_mem_pool);
362		lpfc_mbuf_free(phba, mp->virt, mp->phys);
363		kfree(mp);
364		return -EIO;
365	}
366
367	mp = (struct lpfc_dmabuf *) pmb->context1;
368
369	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
370	lpfc_mbuf_free(phba, mp->virt, mp->phys);
371	kfree(mp);
372	pmb->context1 = NULL;
373
374	if (phba->cfg_soft_wwnn)
375		u64_to_wwn(phba->cfg_soft_wwnn,
376			   vport->fc_sparam.nodeName.u.wwn);
377	if (phba->cfg_soft_wwpn)
378		u64_to_wwn(phba->cfg_soft_wwpn,
379			   vport->fc_sparam.portName.u.wwn);
380	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
381	       sizeof (struct lpfc_name));
382	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
383	       sizeof (struct lpfc_name));
384
385	/* Update the fc_host data structures with new wwn. */
386	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
387	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388	fc_host_max_npiv_vports(shost) = phba->max_vpi;
389
390	/* If no serial number in VPD data, use low 6 bytes of WWNN */
391	/* This should be consolidated into parse_vpd ? - mr */
392	if (phba->SerialNumber[0] == 0) {
393		uint8_t *outptr;
394
395		outptr = &vport->fc_nodename.u.s.IEEE[0];
396		for (i = 0; i < 12; i++) {
397			status = *outptr++;
398			j = ((status & 0xf0) >> 4);
399			if (j <= 9)
400				phba->SerialNumber[i] =
401				    (char)((uint8_t) 0x30 + (uint8_t) j);
402			else
403				phba->SerialNumber[i] =
404				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
405			i++;
406			j = (status & 0xf);
407			if (j <= 9)
408				phba->SerialNumber[i] =
409				    (char)((uint8_t) 0x30 + (uint8_t) j);
410			else
411				phba->SerialNumber[i] =
412				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
413		}
414	}
415
416	lpfc_read_config(phba, pmb);
417	pmb->vport = vport;
418	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
419		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
420				"0453 Adapter failed to init, mbxCmd x%x "
421				"READ_CONFIG, mbxStatus x%x\n",
422				mb->mbxCommand, mb->mbxStatus);
423		phba->link_state = LPFC_HBA_ERROR;
424		mempool_free( pmb, phba->mbox_mem_pool);
425		return -EIO;
426	}
427
428	/* Check if the port is disabled */
429	lpfc_sli_read_link_ste(phba);
430
431	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
432	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
433		phba->cfg_hba_queue_depth =
434			(mb->un.varRdConfig.max_xri + 1) -
435					lpfc_sli4_get_els_iocb_cnt(phba);
436
437	phba->lmt = mb->un.varRdConfig.lmt;
438
439	/* Get the default values for Model Name and Description */
440	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
441
442	if ((phba->cfg_link_speed > LINK_SPEED_10G)
443	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
444		&& !(phba->lmt & LMT_1Gb))
445	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
446		&& !(phba->lmt & LMT_2Gb))
447	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
448		&& !(phba->lmt & LMT_4Gb))
449	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
450		&& !(phba->lmt & LMT_8Gb))
451	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
452		&& !(phba->lmt & LMT_10Gb))) {
453		/* Reset link speed to auto */
454		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
455			"1302 Invalid speed for this board: "
456			"Reset link speed to auto: x%x\n",
457			phba->cfg_link_speed);
458			phba->cfg_link_speed = LINK_SPEED_AUTO;
459	}
460
461	phba->link_state = LPFC_LINK_DOWN;
462
463	/* Only process IOCBs on ELS ring till hba_state is READY */
464	if (psli->ring[psli->extra_ring].cmdringaddr)
465		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
466	if (psli->ring[psli->fcp_ring].cmdringaddr)
467		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
468	if (psli->ring[psli->next_ring].cmdringaddr)
469		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
470
471	/* Post receive buffers for desired rings */
472	if (phba->sli_rev != 3)
473		lpfc_post_rcv_buf(phba);
474
475	/*
476	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
477	 */
478	if (phba->intr_type == MSIX) {
479		rc = lpfc_config_msi(phba, pmb);
480		if (rc) {
481			mempool_free(pmb, phba->mbox_mem_pool);
482			return -EIO;
483		}
484		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
485		if (rc != MBX_SUCCESS) {
486			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
487					"0352 Config MSI mailbox command "
488					"failed, mbxCmd x%x, mbxStatus x%x\n",
489					pmb->u.mb.mbxCommand,
490					pmb->u.mb.mbxStatus);
491			mempool_free(pmb, phba->mbox_mem_pool);
492			return -EIO;
493		}
494	}
495
496	spin_lock_irq(&phba->hbalock);
497	/* Initialize ERATT handling flag */
498	phba->hba_flag &= ~HBA_ERATT_HANDLED;
499
500	/* Enable appropriate host interrupts */
501	status = readl(phba->HCregaddr);
502	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
503	if (psli->num_rings > 0)
504		status |= HC_R0INT_ENA;
505	if (psli->num_rings > 1)
506		status |= HC_R1INT_ENA;
507	if (psli->num_rings > 2)
508		status |= HC_R2INT_ENA;
509	if (psli->num_rings > 3)
510		status |= HC_R3INT_ENA;
511
512	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
513	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
514		status &= ~(HC_R0INT_ENA);
515
516	writel(status, phba->HCregaddr);
517	readl(phba->HCregaddr); /* flush */
518	spin_unlock_irq(&phba->hbalock);
519
520	/* Set up ring-0 (ELS) timer */
521	timeout = phba->fc_ratov * 2;
522	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
523	/* Set up heart beat (HB) timer */
524	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
525	phba->hb_outstanding = 0;
526	phba->last_completion_time = jiffies;
527	/* Set up error attention (ERATT) polling timer */
528	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
529
530	if (phba->hba_flag & LINK_DISABLED) {
531		lpfc_printf_log(phba,
532			KERN_ERR, LOG_INIT,
533			"2598 Adapter Link is disabled.\n");
534		lpfc_down_link(phba, pmb);
535		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
536		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
537		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
538			lpfc_printf_log(phba,
539			KERN_ERR, LOG_INIT,
540			"2599 Adapter failed to issue DOWN_LINK"
541			" mbox command rc 0x%x\n", rc);
542
543			mempool_free(pmb, phba->mbox_mem_pool);
544			return -EIO;
545		}
546	} else {
547		lpfc_init_link(phba, pmb, phba->cfg_topology,
548			phba->cfg_link_speed);
549		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
550		lpfc_set_loopback_flag(phba);
551		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
552		if (rc != MBX_SUCCESS) {
553			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
554				"0454 Adapter failed to init, mbxCmd x%x "
555				"INIT_LINK, mbxStatus x%x\n",
556				mb->mbxCommand, mb->mbxStatus);
557
558			/* Clear all interrupt enable conditions */
559			writel(0, phba->HCregaddr);
560			readl(phba->HCregaddr); /* flush */
561			/* Clear all pending interrupts */
562			writel(0xffffffff, phba->HAregaddr);
563			readl(phba->HAregaddr); /* flush */
564
565			phba->link_state = LPFC_HBA_ERROR;
566			if (rc != MBX_BUSY)
567				mempool_free(pmb, phba->mbox_mem_pool);
568			return -EIO;
569		}
570	}
571	/* MBOX buffer will be freed in mbox compl */
572	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
573	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
574	pmb->mbox_cmpl = lpfc_config_async_cmpl;
575	pmb->vport = phba->pport;
576	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
577
578	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
579		lpfc_printf_log(phba,
580				KERN_ERR,
581				LOG_INIT,
582				"0456 Adapter failed to issue "
583				"ASYNCEVT_ENABLE mbox status x%x \n.",
584				rc);
585		mempool_free(pmb, phba->mbox_mem_pool);
586	}
587
588	/* Get Option rom version */
589	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
590	lpfc_dump_wakeup_param(phba, pmb);
591	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
592	pmb->vport = phba->pport;
593	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
594
595	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
596		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
597				"to get Option ROM version status x%x\n.", rc);
598		mempool_free(pmb, phba->mbox_mem_pool);
599	}
600
601	return 0;
602}
603
604/**
605 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
606 * @phba: pointer to lpfc HBA data structure.
607 *
608 * This routine will do LPFC uninitialization before the HBA is reset when
609 * bringing down the SLI Layer.
610 *
611 * Return codes
612 *   0 - success.
613 *   Any other value - error.
614 **/
615int
616lpfc_hba_down_prep(struct lpfc_hba *phba)
617{
618	struct lpfc_vport **vports;
619	int i;
620
621	if (phba->sli_rev <= LPFC_SLI_REV3) {
622		/* Disable interrupts */
623		writel(0, phba->HCregaddr);
624		readl(phba->HCregaddr); /* flush */
625	}
626
627	if (phba->pport->load_flag & FC_UNLOADING)
628		lpfc_cleanup_discovery_resources(phba->pport);
629	else {
630		vports = lpfc_create_vport_work_array(phba);
631		if (vports != NULL)
632			for (i = 0; i <= phba->max_vports &&
633				vports[i] != NULL; i++)
634				lpfc_cleanup_discovery_resources(vports[i]);
635		lpfc_destroy_vport_work_array(phba, vports);
636	}
637	return 0;
638}
639
640/**
641 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
642 * @phba: pointer to lpfc HBA data structure.
643 *
644 * This routine will do uninitialization after the HBA is reset when bring
645 * down the SLI Layer.
646 *
647 * Return codes
648 *   0 - sucess.
649 *   Any other value - error.
650 **/
651static int
652lpfc_hba_down_post_s3(struct lpfc_hba *phba)
653{
654	struct lpfc_sli *psli = &phba->sli;
655	struct lpfc_sli_ring *pring;
656	struct lpfc_dmabuf *mp, *next_mp;
657	LIST_HEAD(completions);
658	int i;
659
660	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
661		lpfc_sli_hbqbuf_free_all(phba);
662	else {
663		/* Cleanup preposted buffers on the ELS ring */
664		pring = &psli->ring[LPFC_ELS_RING];
665		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
666			list_del(&mp->list);
667			pring->postbufq_cnt--;
668			lpfc_mbuf_free(phba, mp->virt, mp->phys);
669			kfree(mp);
670		}
671	}
672
673	spin_lock_irq(&phba->hbalock);
674	for (i = 0; i < psli->num_rings; i++) {
675		pring = &psli->ring[i];
676
677		/* At this point in time the HBA is either reset or DOA. Either
678		 * way, nothing should be on txcmplq as it will NEVER complete.
679		 */
680		list_splice_init(&pring->txcmplq, &completions);
681		pring->txcmplq_cnt = 0;
682		spin_unlock_irq(&phba->hbalock);
683
684		/* Cancel all the IOCBs from the completions list */
685		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
686				      IOERR_SLI_ABORTED);
687
688		lpfc_sli_abort_iocb_ring(phba, pring);
689		spin_lock_irq(&phba->hbalock);
690	}
691	spin_unlock_irq(&phba->hbalock);
692
693	return 0;
694}
695/**
696 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
697 * @phba: pointer to lpfc HBA data structure.
698 *
699 * This routine will do uninitialization after the HBA is reset when bring
700 * down the SLI Layer.
701 *
702 * Return codes
703 *   0 - sucess.
704 *   Any other value - error.
705 **/
706static int
707lpfc_hba_down_post_s4(struct lpfc_hba *phba)
708{
709	struct lpfc_scsi_buf *psb, *psb_next;
710	LIST_HEAD(aborts);
711	int ret;
712	unsigned long iflag = 0;
713	ret = lpfc_hba_down_post_s3(phba);
714	if (ret)
715		return ret;
716	/* At this point in time the HBA is either reset or DOA. Either
717	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
718	 * on the lpfc_sgl_list so that it can either be freed if the
719	 * driver is unloading or reposted if the driver is restarting
720	 * the port.
721	 */
722	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
723					/* scsl_buf_list */
724	/* abts_sgl_list_lock required because worker thread uses this
725	 * list.
726	 */
727	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
728	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
729			&phba->sli4_hba.lpfc_sgl_list);
730	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
731	/* abts_scsi_buf_list_lock required because worker thread uses this
732	 * list.
733	 */
734	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
735	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
736			&aborts);
737	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
738	spin_unlock_irq(&phba->hbalock);
739
740	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
741		psb->pCmd = NULL;
742		psb->status = IOSTAT_SUCCESS;
743	}
744	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
745	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
746	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
747	return 0;
748}
749
750/**
751 * lpfc_hba_down_post - Wrapper func for hba down post routine
752 * @phba: pointer to lpfc HBA data structure.
753 *
754 * This routine wraps the actual SLI3 or SLI4 routine for performing
755 * uninitialization after the HBA is reset when bring down the SLI Layer.
756 *
757 * Return codes
758 *   0 - sucess.
759 *   Any other value - error.
760 **/
761int
762lpfc_hba_down_post(struct lpfc_hba *phba)
763{
764	return (*phba->lpfc_hba_down_post)(phba);
765}
766
767/**
768 * lpfc_hb_timeout - The HBA-timer timeout handler
769 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
770 *
771 * This is the HBA-timer timeout handler registered to the lpfc driver. When
772 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
773 * work-port-events bitmap and the worker thread is notified. This timeout
774 * event will be used by the worker thread to invoke the actual timeout
775 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
776 * be performed in the timeout handler and the HBA timeout event bit shall
777 * be cleared by the worker thread after it has taken the event bitmap out.
778 **/
779static void
780lpfc_hb_timeout(unsigned long ptr)
781{
782	struct lpfc_hba *phba;
783	uint32_t tmo_posted;
784	unsigned long iflag;
785
786	phba = (struct lpfc_hba *)ptr;
787
788	/* Check for heart beat timeout conditions */
789	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
790	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
791	if (!tmo_posted)
792		phba->pport->work_port_events |= WORKER_HB_TMO;
793	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
794
795	/* Tell the worker thread there is work to do */
796	if (!tmo_posted)
797		lpfc_worker_wake_up(phba);
798	return;
799}
800
801/**
802 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
803 * @phba: pointer to lpfc hba data structure.
804 * @pmboxq: pointer to the driver internal queue element for mailbox command.
805 *
806 * This is the callback function to the lpfc heart-beat mailbox command.
807 * If configured, the lpfc driver issues the heart-beat mailbox command to
808 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
809 * heart-beat mailbox command is issued, the driver shall set up heart-beat
810 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
811 * heart-beat outstanding state. Once the mailbox command comes back and
812 * no error conditions detected, the heart-beat mailbox command timer is
813 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
814 * state is cleared for the next heart-beat. If the timer expired with the
815 * heart-beat outstanding state set, the driver will put the HBA offline.
816 **/
817static void
818lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
819{
820	unsigned long drvr_flag;
821
822	spin_lock_irqsave(&phba->hbalock, drvr_flag);
823	phba->hb_outstanding = 0;
824	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
825
826	/* Check and reset heart-beat timer is necessary */
827	mempool_free(pmboxq, phba->mbox_mem_pool);
828	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
829		!(phba->link_state == LPFC_HBA_ERROR) &&
830		!(phba->pport->load_flag & FC_UNLOADING))
831		mod_timer(&phba->hb_tmofunc,
832			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
833	return;
834}
835
836/**
837 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
838 * @phba: pointer to lpfc hba data structure.
839 *
840 * This is the actual HBA-timer timeout handler to be invoked by the worker
841 * thread whenever the HBA timer fired and HBA-timeout event posted. This
842 * handler performs any periodic operations needed for the device. If such
843 * periodic event has already been attended to either in the interrupt handler
844 * or by processing slow-ring or fast-ring events within the HBA-timer
845 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
846 * the timer for the next timeout period. If lpfc heart-beat mailbox command
847 * is configured and there is no heart-beat mailbox command outstanding, a
848 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
849 * has been a heart-beat mailbox command outstanding, the HBA shall be put
850 * to offline.
851 **/
852void
853lpfc_hb_timeout_handler(struct lpfc_hba *phba)
854{
855	LPFC_MBOXQ_t *pmboxq;
856	struct lpfc_dmabuf *buf_ptr;
857	int retval;
858	struct lpfc_sli *psli = &phba->sli;
859	LIST_HEAD(completions);
860
861	if ((phba->link_state == LPFC_HBA_ERROR) ||
862		(phba->pport->load_flag & FC_UNLOADING) ||
863		(phba->pport->fc_flag & FC_OFFLINE_MODE))
864		return;
865
866	spin_lock_irq(&phba->pport->work_port_lock);
867
868	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
869		jiffies)) {
870		spin_unlock_irq(&phba->pport->work_port_lock);
871		if (!phba->hb_outstanding)
872			mod_timer(&phba->hb_tmofunc,
873				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
874		else
875			mod_timer(&phba->hb_tmofunc,
876				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
877		return;
878	}
879	spin_unlock_irq(&phba->pport->work_port_lock);
880
881	if (phba->elsbuf_cnt &&
882		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
883		spin_lock_irq(&phba->hbalock);
884		list_splice_init(&phba->elsbuf, &completions);
885		phba->elsbuf_cnt = 0;
886		phba->elsbuf_prev_cnt = 0;
887		spin_unlock_irq(&phba->hbalock);
888
889		while (!list_empty(&completions)) {
890			list_remove_head(&completions, buf_ptr,
891				struct lpfc_dmabuf, list);
892			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
893			kfree(buf_ptr);
894		}
895	}
896	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
897
898	/* If there is no heart beat outstanding, issue a heartbeat command */
899	if (phba->cfg_enable_hba_heartbeat) {
900		if (!phba->hb_outstanding) {
901			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
902			if (!pmboxq) {
903				mod_timer(&phba->hb_tmofunc,
904					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
905				return;
906			}
907
908			lpfc_heart_beat(phba, pmboxq);
909			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
910			pmboxq->vport = phba->pport;
911			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
912
913			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
914				mempool_free(pmboxq, phba->mbox_mem_pool);
915				mod_timer(&phba->hb_tmofunc,
916					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
917				return;
918			}
919			mod_timer(&phba->hb_tmofunc,
920				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
921			phba->hb_outstanding = 1;
922			return;
923		} else {
924			/*
925			* If heart beat timeout called with hb_outstanding set
926			* we need to take the HBA offline.
927			*/
928			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
929					"0459 Adapter heartbeat failure, "
930					"taking this port offline.\n");
931
932			spin_lock_irq(&phba->hbalock);
933			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
934			spin_unlock_irq(&phba->hbalock);
935
936			lpfc_offline_prep(phba);
937			lpfc_offline(phba);
938			lpfc_unblock_mgmt_io(phba);
939			phba->link_state = LPFC_HBA_ERROR;
940			lpfc_hba_down_post(phba);
941		}
942	}
943}
944
945/**
946 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
947 * @phba: pointer to lpfc hba data structure.
948 *
949 * This routine is called to bring the HBA offline when HBA hardware error
950 * other than Port Error 6 has been detected.
951 **/
952static void
953lpfc_offline_eratt(struct lpfc_hba *phba)
954{
955	struct lpfc_sli   *psli = &phba->sli;
956
957	spin_lock_irq(&phba->hbalock);
958	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
959	spin_unlock_irq(&phba->hbalock);
960	lpfc_offline_prep(phba);
961
962	lpfc_offline(phba);
963	lpfc_reset_barrier(phba);
964	spin_lock_irq(&phba->hbalock);
965	lpfc_sli_brdreset(phba);
966	spin_unlock_irq(&phba->hbalock);
967	lpfc_hba_down_post(phba);
968	lpfc_sli_brdready(phba, HS_MBRDY);
969	lpfc_unblock_mgmt_io(phba);
970	phba->link_state = LPFC_HBA_ERROR;
971	return;
972}
973
974/**
975 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
976 * @phba: pointer to lpfc hba data structure.
977 *
978 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
979 * other than Port Error 6 has been detected.
980 **/
981static void
982lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
983{
984	lpfc_offline_prep(phba);
985	lpfc_offline(phba);
986	lpfc_sli4_brdreset(phba);
987	lpfc_hba_down_post(phba);
988	lpfc_sli4_post_status_check(phba);
989	lpfc_unblock_mgmt_io(phba);
990	phba->link_state = LPFC_HBA_ERROR;
991}
992
993/**
994 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
995 * @phba: pointer to lpfc hba data structure.
996 *
997 * This routine is invoked to handle the deferred HBA hardware error
998 * conditions. This type of error is indicated by HBA by setting ER1
999 * and another ER bit in the host status register. The driver will
1000 * wait until the ER1 bit clears before handling the error condition.
1001 **/
1002static void
1003lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1004{
1005	uint32_t old_host_status = phba->work_hs;
1006	struct lpfc_sli_ring  *pring;
1007	struct lpfc_sli *psli = &phba->sli;
1008
1009	/* If the pci channel is offline, ignore possible errors,
1010	 * since we cannot communicate with the pci card anyway.
1011	 */
1012	if (pci_channel_offline(phba->pcidev)) {
1013		spin_lock_irq(&phba->hbalock);
1014		phba->hba_flag &= ~DEFER_ERATT;
1015		spin_unlock_irq(&phba->hbalock);
1016		return;
1017	}
1018
1019	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1020		"0479 Deferred Adapter Hardware Error "
1021		"Data: x%x x%x x%x\n",
1022		phba->work_hs,
1023		phba->work_status[0], phba->work_status[1]);
1024
1025	spin_lock_irq(&phba->hbalock);
1026	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1027	spin_unlock_irq(&phba->hbalock);
1028
1029
1030	/*
1031	 * Firmware stops when it triggred erratt. That could cause the I/Os
1032	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1033	 * SCSI layer retry it after re-establishing link.
1034	 */
1035	pring = &psli->ring[psli->fcp_ring];
1036	lpfc_sli_abort_iocb_ring(phba, pring);
1037
1038	/*
1039	 * There was a firmware error. Take the hba offline and then
1040	 * attempt to restart it.
1041	 */
1042	lpfc_offline_prep(phba);
1043	lpfc_offline(phba);
1044
1045	/* Wait for the ER1 bit to clear.*/
1046	while (phba->work_hs & HS_FFER1) {
1047		msleep(100);
1048		phba->work_hs = readl(phba->HSregaddr);
1049		/* If driver is unloading let the worker thread continue */
1050		if (phba->pport->load_flag & FC_UNLOADING) {
1051			phba->work_hs = 0;
1052			break;
1053		}
1054	}
1055
1056	/*
1057	 * This is to ptrotect against a race condition in which
1058	 * first write to the host attention register clear the
1059	 * host status register.
1060	 */
1061	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1062		phba->work_hs = old_host_status & ~HS_FFER1;
1063
1064	spin_lock_irq(&phba->hbalock);
1065	phba->hba_flag &= ~DEFER_ERATT;
1066	spin_unlock_irq(&phba->hbalock);
1067	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1068	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1069}
1070
1071static void
1072lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1073{
1074	struct lpfc_board_event_header board_event;
1075	struct Scsi_Host *shost;
1076
1077	board_event.event_type = FC_REG_BOARD_EVENT;
1078	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1079	shost = lpfc_shost_from_vport(phba->pport);
1080	fc_host_post_vendor_event(shost, fc_get_event_number(),
1081				  sizeof(board_event),
1082				  (char *) &board_event,
1083				  LPFC_NL_VENDOR_ID);
1084}
1085
1086/**
1087 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1088 * @phba: pointer to lpfc hba data structure.
1089 *
1090 * This routine is invoked to handle the following HBA hardware error
1091 * conditions:
1092 * 1 - HBA error attention interrupt
1093 * 2 - DMA ring index out of range
1094 * 3 - Mailbox command came back as unknown
1095 **/
1096static void
1097lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1098{
1099	struct lpfc_vport *vport = phba->pport;
1100	struct lpfc_sli   *psli = &phba->sli;
1101	struct lpfc_sli_ring  *pring;
1102	uint32_t event_data;
1103	unsigned long temperature;
1104	struct temp_event temp_event_data;
1105	struct Scsi_Host  *shost;
1106
1107	/* If the pci channel is offline, ignore possible errors,
1108	 * since we cannot communicate with the pci card anyway.
1109	 */
1110	if (pci_channel_offline(phba->pcidev)) {
1111		spin_lock_irq(&phba->hbalock);
1112		phba->hba_flag &= ~DEFER_ERATT;
1113		spin_unlock_irq(&phba->hbalock);
1114		return;
1115	}
1116
1117	/* If resets are disabled then leave the HBA alone and return */
1118	if (!phba->cfg_enable_hba_reset)
1119		return;
1120
1121	/* Send an internal error event to mgmt application */
1122	lpfc_board_errevt_to_mgmt(phba);
1123
1124	if (phba->hba_flag & DEFER_ERATT)
1125		lpfc_handle_deferred_eratt(phba);
1126
1127	if (phba->work_hs & HS_FFER6) {
1128		/* Re-establishing Link */
1129		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1130				"1301 Re-establishing Link "
1131				"Data: x%x x%x x%x\n",
1132				phba->work_hs,
1133				phba->work_status[0], phba->work_status[1]);
1134
1135		spin_lock_irq(&phba->hbalock);
1136		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1137		spin_unlock_irq(&phba->hbalock);
1138
1139		/*
1140		* Firmware stops when it triggled erratt with HS_FFER6.
1141		* That could cause the I/Os dropped by the firmware.
1142		* Error iocb (I/O) on txcmplq and let the SCSI layer
1143		* retry it after re-establishing link.
1144		*/
1145		pring = &psli->ring[psli->fcp_ring];
1146		lpfc_sli_abort_iocb_ring(phba, pring);
1147
1148		/*
1149		 * There was a firmware error.  Take the hba offline and then
1150		 * attempt to restart it.
1151		 */
1152		lpfc_offline_prep(phba);
1153		lpfc_offline(phba);
1154		lpfc_sli_brdrestart(phba);
1155		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1156			lpfc_unblock_mgmt_io(phba);
1157			return;
1158		}
1159		lpfc_unblock_mgmt_io(phba);
1160	} else if (phba->work_hs & HS_CRIT_TEMP) {
1161		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1162		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1163		temp_event_data.event_code = LPFC_CRIT_TEMP;
1164		temp_event_data.data = (uint32_t)temperature;
1165
1166		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1167				"0406 Adapter maximum temperature exceeded "
1168				"(%ld), taking this port offline "
1169				"Data: x%x x%x x%x\n",
1170				temperature, phba->work_hs,
1171				phba->work_status[0], phba->work_status[1]);
1172
1173		shost = lpfc_shost_from_vport(phba->pport);
1174		fc_host_post_vendor_event(shost, fc_get_event_number(),
1175					  sizeof(temp_event_data),
1176					  (char *) &temp_event_data,
1177					  SCSI_NL_VID_TYPE_PCI
1178					  | PCI_VENDOR_ID_EMULEX);
1179
1180		spin_lock_irq(&phba->hbalock);
1181		phba->over_temp_state = HBA_OVER_TEMP;
1182		spin_unlock_irq(&phba->hbalock);
1183		lpfc_offline_eratt(phba);
1184
1185	} else {
1186		/* The if clause above forces this code path when the status
1187		 * failure is a value other than FFER6. Do not call the offline
1188		 * twice. This is the adapter hardware error path.
1189		 */
1190		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1191				"0457 Adapter Hardware Error "
1192				"Data: x%x x%x x%x\n",
1193				phba->work_hs,
1194				phba->work_status[0], phba->work_status[1]);
1195
1196		event_data = FC_REG_DUMP_EVENT;
1197		shost = lpfc_shost_from_vport(vport);
1198		fc_host_post_vendor_event(shost, fc_get_event_number(),
1199				sizeof(event_data), (char *) &event_data,
1200				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1201
1202		lpfc_offline_eratt(phba);
1203	}
1204	return;
1205}
1206
1207/**
1208 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1209 * @phba: pointer to lpfc hba data structure.
1210 *
1211 * This routine is invoked to handle the SLI4 HBA hardware error attention
1212 * conditions.
1213 **/
1214static void
1215lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1216{
1217	struct lpfc_vport *vport = phba->pport;
1218	uint32_t event_data;
1219	struct Scsi_Host *shost;
1220
1221	/* If the pci channel is offline, ignore possible errors, since
1222	 * we cannot communicate with the pci card anyway.
1223	 */
1224	if (pci_channel_offline(phba->pcidev))
1225		return;
1226	/* If resets are disabled then leave the HBA alone and return */
1227	if (!phba->cfg_enable_hba_reset)
1228		return;
1229
1230	/* Send an internal error event to mgmt application */
1231	lpfc_board_errevt_to_mgmt(phba);
1232
1233	/* For now, the actual action for SLI4 device handling is not
1234	 * specified yet, just treated it as adaptor hardware failure
1235	 */
1236	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1237			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1238			phba->work_status[0], phba->work_status[1]);
1239
1240	event_data = FC_REG_DUMP_EVENT;
1241	shost = lpfc_shost_from_vport(vport);
1242	fc_host_post_vendor_event(shost, fc_get_event_number(),
1243				  sizeof(event_data), (char *) &event_data,
1244				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1245
1246	lpfc_sli4_offline_eratt(phba);
1247}
1248
1249/**
1250 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1251 * @phba: pointer to lpfc HBA data structure.
1252 *
1253 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1254 * routine from the API jump table function pointer from the lpfc_hba struct.
1255 *
1256 * Return codes
1257 *   0 - sucess.
1258 *   Any other value - error.
1259 **/
1260void
1261lpfc_handle_eratt(struct lpfc_hba *phba)
1262{
1263	(*phba->lpfc_handle_eratt)(phba);
1264}
1265
1266/**
1267 * lpfc_handle_latt - The HBA link event handler
1268 * @phba: pointer to lpfc hba data structure.
1269 *
1270 * This routine is invoked from the worker thread to handle a HBA host
1271 * attention link event.
1272 **/
1273void
1274lpfc_handle_latt(struct lpfc_hba *phba)
1275{
1276	struct lpfc_vport *vport = phba->pport;
1277	struct lpfc_sli   *psli = &phba->sli;
1278	LPFC_MBOXQ_t *pmb;
1279	volatile uint32_t control;
1280	struct lpfc_dmabuf *mp;
1281	int rc = 0;
1282
1283	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1284	if (!pmb) {
1285		rc = 1;
1286		goto lpfc_handle_latt_err_exit;
1287	}
1288
1289	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1290	if (!mp) {
1291		rc = 2;
1292		goto lpfc_handle_latt_free_pmb;
1293	}
1294
1295	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1296	if (!mp->virt) {
1297		rc = 3;
1298		goto lpfc_handle_latt_free_mp;
1299	}
1300
1301	/* Cleanup any outstanding ELS commands */
1302	lpfc_els_flush_all_cmd(phba);
1303
1304	psli->slistat.link_event++;
1305	lpfc_read_la(phba, pmb, mp);
1306	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1307	pmb->vport = vport;
1308	/* Block ELS IOCBs until we have processed this mbox command */
1309	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1310	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1311	if (rc == MBX_NOT_FINISHED) {
1312		rc = 4;
1313		goto lpfc_handle_latt_free_mbuf;
1314	}
1315
1316	/* Clear Link Attention in HA REG */
1317	spin_lock_irq(&phba->hbalock);
1318	writel(HA_LATT, phba->HAregaddr);
1319	readl(phba->HAregaddr); /* flush */
1320	spin_unlock_irq(&phba->hbalock);
1321
1322	return;
1323
1324lpfc_handle_latt_free_mbuf:
1325	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1326	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1327lpfc_handle_latt_free_mp:
1328	kfree(mp);
1329lpfc_handle_latt_free_pmb:
1330	mempool_free(pmb, phba->mbox_mem_pool);
1331lpfc_handle_latt_err_exit:
1332	/* Enable Link attention interrupts */
1333	spin_lock_irq(&phba->hbalock);
1334	psli->sli_flag |= LPFC_PROCESS_LA;
1335	control = readl(phba->HCregaddr);
1336	control |= HC_LAINT_ENA;
1337	writel(control, phba->HCregaddr);
1338	readl(phba->HCregaddr); /* flush */
1339
1340	/* Clear Link Attention in HA REG */
1341	writel(HA_LATT, phba->HAregaddr);
1342	readl(phba->HAregaddr); /* flush */
1343	spin_unlock_irq(&phba->hbalock);
1344	lpfc_linkdown(phba);
1345	phba->link_state = LPFC_HBA_ERROR;
1346
1347	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1348		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1349
1350	return;
1351}
1352
1353/**
1354 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1355 * @phba: pointer to lpfc hba data structure.
1356 * @vpd: pointer to the vital product data.
1357 * @len: length of the vital product data in bytes.
1358 *
1359 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1360 * an array of characters. In this routine, the ModelName, ProgramType, and
1361 * ModelDesc, etc. fields of the phba data structure will be populated.
1362 *
1363 * Return codes
1364 *   0 - pointer to the VPD passed in is NULL
1365 *   1 - success
1366 **/
1367int
1368lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1369{
1370	uint8_t lenlo, lenhi;
1371	int Length;
1372	int i, j;
1373	int finished = 0;
1374	int index = 0;
1375
1376	if (!vpd)
1377		return 0;
1378
1379	/* Vital Product */
1380	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1381			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1382			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1383			(uint32_t) vpd[3]);
1384	while (!finished && (index < (len - 4))) {
1385		switch (vpd[index]) {
1386		case 0x82:
1387		case 0x91:
1388			index += 1;
1389			lenlo = vpd[index];
1390			index += 1;
1391			lenhi = vpd[index];
1392			index += 1;
1393			i = ((((unsigned short)lenhi) << 8) + lenlo);
1394			index += i;
1395			break;
1396		case 0x90:
1397			index += 1;
1398			lenlo = vpd[index];
1399			index += 1;
1400			lenhi = vpd[index];
1401			index += 1;
1402			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1403			if (Length > len - index)
1404				Length = len - index;
1405			while (Length > 0) {
1406			/* Look for Serial Number */
1407			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1408				index += 2;
1409				i = vpd[index];
1410				index += 1;
1411				j = 0;
1412				Length -= (3+i);
1413				while(i--) {
1414					phba->SerialNumber[j++] = vpd[index++];
1415					if (j == 31)
1416						break;
1417				}
1418				phba->SerialNumber[j] = 0;
1419				continue;
1420			}
1421			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1422				phba->vpd_flag |= VPD_MODEL_DESC;
1423				index += 2;
1424				i = vpd[index];
1425				index += 1;
1426				j = 0;
1427				Length -= (3+i);
1428				while(i--) {
1429					phba->ModelDesc[j++] = vpd[index++];
1430					if (j == 255)
1431						break;
1432				}
1433				phba->ModelDesc[j] = 0;
1434				continue;
1435			}
1436			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1437				phba->vpd_flag |= VPD_MODEL_NAME;
1438				index += 2;
1439				i = vpd[index];
1440				index += 1;
1441				j = 0;
1442				Length -= (3+i);
1443				while(i--) {
1444					phba->ModelName[j++] = vpd[index++];
1445					if (j == 79)
1446						break;
1447				}
1448				phba->ModelName[j] = 0;
1449				continue;
1450			}
1451			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1452				phba->vpd_flag |= VPD_PROGRAM_TYPE;
1453				index += 2;
1454				i = vpd[index];
1455				index += 1;
1456				j = 0;
1457				Length -= (3+i);
1458				while(i--) {
1459					phba->ProgramType[j++] = vpd[index++];
1460					if (j == 255)
1461						break;
1462				}
1463				phba->ProgramType[j] = 0;
1464				continue;
1465			}
1466			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1467				phba->vpd_flag |= VPD_PORT;
1468				index += 2;
1469				i = vpd[index];
1470				index += 1;
1471				j = 0;
1472				Length -= (3+i);
1473				while(i--) {
1474				phba->Port[j++] = vpd[index++];
1475				if (j == 19)
1476					break;
1477				}
1478				phba->Port[j] = 0;
1479				continue;
1480			}
1481			else {
1482				index += 2;
1483				i = vpd[index];
1484				index += 1;
1485				index += i;
1486				Length -= (3 + i);
1487			}
1488		}
1489		finished = 0;
1490		break;
1491		case 0x78:
1492			finished = 1;
1493			break;
1494		default:
1495			index ++;
1496			break;
1497		}
1498	}
1499
1500	return(1);
1501}
1502
1503/**
1504 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1505 * @phba: pointer to lpfc hba data structure.
1506 * @mdp: pointer to the data structure to hold the derived model name.
1507 * @descp: pointer to the data structure to hold the derived description.
1508 *
1509 * This routine retrieves HBA's description based on its registered PCI device
1510 * ID. The @descp passed into this function points to an array of 256 chars. It
1511 * shall be returned with the model name, maximum speed, and the host bus type.
1512 * The @mdp passed into this function points to an array of 80 chars. When the
1513 * function returns, the @mdp will be filled with the model name.
1514 **/
1515static void
1516lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1517{
1518	lpfc_vpd_t *vp;
1519	uint16_t dev_id = phba->pcidev->device;
1520	int max_speed;
1521	int GE = 0;
1522	int oneConnect = 0; /* default is not a oneConnect */
1523	struct {
1524		char * name;
1525		int    max_speed;
1526		char * bus;
1527	} m = {"<Unknown>", 0, ""};
1528
1529	if (mdp && mdp[0] != '\0'
1530		&& descp && descp[0] != '\0')
1531		return;
1532
1533	if (phba->lmt & LMT_10Gb)
1534		max_speed = 10;
1535	else if (phba->lmt & LMT_8Gb)
1536		max_speed = 8;
1537	else if (phba->lmt & LMT_4Gb)
1538		max_speed = 4;
1539	else if (phba->lmt & LMT_2Gb)
1540		max_speed = 2;
1541	else
1542		max_speed = 1;
1543
1544	vp = &phba->vpd;
1545
1546	switch (dev_id) {
1547	case PCI_DEVICE_ID_FIREFLY:
1548		m = (typeof(m)){"LP6000", max_speed, "PCI"};
1549		break;
1550	case PCI_DEVICE_ID_SUPERFLY:
1551		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1552			m = (typeof(m)){"LP7000", max_speed,  "PCI"};
1553		else
1554			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
1555		break;
1556	case PCI_DEVICE_ID_DRAGONFLY:
1557		m = (typeof(m)){"LP8000", max_speed, "PCI"};
1558		break;
1559	case PCI_DEVICE_ID_CENTAUR:
1560		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1561			m = (typeof(m)){"LP9002", max_speed, "PCI"};
1562		else
1563			m = (typeof(m)){"LP9000", max_speed, "PCI"};
1564		break;
1565	case PCI_DEVICE_ID_RFLY:
1566		m = (typeof(m)){"LP952", max_speed, "PCI"};
1567		break;
1568	case PCI_DEVICE_ID_PEGASUS:
1569		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
1570		break;
1571	case PCI_DEVICE_ID_THOR:
1572		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
1573		break;
1574	case PCI_DEVICE_ID_VIPER:
1575		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
1576		break;
1577	case PCI_DEVICE_ID_PFLY:
1578		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
1579		break;
1580	case PCI_DEVICE_ID_TFLY:
1581		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
1582		break;
1583	case PCI_DEVICE_ID_HELIOS:
1584		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
1585		break;
1586	case PCI_DEVICE_ID_HELIOS_SCSP:
1587		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
1588		break;
1589	case PCI_DEVICE_ID_HELIOS_DCSP:
1590		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
1591		break;
1592	case PCI_DEVICE_ID_NEPTUNE:
1593		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
1594		break;
1595	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1596		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
1597		break;
1598	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1599		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
1600		break;
1601	case PCI_DEVICE_ID_BMID:
1602		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
1603		break;
1604	case PCI_DEVICE_ID_BSMB:
1605		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
1606		break;
1607	case PCI_DEVICE_ID_ZEPHYR:
1608		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1609		break;
1610	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1611		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1612		break;
1613	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1614		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
1615		GE = 1;
1616		break;
1617	case PCI_DEVICE_ID_ZMID:
1618		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
1619		break;
1620	case PCI_DEVICE_ID_ZSMB:
1621		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
1622		break;
1623	case PCI_DEVICE_ID_LP101:
1624		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
1625		break;
1626	case PCI_DEVICE_ID_LP10000S:
1627		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
1628		break;
1629	case PCI_DEVICE_ID_LP11000S:
1630		m = (typeof(m)){"LP11000-S", max_speed,
1631			"PCI-X2"};
1632		break;
1633	case PCI_DEVICE_ID_LPE11000S:
1634		m = (typeof(m)){"LPe11000-S", max_speed,
1635			"PCIe"};
1636		break;
1637	case PCI_DEVICE_ID_SAT:
1638		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1639		break;
1640	case PCI_DEVICE_ID_SAT_MID:
1641		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1642		break;
1643	case PCI_DEVICE_ID_SAT_SMB:
1644		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1645		break;
1646	case PCI_DEVICE_ID_SAT_DCSP:
1647		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1648		break;
1649	case PCI_DEVICE_ID_SAT_SCSP:
1650		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1651		break;
1652	case PCI_DEVICE_ID_SAT_S:
1653		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1654		break;
1655	case PCI_DEVICE_ID_HORNET:
1656		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1657		GE = 1;
1658		break;
1659	case PCI_DEVICE_ID_PROTEUS_VF:
1660		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1661		break;
1662	case PCI_DEVICE_ID_PROTEUS_PF:
1663		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1664		break;
1665	case PCI_DEVICE_ID_PROTEUS_S:
1666		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1667		break;
1668	case PCI_DEVICE_ID_TIGERSHARK:
1669		oneConnect = 1;
1670		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1671		break;
1672	default:
1673		m = (typeof(m)){ NULL };
1674		break;
1675	}
1676
1677	if (mdp && mdp[0] == '\0')
1678		snprintf(mdp, 79,"%s", m.name);
1679	/* oneConnect hba requires special processing, they are all initiators
1680	 * and we put the port number on the end
1681	 */
1682	if (descp && descp[0] == '\0') {
1683		if (oneConnect)
1684			snprintf(descp, 255,
1685				"Emulex OneConnect %s, FCoE Initiator, Port %s",
1686				m.name,
1687				phba->Port);
1688		else
1689			snprintf(descp, 255,
1690				"Emulex %s %d%s %s %s",
1691				m.name, m.max_speed,
1692				(GE) ? "GE" : "Gb",
1693				m.bus,
1694				(GE) ? "FCoE Adapter" :
1695					"Fibre Channel Adapter");
1696	}
1697}
1698
1699/**
1700 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1701 * @phba: pointer to lpfc hba data structure.
1702 * @pring: pointer to a IOCB ring.
1703 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1704 *
1705 * This routine posts a given number of IOCBs with the associated DMA buffer
1706 * descriptors specified by the cnt argument to the given IOCB ring.
1707 *
1708 * Return codes
1709 *   The number of IOCBs NOT able to be posted to the IOCB ring.
1710 **/
1711int
1712lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1713{
1714	IOCB_t *icmd;
1715	struct lpfc_iocbq *iocb;
1716	struct lpfc_dmabuf *mp1, *mp2;
1717
1718	cnt += pring->missbufcnt;
1719
1720	/* While there are buffers to post */
1721	while (cnt > 0) {
1722		/* Allocate buffer for  command iocb */
1723		iocb = lpfc_sli_get_iocbq(phba);
1724		if (iocb == NULL) {
1725			pring->missbufcnt = cnt;
1726			return cnt;
1727		}
1728		icmd = &iocb->iocb;
1729
1730		/* 2 buffers can be posted per command */
1731		/* Allocate buffer to post */
1732		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1733		if (mp1)
1734		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1735		if (!mp1 || !mp1->virt) {
1736			kfree(mp1);
1737			lpfc_sli_release_iocbq(phba, iocb);
1738			pring->missbufcnt = cnt;
1739			return cnt;
1740		}
1741
1742		INIT_LIST_HEAD(&mp1->list);
1743		/* Allocate buffer to post */
1744		if (cnt > 1) {
1745			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1746			if (mp2)
1747				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1748							    &mp2->phys);
1749			if (!mp2 || !mp2->virt) {
1750				kfree(mp2);
1751				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1752				kfree(mp1);
1753				lpfc_sli_release_iocbq(phba, iocb);
1754				pring->missbufcnt = cnt;
1755				return cnt;
1756			}
1757
1758			INIT_LIST_HEAD(&mp2->list);
1759		} else {
1760			mp2 = NULL;
1761		}
1762
1763		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1764		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1765		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1766		icmd->ulpBdeCount = 1;
1767		cnt--;
1768		if (mp2) {
1769			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1770			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1771			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1772			cnt--;
1773			icmd->ulpBdeCount = 2;
1774		}
1775
1776		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1777		icmd->ulpLe = 1;
1778
1779		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1780		    IOCB_ERROR) {
1781			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1782			kfree(mp1);
1783			cnt++;
1784			if (mp2) {
1785				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1786				kfree(mp2);
1787				cnt++;
1788			}
1789			lpfc_sli_release_iocbq(phba, iocb);
1790			pring->missbufcnt = cnt;
1791			return cnt;
1792		}
1793		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1794		if (mp2)
1795			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1796	}
1797	pring->missbufcnt = 0;
1798	return 0;
1799}
1800
1801/**
1802 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1803 * @phba: pointer to lpfc hba data structure.
1804 *
1805 * This routine posts initial receive IOCB buffers to the ELS ring. The
1806 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1807 * set to 64 IOCBs.
1808 *
1809 * Return codes
1810 *   0 - success (currently always success)
1811 **/
1812static int
1813lpfc_post_rcv_buf(struct lpfc_hba *phba)
1814{
1815	struct lpfc_sli *psli = &phba->sli;
1816
1817	/* Ring 0, ELS / CT buffers */
1818	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1819	/* Ring 2 - FCP no buffers needed */
1820
1821	return 0;
1822}
1823
1824#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1825
1826/**
1827 * lpfc_sha_init - Set up initial array of hash table entries
1828 * @HashResultPointer: pointer to an array as hash table.
1829 *
1830 * This routine sets up the initial values to the array of hash table entries
1831 * for the LC HBAs.
1832 **/
1833static void
1834lpfc_sha_init(uint32_t * HashResultPointer)
1835{
1836	HashResultPointer[0] = 0x67452301;
1837	HashResultPointer[1] = 0xEFCDAB89;
1838	HashResultPointer[2] = 0x98BADCFE;
1839	HashResultPointer[3] = 0x10325476;
1840	HashResultPointer[4] = 0xC3D2E1F0;
1841}
1842
1843/**
1844 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1845 * @HashResultPointer: pointer to an initial/result hash table.
1846 * @HashWorkingPointer: pointer to an working hash table.
1847 *
1848 * This routine iterates an initial hash table pointed by @HashResultPointer
1849 * with the values from the working hash table pointeed by @HashWorkingPointer.
1850 * The results are putting back to the initial hash table, returned through
1851 * the @HashResultPointer as the result hash table.
1852 **/
1853static void
1854lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1855{
1856	int t;
1857	uint32_t TEMP;
1858	uint32_t A, B, C, D, E;
1859	t = 16;
1860	do {
1861		HashWorkingPointer[t] =
1862		    S(1,
1863		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1864								     8] ^
1865		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1866	} while (++t <= 79);
1867	t = 0;
1868	A = HashResultPointer[0];
1869	B = HashResultPointer[1];
1870	C = HashResultPointer[2];
1871	D = HashResultPointer[3];
1872	E = HashResultPointer[4];
1873
1874	do {
1875		if (t < 20) {
1876			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1877		} else if (t < 40) {
1878			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1879		} else if (t < 60) {
1880			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1881		} else {
1882			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1883		}
1884		TEMP += S(5, A) + E + HashWorkingPointer[t];
1885		E = D;
1886		D = C;
1887		C = S(30, B);
1888		B = A;
1889		A = TEMP;
1890	} while (++t <= 79);
1891
1892	HashResultPointer[0] += A;
1893	HashResultPointer[1] += B;
1894	HashResultPointer[2] += C;
1895	HashResultPointer[3] += D;
1896	HashResultPointer[4] += E;
1897
1898}
1899
1900/**
1901 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1902 * @RandomChallenge: pointer to the entry of host challenge random number array.
1903 * @HashWorking: pointer to the entry of the working hash array.
1904 *
1905 * This routine calculates the working hash array referred by @HashWorking
1906 * from the challenge random numbers associated with the host, referred by
1907 * @RandomChallenge. The result is put into the entry of the working hash
1908 * array and returned by reference through @HashWorking.
1909 **/
1910static void
1911lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1912{
1913	*HashWorking = (*RandomChallenge ^ *HashWorking);
1914}
1915
1916/**
1917 * lpfc_hba_init - Perform special handling for LC HBA initialization
1918 * @phba: pointer to lpfc hba data structure.
1919 * @hbainit: pointer to an array of unsigned 32-bit integers.
1920 *
1921 * This routine performs the special handling for LC HBA initialization.
1922 **/
1923void
1924lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1925{
1926	int t;
1927	uint32_t *HashWorking;
1928	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1929
1930	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1931	if (!HashWorking)
1932		return;
1933
1934	HashWorking[0] = HashWorking[78] = *pwwnn++;
1935	HashWorking[1] = HashWorking[79] = *pwwnn;
1936
1937	for (t = 0; t < 7; t++)
1938		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1939
1940	lpfc_sha_init(hbainit);
1941	lpfc_sha_iterate(hbainit, HashWorking);
1942	kfree(HashWorking);
1943}
1944
1945/**
1946 * lpfc_cleanup - Performs vport cleanups before deleting a vport
1947 * @vport: pointer to a virtual N_Port data structure.
1948 *
1949 * This routine performs the necessary cleanups before deleting the @vport.
1950 * It invokes the discovery state machine to perform necessary state
1951 * transitions and to release the ndlps associated with the @vport. Note,
1952 * the physical port is treated as @vport 0.
1953 **/
1954void
1955lpfc_cleanup(struct lpfc_vport *vport)
1956{
1957	struct lpfc_hba   *phba = vport->phba;
1958	struct lpfc_nodelist *ndlp, *next_ndlp;
1959	int i = 0;
1960
1961	if (phba->link_state > LPFC_LINK_DOWN)
1962		lpfc_port_link_failure(vport);
1963
1964	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1965		if (!NLP_CHK_NODE_ACT(ndlp)) {
1966			ndlp = lpfc_enable_node(vport, ndlp,
1967						NLP_STE_UNUSED_NODE);
1968			if (!ndlp)
1969				continue;
1970			spin_lock_irq(&phba->ndlp_lock);
1971			NLP_SET_FREE_REQ(ndlp);
1972			spin_unlock_irq(&phba->ndlp_lock);
1973			/* Trigger the release of the ndlp memory */
1974			lpfc_nlp_put(ndlp);
1975			continue;
1976		}
1977		spin_lock_irq(&phba->ndlp_lock);
1978		if (NLP_CHK_FREE_REQ(ndlp)) {
1979			/* The ndlp should not be in memory free mode already */
1980			spin_unlock_irq(&phba->ndlp_lock);
1981			continue;
1982		} else
1983			/* Indicate request for freeing ndlp memory */
1984			NLP_SET_FREE_REQ(ndlp);
1985		spin_unlock_irq(&phba->ndlp_lock);
1986
1987		if (vport->port_type != LPFC_PHYSICAL_PORT &&
1988		    ndlp->nlp_DID == Fabric_DID) {
1989			/* Just free up ndlp with Fabric_DID for vports */
1990			lpfc_nlp_put(ndlp);
1991			continue;
1992		}
1993
1994		if (ndlp->nlp_type & NLP_FABRIC)
1995			lpfc_disc_state_machine(vport, ndlp, NULL,
1996					NLP_EVT_DEVICE_RECOVERY);
1997
1998		lpfc_disc_state_machine(vport, ndlp, NULL,
1999					     NLP_EVT_DEVICE_RM);
2000
2001	}
2002
2003	/* At this point, ALL ndlp's should be gone
2004	 * because of the previous NLP_EVT_DEVICE_RM.
2005	 * Lets wait for this to happen, if needed.
2006	 */
2007	while (!list_empty(&vport->fc_nodes)) {
2008		if (i++ > 3000) {
2009			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2010				"0233 Nodelist not empty\n");
2011			list_for_each_entry_safe(ndlp, next_ndlp,
2012						&vport->fc_nodes, nlp_listp) {
2013				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2014						LOG_NODE,
2015						"0282 did:x%x ndlp:x%p "
2016						"usgmap:x%x refcnt:%d\n",
2017						ndlp->nlp_DID, (void *)ndlp,
2018						ndlp->nlp_usg_map,
2019						atomic_read(
2020							&ndlp->kref.refcount));
2021			}
2022			break;
2023		}
2024
2025		/* Wait for any activity on ndlps to settle */
2026		msleep(10);
2027	}
2028}
2029
2030/**
2031 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2032 * @vport: pointer to a virtual N_Port data structure.
2033 *
2034 * This routine stops all the timers associated with a @vport. This function
2035 * is invoked before disabling or deleting a @vport. Note that the physical
2036 * port is treated as @vport 0.
2037 **/
2038void
2039lpfc_stop_vport_timers(struct lpfc_vport *vport)
2040{
2041	del_timer_sync(&vport->els_tmofunc);
2042	del_timer_sync(&vport->fc_fdmitmo);
2043	lpfc_can_disctmo(vport);
2044	return;
2045}
2046
2047/**
2048 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2049 * @phba: pointer to lpfc hba data structure.
2050 *
2051 * This routine stops all the timers associated with a HBA. This function is
2052 * invoked before either putting a HBA offline or unloading the driver.
2053 **/
2054void
2055lpfc_stop_hba_timers(struct lpfc_hba *phba)
2056{
2057	lpfc_stop_vport_timers(phba->pport);
2058	del_timer_sync(&phba->sli.mbox_tmo);
2059	del_timer_sync(&phba->fabric_block_timer);
2060	del_timer_sync(&phba->eratt_poll);
2061	del_timer_sync(&phba->hb_tmofunc);
2062	phba->hb_outstanding = 0;
2063
2064	switch (phba->pci_dev_grp) {
2065	case LPFC_PCI_DEV_LP:
2066		/* Stop any LightPulse device specific driver timers */
2067		del_timer_sync(&phba->fcp_poll_timer);
2068		break;
2069	case LPFC_PCI_DEV_OC:
2070		/* Stop any OneConnect device sepcific driver timers */
2071		break;
2072	default:
2073		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2074				"0297 Invalid device group (x%x)\n",
2075				phba->pci_dev_grp);
2076		break;
2077	}
2078	return;
2079}
2080
2081/**
2082 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2083 * @phba: pointer to lpfc hba data structure.
2084 *
2085 * This routine marks a HBA's management interface as blocked. Once the HBA's
2086 * management interface is marked as blocked, all the user space access to
2087 * the HBA, whether they are from sysfs interface or libdfc interface will
2088 * all be blocked. The HBA is set to block the management interface when the
2089 * driver prepares the HBA interface for online or offline.
2090 **/
2091static void
2092lpfc_block_mgmt_io(struct lpfc_hba * phba)
2093{
2094	unsigned long iflag;
2095
2096	spin_lock_irqsave(&phba->hbalock, iflag);
2097	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2098	spin_unlock_irqrestore(&phba->hbalock, iflag);
2099}
2100
2101/**
2102 * lpfc_online - Initialize and bring a HBA online
2103 * @phba: pointer to lpfc hba data structure.
2104 *
2105 * This routine initializes the HBA and brings a HBA online. During this
2106 * process, the management interface is blocked to prevent user space access
2107 * to the HBA interfering with the driver initialization.
2108 *
2109 * Return codes
2110 *   0 - successful
2111 *   1 - failed
2112 **/
2113int
2114lpfc_online(struct lpfc_hba *phba)
2115{
2116	struct lpfc_vport *vport;
2117	struct lpfc_vport **vports;
2118	int i;
2119
2120	if (!phba)
2121		return 0;
2122	vport = phba->pport;
2123
2124	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2125		return 0;
2126
2127	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2128			"0458 Bring Adapter online\n");
2129
2130	lpfc_block_mgmt_io(phba);
2131
2132	if (!lpfc_sli_queue_setup(phba)) {
2133		lpfc_unblock_mgmt_io(phba);
2134		return 1;
2135	}
2136
2137	if (phba->sli_rev == LPFC_SLI_REV4) {
2138		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2139			lpfc_unblock_mgmt_io(phba);
2140			return 1;
2141		}
2142	} else {
2143		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2144			lpfc_unblock_mgmt_io(phba);
2145			return 1;
2146		}
2147	}
2148
2149	vports = lpfc_create_vport_work_array(phba);
2150	if (vports != NULL)
2151		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2152			struct Scsi_Host *shost;
2153			shost = lpfc_shost_from_vport(vports[i]);
2154			spin_lock_irq(shost->host_lock);
2155			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2156			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2157				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2158			spin_unlock_irq(shost->host_lock);
2159		}
2160		lpfc_destroy_vport_work_array(phba, vports);
2161
2162	lpfc_unblock_mgmt_io(phba);
2163	return 0;
2164}
2165
2166/**
2167 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2168 * @phba: pointer to lpfc hba data structure.
2169 *
2170 * This routine marks a HBA's management interface as not blocked. Once the
2171 * HBA's management interface is marked as not blocked, all the user space
2172 * access to the HBA, whether they are from sysfs interface or libdfc
2173 * interface will be allowed. The HBA is set to block the management interface
2174 * when the driver prepares the HBA interface for online or offline and then
2175 * set to unblock the management interface afterwards.
2176 **/
2177void
2178lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2179{
2180	unsigned long iflag;
2181
2182	spin_lock_irqsave(&phba->hbalock, iflag);
2183	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2184	spin_unlock_irqrestore(&phba->hbalock, iflag);
2185}
2186
2187/**
2188 * lpfc_offline_prep - Prepare a HBA to be brought offline
2189 * @phba: pointer to lpfc hba data structure.
2190 *
2191 * This routine is invoked to prepare a HBA to be brought offline. It performs
2192 * unregistration login to all the nodes on all vports and flushes the mailbox
2193 * queue to make it ready to be brought offline.
2194 **/
2195void
2196lpfc_offline_prep(struct lpfc_hba * phba)
2197{
2198	struct lpfc_vport *vport = phba->pport;
2199	struct lpfc_nodelist  *ndlp, *next_ndlp;
2200	struct lpfc_vport **vports;
2201	int i;
2202
2203	if (vport->fc_flag & FC_OFFLINE_MODE)
2204		return;
2205
2206	lpfc_block_mgmt_io(phba);
2207
2208	lpfc_linkdown(phba);
2209
2210	/* Issue an unreg_login to all nodes on all vports */
2211	vports = lpfc_create_vport_work_array(phba);
2212	if (vports != NULL) {
2213		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2214			struct Scsi_Host *shost;
2215
2216			if (vports[i]->load_flag & FC_UNLOADING)
2217				continue;
2218			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
2219			shost =	lpfc_shost_from_vport(vports[i]);
2220			list_for_each_entry_safe(ndlp, next_ndlp,
2221						 &vports[i]->fc_nodes,
2222						 nlp_listp) {
2223				if (!NLP_CHK_NODE_ACT(ndlp))
2224					continue;
2225				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2226					continue;
2227				if (ndlp->nlp_type & NLP_FABRIC) {
2228					lpfc_disc_state_machine(vports[i], ndlp,
2229						NULL, NLP_EVT_DEVICE_RECOVERY);
2230					lpfc_disc_state_machine(vports[i], ndlp,
2231						NULL, NLP_EVT_DEVICE_RM);
2232				}
2233				spin_lock_irq(shost->host_lock);
2234				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2235				spin_unlock_irq(shost->host_lock);
2236				lpfc_unreg_rpi(vports[i], ndlp);
2237			}
2238		}
2239	}
2240	lpfc_destroy_vport_work_array(phba, vports);
2241
2242	lpfc_sli_mbox_sys_shutdown(phba);
2243}
2244
2245/**
2246 * lpfc_offline - Bring a HBA offline
2247 * @phba: pointer to lpfc hba data structure.
2248 *
2249 * This routine actually brings a HBA offline. It stops all the timers
2250 * associated with the HBA, brings down the SLI layer, and eventually
2251 * marks the HBA as in offline state for the upper layer protocol.
2252 **/
2253void
2254lpfc_offline(struct lpfc_hba *phba)
2255{
2256	struct Scsi_Host  *shost;
2257	struct lpfc_vport **vports;
2258	int i;
2259
2260	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2261		return;
2262
2263	/* stop port and all timers associated with this hba */
2264	lpfc_stop_port(phba);
2265	vports = lpfc_create_vport_work_array(phba);
2266	if (vports != NULL)
2267		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2268			lpfc_stop_vport_timers(vports[i]);
2269	lpfc_destroy_vport_work_array(phba, vports);
2270	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2271			"0460 Bring Adapter offline\n");
2272	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2273	   now.  */
2274	lpfc_sli_hba_down(phba);
2275	spin_lock_irq(&phba->hbalock);
2276	phba->work_ha = 0;
2277	spin_unlock_irq(&phba->hbalock);
2278	vports = lpfc_create_vport_work_array(phba);
2279	if (vports != NULL)
2280		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2281			shost = lpfc_shost_from_vport(vports[i]);
2282			spin_lock_irq(shost->host_lock);
2283			vports[i]->work_port_events = 0;
2284			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2285			spin_unlock_irq(shost->host_lock);
2286		}
2287	lpfc_destroy_vport_work_array(phba, vports);
2288}
2289
2290/**
2291 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2292 * @phba: pointer to lpfc hba data structure.
2293 *
2294 * This routine is to free all the SCSI buffers and IOCBs from the driver
2295 * list back to kernel. It is called from lpfc_pci_remove_one to free
2296 * the internal resources before the device is removed from the system.
2297 *
2298 * Return codes
2299 *   0 - successful (for now, it always returns 0)
2300 **/
2301static int
2302lpfc_scsi_free(struct lpfc_hba *phba)
2303{
2304	struct lpfc_scsi_buf *sb, *sb_next;
2305	struct lpfc_iocbq *io, *io_next;
2306
2307	spin_lock_irq(&phba->hbalock);
2308	/* Release all the lpfc_scsi_bufs maintained by this host. */
2309	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2310		list_del(&sb->list);
2311		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2312			      sb->dma_handle);
2313		kfree(sb);
2314		phba->total_scsi_bufs--;
2315	}
2316
2317	/* Release all the lpfc_iocbq entries maintained by this host. */
2318	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2319		list_del(&io->list);
2320		kfree(io);
2321		phba->total_iocbq_bufs--;
2322	}
2323
2324	spin_unlock_irq(&phba->hbalock);
2325
2326	return 0;
2327}
2328
2329/**
2330 * lpfc_create_port - Create an FC port
2331 * @phba: pointer to lpfc hba data structure.
2332 * @instance: a unique integer ID to this FC port.
2333 * @dev: pointer to the device data structure.
2334 *
2335 * This routine creates a FC port for the upper layer protocol. The FC port
2336 * can be created on top of either a physical port or a virtual port provided
2337 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2338 * and associates the FC port created before adding the shost into the SCSI
2339 * layer.
2340 *
2341 * Return codes
2342 *   @vport - pointer to the virtual N_Port data structure.
2343 *   NULL - port create failed.
2344 **/
2345struct lpfc_vport *
2346lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2347{
2348	struct lpfc_vport *vport;
2349	struct Scsi_Host  *shost;
2350	int error = 0;
2351
2352	if (dev != &phba->pcidev->dev)
2353		shost = scsi_host_alloc(&lpfc_vport_template,
2354					sizeof(struct lpfc_vport));
2355	else
2356		shost = scsi_host_alloc(&lpfc_template,
2357					sizeof(struct lpfc_vport));
2358	if (!shost)
2359		goto out;
2360
2361	vport = (struct lpfc_vport *) shost->hostdata;
2362	vport->phba = phba;
2363	vport->load_flag |= FC_LOADING;
2364	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2365	vport->fc_rscn_flush = 0;
2366
2367	lpfc_get_vport_cfgparam(vport);
2368	shost->unique_id = instance;
2369	shost->max_id = LPFC_MAX_TARGET;
2370	shost->max_lun = vport->cfg_max_luns;
2371	shost->this_id = -1;
2372	shost->max_cmd_len = 16;
2373	if (phba->sli_rev == LPFC_SLI_REV4) {
2374		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2375		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2376	}
2377
2378	/*
2379	 * Set initial can_queue value since 0 is no longer supported and
2380	 * scsi_add_host will fail. This will be adjusted later based on the
2381	 * max xri value determined in hba setup.
2382	 */
2383	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2384	if (dev != &phba->pcidev->dev) {
2385		shost->transportt = lpfc_vport_transport_template;
2386		vport->port_type = LPFC_NPIV_PORT;
2387	} else {
2388		shost->transportt = lpfc_transport_template;
2389		vport->port_type = LPFC_PHYSICAL_PORT;
2390	}
2391
2392	/* Initialize all internally managed lists. */
2393	INIT_LIST_HEAD(&vport->fc_nodes);
2394	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2395	spin_lock_init(&vport->work_port_lock);
2396
2397	init_timer(&vport->fc_disctmo);
2398	vport->fc_disctmo.function = lpfc_disc_timeout;
2399	vport->fc_disctmo.data = (unsigned long)vport;
2400
2401	init_timer(&vport->fc_fdmitmo);
2402	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2403	vport->fc_fdmitmo.data = (unsigned long)vport;
2404
2405	init_timer(&vport->els_tmofunc);
2406	vport->els_tmofunc.function = lpfc_els_timeout;
2407	vport->els_tmofunc.data = (unsigned long)vport;
2408
2409	error = scsi_add_host(shost, dev);
2410	if (error)
2411		goto out_put_shost;
2412
2413	spin_lock_irq(&phba->hbalock);
2414	list_add_tail(&vport->listentry, &phba->port_list);
2415	spin_unlock_irq(&phba->hbalock);
2416	return vport;
2417
2418out_put_shost:
2419	scsi_host_put(shost);
2420out:
2421	return NULL;
2422}
2423
2424/**
2425 * destroy_port -  destroy an FC port
2426 * @vport: pointer to an lpfc virtual N_Port data structure.
2427 *
2428 * This routine destroys a FC port from the upper layer protocol. All the
2429 * resources associated with the port are released.
2430 **/
2431void
2432destroy_port(struct lpfc_vport *vport)
2433{
2434	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2435	struct lpfc_hba  *phba = vport->phba;
2436
2437	lpfc_debugfs_terminate(vport);
2438	fc_remove_host(shost);
2439	scsi_remove_host(shost);
2440
2441	spin_lock_irq(&phba->hbalock);
2442	list_del_init(&vport->listentry);
2443	spin_unlock_irq(&phba->hbalock);
2444
2445	lpfc_cleanup(vport);
2446	return;
2447}
2448
2449/**
2450 * lpfc_get_instance - Get a unique integer ID
2451 *
2452 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2453 * uses the kernel idr facility to perform the task.
2454 *
2455 * Return codes:
2456 *   instance - a unique integer ID allocated as the new instance.
2457 *   -1 - lpfc get instance failed.
2458 **/
2459int
2460lpfc_get_instance(void)
2461{
2462	int instance = 0;
2463
2464	/* Assign an unused number */
2465	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2466		return -1;
2467	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2468		return -1;
2469	return instance;
2470}
2471
2472/**
2473 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2474 * @shost: pointer to SCSI host data structure.
2475 * @time: elapsed time of the scan in jiffies.
2476 *
2477 * This routine is called by the SCSI layer with a SCSI host to determine
2478 * whether the scan host is finished.
2479 *
2480 * Note: there is no scan_start function as adapter initialization will have
2481 * asynchronously kicked off the link initialization.
2482 *
2483 * Return codes
2484 *   0 - SCSI host scan is not over yet.
2485 *   1 - SCSI host scan is over.
2486 **/
2487int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2488{
2489	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2490	struct lpfc_hba   *phba = vport->phba;
2491	int stat = 0;
2492
2493	spin_lock_irq(shost->host_lock);
2494
2495	if (vport->load_flag & FC_UNLOADING) {
2496		stat = 1;
2497		goto finished;
2498	}
2499	if (time >= 30 * HZ) {
2500		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2501				"0461 Scanning longer than 30 "
2502				"seconds.  Continuing initialization\n");
2503		stat = 1;
2504		goto finished;
2505	}
2506	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2507		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2508				"0465 Link down longer than 15 "
2509				"seconds.  Continuing initialization\n");
2510		stat = 1;
2511		goto finished;
2512	}
2513
2514	if (vport->port_state != LPFC_VPORT_READY)
2515		goto finished;
2516	if (vport->num_disc_nodes || vport->fc_prli_sent)
2517		goto finished;
2518	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2519		goto finished;
2520	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2521		goto finished;
2522
2523	stat = 1;
2524
2525finished:
2526	spin_unlock_irq(shost->host_lock);
2527	return stat;
2528}
2529
2530/**
2531 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2532 * @shost: pointer to SCSI host data structure.
2533 *
2534 * This routine initializes a given SCSI host attributes on a FC port. The
2535 * SCSI host can be either on top of a physical port or a virtual port.
2536 **/
2537void lpfc_host_attrib_init(struct Scsi_Host *shost)
2538{
2539	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2540	struct lpfc_hba   *phba = vport->phba;
2541	/*
2542	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
2543	 */
2544
2545	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2546	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2547	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2548
2549	memset(fc_host_supported_fc4s(shost), 0,
2550	       sizeof(fc_host_supported_fc4s(shost)));
2551	fc_host_supported_fc4s(shost)[2] = 1;
2552	fc_host_supported_fc4s(shost)[7] = 1;
2553
2554	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2555				 sizeof fc_host_symbolic_name(shost));
2556
2557	fc_host_supported_speeds(shost) = 0;
2558	if (phba->lmt & LMT_10Gb)
2559		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2560	if (phba->lmt & LMT_8Gb)
2561		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2562	if (phba->lmt & LMT_4Gb)
2563		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2564	if (phba->lmt & LMT_2Gb)
2565		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2566	if (phba->lmt & LMT_1Gb)
2567		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2568
2569	fc_host_maxframe_size(shost) =
2570		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2571		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2572
2573	/* This value is also unchanging */
2574	memset(fc_host_active_fc4s(shost), 0,
2575	       sizeof(fc_host_active_fc4s(shost)));
2576	fc_host_active_fc4s(shost)[2] = 1;
2577	fc_host_active_fc4s(shost)[7] = 1;
2578
2579	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2580	spin_lock_irq(shost->host_lock);
2581	vport->load_flag &= ~FC_LOADING;
2582	spin_unlock_irq(shost->host_lock);
2583}
2584
2585/**
2586 * lpfc_stop_port_s3 - Stop SLI3 device port
2587 * @phba: pointer to lpfc hba data structure.
2588 *
2589 * This routine is invoked to stop an SLI3 device port, it stops the device
2590 * from generating interrupts and stops the device driver's timers for the
2591 * device.
2592 **/
2593static void
2594lpfc_stop_port_s3(struct lpfc_hba *phba)
2595{
2596	/* Clear all interrupt enable conditions */
2597	writel(0, phba->HCregaddr);
2598	readl(phba->HCregaddr); /* flush */
2599	/* Clear all pending interrupts */
2600	writel(0xffffffff, phba->HAregaddr);
2601	readl(phba->HAregaddr); /* flush */
2602
2603	/* Reset some HBA SLI setup states */
2604	lpfc_stop_hba_timers(phba);
2605	phba->pport->work_port_events = 0;
2606}
2607
2608/**
2609 * lpfc_stop_port_s4 - Stop SLI4 device port
2610 * @phba: pointer to lpfc hba data structure.
2611 *
2612 * This routine is invoked to stop an SLI4 device port, it stops the device
2613 * from generating interrupts and stops the device driver's timers for the
2614 * device.
2615 **/
2616static void
2617lpfc_stop_port_s4(struct lpfc_hba *phba)
2618{
2619	/* Reset some HBA SLI4 setup states */
2620	lpfc_stop_hba_timers(phba);
2621	phba->pport->work_port_events = 0;
2622	phba->sli4_hba.intr_enable = 0;
2623	/* Hard clear it for now, shall have more graceful way to wait later */
2624	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2625}
2626
2627/**
2628 * lpfc_stop_port - Wrapper function for stopping hba port
2629 * @phba: Pointer to HBA context object.
2630 *
2631 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2632 * the API jump table function pointer from the lpfc_hba struct.
2633 **/
2634void
2635lpfc_stop_port(struct lpfc_hba *phba)
2636{
2637	phba->lpfc_stop_port(phba);
2638}
2639
2640/**
2641 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port.
2642 * @phba: pointer to lpfc hba data structure.
2643 *
2644 * This routine is invoked to remove the driver default fcf record from
2645 * the port.  This routine currently acts on FCF Index 0.
2646 *
2647 **/
2648void
2649lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2650{
2651	int rc = 0;
2652	LPFC_MBOXQ_t *mboxq;
2653	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2654	uint32_t mbox_tmo, req_len;
2655	uint32_t shdr_status, shdr_add_status;
2656
2657	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2658	if (!mboxq) {
2659		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2660			"2020 Failed to allocate mbox for ADD_FCF cmd\n");
2661		return;
2662	}
2663
2664	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2665		  sizeof(struct lpfc_sli4_cfg_mhdr);
2666	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2667			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2668			      req_len, LPFC_SLI4_MBX_EMBED);
2669	/*
2670	 * In phase 1, there is a single FCF index, 0.  In phase2, the driver
2671	 * supports multiple FCF indices.
2672	 */
2673	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2674	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2675	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2676	       phba->fcf.fcf_indx);
2677
2678	if (!phba->sli4_hba.intr_enable)
2679		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2680	else {
2681		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2682		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2683	}
2684	/* The IOCTL status is embedded in the mailbox subheader. */
2685	shdr_status = bf_get(lpfc_mbox_hdr_status,
2686			     &del_fcf_record->header.cfg_shdr.response);
2687	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2688				 &del_fcf_record->header.cfg_shdr.response);
2689	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2690		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2691				"2516 DEL FCF of default FCF Index failed "
2692				"mbx status x%x, status x%x add_status x%x\n",
2693				rc, shdr_status, shdr_add_status);
2694	}
2695	if (rc != MBX_TIMEOUT)
2696		mempool_free(mboxq, phba->mbox_mem_pool);
2697}
2698
2699/**
2700 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2701 * @phba: pointer to lpfc hba data structure.
2702 * @acqe_link: pointer to the async link completion queue entry.
2703 *
2704 * This routine is to parse the SLI4 link-attention link fault code and
2705 * translate it into the base driver's read link attention mailbox command
2706 * status.
2707 *
2708 * Return: Link-attention status in terms of base driver's coding.
2709 **/
2710static uint16_t
2711lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2712			   struct lpfc_acqe_link *acqe_link)
2713{
2714	uint16_t latt_fault;
2715
2716	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2717	case LPFC_ASYNC_LINK_FAULT_NONE:
2718	case LPFC_ASYNC_LINK_FAULT_LOCAL:
2719	case LPFC_ASYNC_LINK_FAULT_REMOTE:
2720		latt_fault = 0;
2721		break;
2722	default:
2723		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2724				"0398 Invalid link fault code: x%x\n",
2725				bf_get(lpfc_acqe_link_fault, acqe_link));
2726		latt_fault = MBXERR_ERROR;
2727		break;
2728	}
2729	return latt_fault;
2730}
2731
2732/**
2733 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2734 * @phba: pointer to lpfc hba data structure.
2735 * @acqe_link: pointer to the async link completion queue entry.
2736 *
2737 * This routine is to parse the SLI4 link attention type and translate it
2738 * into the base driver's link attention type coding.
2739 *
2740 * Return: Link attention type in terms of base driver's coding.
2741 **/
2742static uint8_t
2743lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2744			  struct lpfc_acqe_link *acqe_link)
2745{
2746	uint8_t att_type;
2747
2748	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2749	case LPFC_ASYNC_LINK_STATUS_DOWN:
2750	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2751		att_type = AT_LINK_DOWN;
2752		break;
2753	case LPFC_ASYNC_LINK_STATUS_UP:
2754		/* Ignore physical link up events - wait for logical link up */
2755		att_type = AT_RESERVED;
2756		break;
2757	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2758		att_type = AT_LINK_UP;
2759		break;
2760	default:
2761		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2762				"0399 Invalid link attention type: x%x\n",
2763				bf_get(lpfc_acqe_link_status, acqe_link));
2764		att_type = AT_RESERVED;
2765		break;
2766	}
2767	return att_type;
2768}
2769
2770/**
2771 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2772 * @phba: pointer to lpfc hba data structure.
2773 * @acqe_link: pointer to the async link completion queue entry.
2774 *
2775 * This routine is to parse the SLI4 link-attention link speed and translate
2776 * it into the base driver's link-attention link speed coding.
2777 *
2778 * Return: Link-attention link speed in terms of base driver's coding.
2779 **/
2780static uint8_t
2781lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2782				struct lpfc_acqe_link *acqe_link)
2783{
2784	uint8_t link_speed;
2785
2786	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2787	case LPFC_ASYNC_LINK_SPEED_ZERO:
2788		link_speed = LA_UNKNW_LINK;
2789		break;
2790	case LPFC_ASYNC_LINK_SPEED_10MBPS:
2791		link_speed = LA_UNKNW_LINK;
2792		break;
2793	case LPFC_ASYNC_LINK_SPEED_100MBPS:
2794		link_speed = LA_UNKNW_LINK;
2795		break;
2796	case LPFC_ASYNC_LINK_SPEED_1GBPS:
2797		link_speed = LA_1GHZ_LINK;
2798		break;
2799	case LPFC_ASYNC_LINK_SPEED_10GBPS:
2800		link_speed = LA_10GHZ_LINK;
2801		break;
2802	default:
2803		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2804				"0483 Invalid link-attention link speed: x%x\n",
2805				bf_get(lpfc_acqe_link_speed, acqe_link));
2806		link_speed = LA_UNKNW_LINK;
2807		break;
2808	}
2809	return link_speed;
2810}
2811
2812/**
2813 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2814 * @phba: pointer to lpfc hba data structure.
2815 * @acqe_link: pointer to the async link completion queue entry.
2816 *
2817 * This routine is to handle the SLI4 asynchronous link event.
2818 **/
2819static void
2820lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2821			 struct lpfc_acqe_link *acqe_link)
2822{
2823	struct lpfc_dmabuf *mp;
2824	LPFC_MBOXQ_t *pmb;
2825	MAILBOX_t *mb;
2826	READ_LA_VAR *la;
2827	uint8_t att_type;
2828
2829	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2830	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2831		return;
2832	phba->fcoe_eventtag = acqe_link->event_tag;
2833	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2834	if (!pmb) {
2835		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2836				"0395 The mboxq allocation failed\n");
2837		return;
2838	}
2839	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2840	if (!mp) {
2841		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2842				"0396 The lpfc_dmabuf allocation failed\n");
2843		goto out_free_pmb;
2844	}
2845	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2846	if (!mp->virt) {
2847		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2848				"0397 The mbuf allocation failed\n");
2849		goto out_free_dmabuf;
2850	}
2851
2852	/* Cleanup any outstanding ELS commands */
2853	lpfc_els_flush_all_cmd(phba);
2854
2855	/* Block ELS IOCBs until we have done process link event */
2856	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2857
2858	/* Update link event statistics */
2859	phba->sli.slistat.link_event++;
2860
2861	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2862	lpfc_read_la(phba, pmb, mp);
2863	pmb->vport = phba->pport;
2864
2865	/* Parse and translate status field */
2866	mb = &pmb->u.mb;
2867	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2868
2869	/* Parse and translate link attention fields */
2870	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2871	la->eventTag = acqe_link->event_tag;
2872	la->attType = att_type;
2873	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2874
2875	/* Fake the the following irrelvant fields */
2876	la->topology = TOPOLOGY_PT_PT;
2877	la->granted_AL_PA = 0;
2878	la->il = 0;
2879	la->pb = 0;
2880	la->fa = 0;
2881	la->mm = 0;
2882
2883	/* Keep the link status for extra SLI4 state machine reference */
2884	phba->sli4_hba.link_state.speed =
2885				bf_get(lpfc_acqe_link_speed, acqe_link);
2886	phba->sli4_hba.link_state.duplex =
2887				bf_get(lpfc_acqe_link_duplex, acqe_link);
2888	phba->sli4_hba.link_state.status =
2889				bf_get(lpfc_acqe_link_status, acqe_link);
2890	phba->sli4_hba.link_state.physical =
2891				bf_get(lpfc_acqe_link_physical, acqe_link);
2892	phba->sli4_hba.link_state.fault =
2893				bf_get(lpfc_acqe_link_fault, acqe_link);
2894
2895	/* Invoke the lpfc_handle_latt mailbox command callback function */
2896	lpfc_mbx_cmpl_read_la(phba, pmb);
2897
2898	return;
2899
2900out_free_dmabuf:
2901	kfree(mp);
2902out_free_pmb:
2903	mempool_free(pmb, phba->mbox_mem_pool);
2904}
2905
2906/**
2907 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2908 * @phba: pointer to lpfc hba data structure.
2909 * @acqe_link: pointer to the async fcoe completion queue entry.
2910 *
2911 * This routine is to handle the SLI4 asynchronous fcoe event.
2912 **/
2913static void
2914lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2915			 struct lpfc_acqe_fcoe *acqe_fcoe)
2916{
2917	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2918	int rc;
2919
2920	phba->fcoe_eventtag = acqe_fcoe->event_tag;
2921	switch (event_type) {
2922	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2923		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2924			"2546 New FCF found index 0x%x tag 0x%x \n",
2925			acqe_fcoe->fcf_index,
2926			acqe_fcoe->event_tag);
2927		/*
2928		 * If the current FCF is in discovered state, or
2929		 * FCF discovery is in progress do nothing.
2930		 */
2931		spin_lock_irq(&phba->hbalock);
2932		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
2933		   (phba->hba_flag & FCF_DISC_INPROGRESS)) {
2934			spin_unlock_irq(&phba->hbalock);
2935			break;
2936		}
2937		spin_unlock_irq(&phba->hbalock);
2938
2939		/* Read the FCF table and re-discover SAN. */
2940		rc = lpfc_sli4_read_fcf_record(phba,
2941			LPFC_FCOE_FCF_GET_FIRST);
2942		if (rc)
2943			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2944				"2547 Read FCF record failed 0x%x\n",
2945				rc);
2946		break;
2947
2948	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2949		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2950			"2548 FCF Table full count 0x%x tag 0x%x \n",
2951			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2952			acqe_fcoe->event_tag);
2953		break;
2954
2955	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2956		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2957			"2549 FCF disconnected fron network index 0x%x"
2958			" tag 0x%x \n", acqe_fcoe->fcf_index,
2959			acqe_fcoe->event_tag);
2960		/* If the event is not for currently used fcf do nothing */
2961		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2962			break;
2963		/*
2964		 * Currently, driver support only one FCF - so treat this as
2965		 * a link down.
2966		 */
2967		lpfc_linkdown(phba);
2968		/* Unregister FCF if no devices connected to it */
2969		lpfc_unregister_unused_fcf(phba);
2970		break;
2971
2972	default:
2973		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2974			"0288 Unknown FCoE event type 0x%x event tag "
2975			"0x%x\n", event_type, acqe_fcoe->event_tag);
2976		break;
2977	}
2978}
2979
2980/**
2981 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2982 * @phba: pointer to lpfc hba data structure.
2983 * @acqe_link: pointer to the async dcbx completion queue entry.
2984 *
2985 * This routine is to handle the SLI4 asynchronous dcbx event.
2986 **/
2987static void
2988lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2989			 struct lpfc_acqe_dcbx *acqe_dcbx)
2990{
2991	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2992			"0290 The SLI4 DCBX asynchronous event is not "
2993			"handled yet\n");
2994}
2995
2996/**
2997 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2998 * @phba: pointer to lpfc hba data structure.
2999 *
3000 * This routine is invoked by the worker thread to process all the pending
3001 * SLI4 asynchronous events.
3002 **/
3003void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3004{
3005	struct lpfc_cq_event *cq_event;
3006
3007	/* First, declare the async event has been handled */
3008	spin_lock_irq(&phba->hbalock);
3009	phba->hba_flag &= ~ASYNC_EVENT;
3010	spin_unlock_irq(&phba->hbalock);
3011	/* Now, handle all the async events */
3012	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3013		/* Get the first event from the head of the event queue */
3014		spin_lock_irq(&phba->hbalock);
3015		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3016				 cq_event, struct lpfc_cq_event, list);
3017		spin_unlock_irq(&phba->hbalock);
3018		/* Process the asynchronous event */
3019		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3020		case LPFC_TRAILER_CODE_LINK:
3021			lpfc_sli4_async_link_evt(phba,
3022						 &cq_event->cqe.acqe_link);
3023			break;
3024		case LPFC_TRAILER_CODE_FCOE:
3025			lpfc_sli4_async_fcoe_evt(phba,
3026						 &cq_event->cqe.acqe_fcoe);
3027			break;
3028		case LPFC_TRAILER_CODE_DCBX:
3029			lpfc_sli4_async_dcbx_evt(phba,
3030						 &cq_event->cqe.acqe_dcbx);
3031			break;
3032		default:
3033			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3034					"1804 Invalid asynchrous event code: "
3035					"x%x\n", bf_get(lpfc_trailer_code,
3036					&cq_event->cqe.mcqe_cmpl));
3037			break;
3038		}
3039		/* Free the completion event processed to the free pool */
3040		lpfc_sli4_cq_event_release(phba, cq_event);
3041	}
3042}
3043
3044/**
3045 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3046 * @phba: pointer to lpfc hba data structure.
3047 * @dev_grp: The HBA PCI-Device group number.
3048 *
3049 * This routine is invoked to set up the per HBA PCI-Device group function
3050 * API jump table entries.
3051 *
3052 * Return: 0 if success, otherwise -ENODEV
3053 **/
3054int
3055lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3056{
3057	int rc;
3058
3059	/* Set up lpfc PCI-device group */
3060	phba->pci_dev_grp = dev_grp;
3061
3062	/* The LPFC_PCI_DEV_OC uses SLI4 */
3063	if (dev_grp == LPFC_PCI_DEV_OC)
3064		phba->sli_rev = LPFC_SLI_REV4;
3065
3066	/* Set up device INIT API function jump table */
3067	rc = lpfc_init_api_table_setup(phba, dev_grp);
3068	if (rc)
3069		return -ENODEV;
3070	/* Set up SCSI API function jump table */
3071	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3072	if (rc)
3073		return -ENODEV;
3074	/* Set up SLI API function jump table */
3075	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3076	if (rc)
3077		return -ENODEV;
3078	/* Set up MBOX API function jump table */
3079	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3080	if (rc)
3081		return -ENODEV;
3082
3083	return 0;
3084}
3085
3086/**
3087 * lpfc_log_intr_mode - Log the active interrupt mode
3088 * @phba: pointer to lpfc hba data structure.
3089 * @intr_mode: active interrupt mode adopted.
3090 *
3091 * This routine it invoked to log the currently used active interrupt mode
3092 * to the device.
3093 **/
3094static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3095{
3096	switch (intr_mode) {
3097	case 0:
3098		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3099				"0470 Enable INTx interrupt mode.\n");
3100		break;
3101	case 1:
3102		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3103				"0481 Enabled MSI interrupt mode.\n");
3104		break;
3105	case 2:
3106		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3107				"0480 Enabled MSI-X interrupt mode.\n");
3108		break;
3109	default:
3110		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3111				"0482 Illegal interrupt mode.\n");
3112		break;
3113	}
3114	return;
3115}
3116
3117/**
3118 * lpfc_enable_pci_dev - Enable a generic PCI device.
3119 * @phba: pointer to lpfc hba data structure.
3120 *
3121 * This routine is invoked to enable the PCI device that is common to all
3122 * PCI devices.
3123 *
3124 * Return codes
3125 * 	0 - sucessful
3126 * 	other values - error
3127 **/
3128static int
3129lpfc_enable_pci_dev(struct lpfc_hba *phba)
3130{
3131	struct pci_dev *pdev;
3132	int bars;
3133
3134	/* Obtain PCI device reference */
3135	if (!phba->pcidev)
3136		goto out_error;
3137	else
3138		pdev = phba->pcidev;
3139	/* Select PCI BARs */
3140	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3141	/* Enable PCI device */
3142	if (pci_enable_device_mem(pdev))
3143		goto out_error;
3144	/* Request PCI resource for the device */
3145	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3146		goto out_disable_device;
3147	/* Set up device as PCI master and save state for EEH */
3148	pci_set_master(pdev);
3149	pci_try_set_mwi(pdev);
3150	pci_save_state(pdev);
3151
3152	return 0;
3153
3154out_disable_device:
3155	pci_disable_device(pdev);
3156out_error:
3157	return -ENODEV;
3158}
3159
3160/**
3161 * lpfc_disable_pci_dev - Disable a generic PCI device.
3162 * @phba: pointer to lpfc hba data structure.
3163 *
3164 * This routine is invoked to disable the PCI device that is common to all
3165 * PCI devices.
3166 **/
3167static void
3168lpfc_disable_pci_dev(struct lpfc_hba *phba)
3169{
3170	struct pci_dev *pdev;
3171	int bars;
3172
3173	/* Obtain PCI device reference */
3174	if (!phba->pcidev)
3175		return;
3176	else
3177		pdev = phba->pcidev;
3178	/* Select PCI BARs */
3179	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3180	/* Release PCI resource and disable PCI device */
3181	pci_release_selected_regions(pdev, bars);
3182	pci_disable_device(pdev);
3183	/* Null out PCI private reference to driver */
3184	pci_set_drvdata(pdev, NULL);
3185
3186	return;
3187}
3188
3189/**
3190 * lpfc_reset_hba - Reset a hba
3191 * @phba: pointer to lpfc hba data structure.
3192 *
3193 * This routine is invoked to reset a hba device. It brings the HBA
3194 * offline, performs a board restart, and then brings the board back
3195 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3196 * on outstanding mailbox commands.
3197 **/
3198void
3199lpfc_reset_hba(struct lpfc_hba *phba)
3200{
3201	/* If resets are disabled then set error state and return. */
3202	if (!phba->cfg_enable_hba_reset) {
3203		phba->link_state = LPFC_HBA_ERROR;
3204		return;
3205	}
3206	lpfc_offline_prep(phba);
3207	lpfc_offline(phba);
3208	lpfc_sli_brdrestart(phba);
3209	lpfc_online(phba);
3210	lpfc_unblock_mgmt_io(phba);
3211}
3212
3213/**
3214 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3215 * @phba: pointer to lpfc hba data structure.
3216 *
3217 * This routine is invoked to set up the driver internal resources specific to
3218 * support the SLI-3 HBA device it attached to.
3219 *
3220 * Return codes
3221 * 	0 - sucessful
3222 * 	other values - error
3223 **/
3224static int
3225lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3226{
3227	struct lpfc_sli *psli;
3228
3229	/*
3230	 * Initialize timers used by driver
3231	 */
3232
3233	/* Heartbeat timer */
3234	init_timer(&phba->hb_tmofunc);
3235	phba->hb_tmofunc.function = lpfc_hb_timeout;
3236	phba->hb_tmofunc.data = (unsigned long)phba;
3237
3238	psli = &phba->sli;
3239	/* MBOX heartbeat timer */
3240	init_timer(&psli->mbox_tmo);
3241	psli->mbox_tmo.function = lpfc_mbox_timeout;
3242	psli->mbox_tmo.data = (unsigned long) phba;
3243	/* FCP polling mode timer */
3244	init_timer(&phba->fcp_poll_timer);
3245	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3246	phba->fcp_poll_timer.data = (unsigned long) phba;
3247	/* Fabric block timer */
3248	init_timer(&phba->fabric_block_timer);
3249	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3250	phba->fabric_block_timer.data = (unsigned long) phba;
3251	/* EA polling mode timer */
3252	init_timer(&phba->eratt_poll);
3253	phba->eratt_poll.function = lpfc_poll_eratt;
3254	phba->eratt_poll.data = (unsigned long) phba;
3255
3256	/* Host attention work mask setup */
3257	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3258	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3259
3260	/* Get all the module params for configuring this host */
3261	lpfc_get_cfgparam(phba);
3262	/*
3263	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3264	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3265	 * 2 segments are added since the IOCB needs a command and response bde.
3266	 */
3267	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3268		sizeof(struct fcp_rsp) +
3269			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3270
3271	if (phba->cfg_enable_bg) {
3272		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3273		phba->cfg_sg_dma_buf_size +=
3274			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3275	}
3276
3277	/* Also reinitialize the host templates with new values. */
3278	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3279	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3280
3281	phba->max_vpi = LPFC_MAX_VPI;
3282	/* This will be set to correct value after config_port mbox */
3283	phba->max_vports = 0;
3284
3285	/*
3286	 * Initialize the SLI Layer to run with lpfc HBAs.
3287	 */
3288	lpfc_sli_setup(phba);
3289	lpfc_sli_queue_setup(phba);
3290
3291	/* Allocate device driver memory */
3292	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3293		return -ENOMEM;
3294
3295	return 0;
3296}
3297
3298/**
3299 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3300 * @phba: pointer to lpfc hba data structure.
3301 *
3302 * This routine is invoked to unset the driver internal resources set up
3303 * specific for supporting the SLI-3 HBA device it attached to.
3304 **/
3305static void
3306lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3307{
3308	/* Free device driver memory allocated */
3309	lpfc_mem_free_all(phba);
3310
3311	return;
3312}
3313
3314/**
3315 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3316 * @phba: pointer to lpfc hba data structure.
3317 *
3318 * This routine is invoked to set up the driver internal resources specific to
3319 * support the SLI-4 HBA device it attached to.
3320 *
3321 * Return codes
3322 * 	0 - sucessful
3323 * 	other values - error
3324 **/
3325static int
3326lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3327{
3328	struct lpfc_sli *psli;
3329	int rc;
3330	int i, hbq_count;
3331
3332	/* Before proceed, wait for POST done and device ready */
3333	rc = lpfc_sli4_post_status_check(phba);
3334	if (rc)
3335		return -ENODEV;
3336
3337	/*
3338	 * Initialize timers used by driver
3339	 */
3340
3341	/* Heartbeat timer */
3342	init_timer(&phba->hb_tmofunc);
3343	phba->hb_tmofunc.function = lpfc_hb_timeout;
3344	phba->hb_tmofunc.data = (unsigned long)phba;
3345
3346	psli = &phba->sli;
3347	/* MBOX heartbeat timer */
3348	init_timer(&psli->mbox_tmo);
3349	psli->mbox_tmo.function = lpfc_mbox_timeout;
3350	psli->mbox_tmo.data = (unsigned long) phba;
3351	/* Fabric block timer */
3352	init_timer(&phba->fabric_block_timer);
3353	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3354	phba->fabric_block_timer.data = (unsigned long) phba;
3355	/* EA polling mode timer */
3356	init_timer(&phba->eratt_poll);
3357	phba->eratt_poll.function = lpfc_poll_eratt;
3358	phba->eratt_poll.data = (unsigned long) phba;
3359	/*
3360	 * We need to do a READ_CONFIG mailbox command here before
3361	 * calling lpfc_get_cfgparam. For VFs this will report the
3362	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3363	 * All of the resources allocated
3364	 * for this Port are tied to these values.
3365	 */
3366	/* Get all the module params for configuring this host */
3367	lpfc_get_cfgparam(phba);
3368	phba->max_vpi = LPFC_MAX_VPI;
3369	/* This will be set to correct value after the read_config mbox */
3370	phba->max_vports = 0;
3371
3372	/* Program the default value of vlan_id and fc_map */
3373	phba->valid_vlan = 0;
3374	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3375	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3376	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3377
3378	/*
3379	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3380	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3381	 * 2 segments are added since the IOCB needs a command and response bde.
3382	 * To insure that the scsi sgl does not cross a 4k page boundary only
3383	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3384	 * Table of sgl sizes and seg_cnt:
3385	 * sgl size, 	sg_seg_cnt	total seg
3386	 * 1k		50		52
3387	 * 2k		114		116
3388	 * 4k		242		244
3389	 * 8k		498		500
3390	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3391	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3392	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3393	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3394	 */
3395	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3396		phba->cfg_sg_seg_cnt = 50;
3397	else if (phba->cfg_sg_seg_cnt <= 114)
3398		phba->cfg_sg_seg_cnt = 114;
3399	else if (phba->cfg_sg_seg_cnt <= 242)
3400		phba->cfg_sg_seg_cnt = 242;
3401	else
3402		phba->cfg_sg_seg_cnt = 498;
3403
3404	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3405					+ sizeof(struct fcp_rsp);
3406	phba->cfg_sg_dma_buf_size +=
3407		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3408
3409	/* Initialize buffer queue management fields */
3410	hbq_count = lpfc_sli_hbq_count();
3411	for (i = 0; i < hbq_count; ++i)
3412		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3413	INIT_LIST_HEAD(&phba->rb_pend_list);
3414	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3415	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3416
3417	/*
3418	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3419	 */
3420	/* Initialize the Abort scsi buffer list used by driver */
3421	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3422	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3423	/* This abort list used by worker thread */
3424	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3425
3426	/*
3427	 * Initialize dirver internal slow-path work queues
3428	 */
3429
3430	/* Driver internel slow-path CQ Event pool */
3431	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3432	/* Response IOCB work queue list */
3433	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3434	/* Asynchronous event CQ Event work queue list */
3435	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3436	/* Fast-path XRI aborted CQ Event work queue list */
3437	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3438	/* Slow-path XRI aborted CQ Event work queue list */
3439	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3440	/* Receive queue CQ Event work queue list */
3441	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3442
3443	/* Initialize the driver internal SLI layer lists. */
3444	lpfc_sli_setup(phba);
3445	lpfc_sli_queue_setup(phba);
3446
3447	/* Allocate device driver memory */
3448	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3449	if (rc)
3450		return -ENOMEM;
3451
3452	/* Create the bootstrap mailbox command */
3453	rc = lpfc_create_bootstrap_mbox(phba);
3454	if (unlikely(rc))
3455		goto out_free_mem;
3456
3457	/* Set up the host's endian order with the device. */
3458	rc = lpfc_setup_endian_order(phba);
3459	if (unlikely(rc))
3460		goto out_free_bsmbx;
3461
3462	/* Set up the hba's configuration parameters. */
3463	rc = lpfc_sli4_read_config(phba);
3464	if (unlikely(rc))
3465		goto out_free_bsmbx;
3466
3467	/* Perform a function reset */
3468	rc = lpfc_pci_function_reset(phba);
3469	if (unlikely(rc))
3470		goto out_free_bsmbx;
3471
3472	/* Create all the SLI4 queues */
3473	rc = lpfc_sli4_queue_create(phba);
3474	if (rc)
3475		goto out_free_bsmbx;
3476
3477	/* Create driver internal CQE event pool */
3478	rc = lpfc_sli4_cq_event_pool_create(phba);
3479	if (rc)
3480		goto out_destroy_queue;
3481
3482	/* Initialize and populate the iocb list per host */
3483	rc = lpfc_init_sgl_list(phba);
3484	if (rc) {
3485		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3486				"1400 Failed to initialize sgl list.\n");
3487		goto out_destroy_cq_event_pool;
3488	}
3489	rc = lpfc_init_active_sgl_array(phba);
3490	if (rc) {
3491		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3492				"1430 Failed to initialize sgl list.\n");
3493		goto out_free_sgl_list;
3494	}
3495
3496	rc = lpfc_sli4_init_rpi_hdrs(phba);
3497	if (rc) {
3498		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3499				"1432 Failed to initialize rpi headers.\n");
3500		goto out_free_active_sgl;
3501	}
3502
3503	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3504				    phba->cfg_fcp_eq_count), GFP_KERNEL);
3505	if (!phba->sli4_hba.fcp_eq_hdl) {
3506		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3507				"2572 Failed allocate memory for fast-path "
3508				"per-EQ handle array\n");
3509		goto out_remove_rpi_hdrs;
3510	}
3511
3512	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3513				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3514	if (!phba->sli4_hba.msix_entries) {
3515		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3516				"2573 Failed allocate memory for msi-x "
3517				"interrupt vector entries\n");
3518		goto out_free_fcp_eq_hdl;
3519	}
3520
3521	return rc;
3522
3523out_free_fcp_eq_hdl:
3524	kfree(phba->sli4_hba.fcp_eq_hdl);
3525out_remove_rpi_hdrs:
3526	lpfc_sli4_remove_rpi_hdrs(phba);
3527out_free_active_sgl:
3528	lpfc_free_active_sgl(phba);
3529out_free_sgl_list:
3530	lpfc_free_sgl_list(phba);
3531out_destroy_cq_event_pool:
3532	lpfc_sli4_cq_event_pool_destroy(phba);
3533out_destroy_queue:
3534	lpfc_sli4_queue_destroy(phba);
3535out_free_bsmbx:
3536	lpfc_destroy_bootstrap_mbox(phba);
3537out_free_mem:
3538	lpfc_mem_free(phba);
3539	return rc;
3540}
3541
3542/**
3543 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3544 * @phba: pointer to lpfc hba data structure.
3545 *
3546 * This routine is invoked to unset the driver internal resources set up
3547 * specific for supporting the SLI-4 HBA device it attached to.
3548 **/
3549static void
3550lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3551{
3552	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3553
3554	/* unregister default FCFI from the HBA */
3555	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3556
3557	/* Free the default FCR table */
3558	lpfc_sli_remove_dflt_fcf(phba);
3559
3560	/* Free memory allocated for msi-x interrupt vector entries */
3561	kfree(phba->sli4_hba.msix_entries);
3562
3563	/* Free memory allocated for fast-path work queue handles */
3564	kfree(phba->sli4_hba.fcp_eq_hdl);
3565
3566	/* Free the allocated rpi headers. */
3567	lpfc_sli4_remove_rpi_hdrs(phba);
3568	lpfc_sli4_remove_rpis(phba);
3569
3570	/* Free the ELS sgl list */
3571	lpfc_free_active_sgl(phba);
3572	lpfc_free_sgl_list(phba);
3573
3574	/* Free the SCSI sgl management array */
3575	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3576
3577	/* Free the SLI4 queues */
3578	lpfc_sli4_queue_destroy(phba);
3579
3580	/* Free the completion queue EQ event pool */
3581	lpfc_sli4_cq_event_release_all(phba);
3582	lpfc_sli4_cq_event_pool_destroy(phba);
3583
3584	/* Reset SLI4 HBA FCoE function */
3585	lpfc_pci_function_reset(phba);
3586
3587	/* Free the bsmbx region. */
3588	lpfc_destroy_bootstrap_mbox(phba);
3589
3590	/* Free the SLI Layer memory with SLI4 HBAs */
3591	lpfc_mem_free_all(phba);
3592
3593	/* Free the current connect table */
3594	list_for_each_entry_safe(conn_entry, next_conn_entry,
3595		&phba->fcf_conn_rec_list, list)
3596		kfree(conn_entry);
3597
3598	return;
3599}
3600
3601/**
3602 * lpfc_init_api_table_setup - Set up init api fucntion jump table
3603 * @phba: The hba struct for which this call is being executed.
3604 * @dev_grp: The HBA PCI-Device group number.
3605 *
3606 * This routine sets up the device INIT interface API function jump table
3607 * in @phba struct.
3608 *
3609 * Returns: 0 - success, -ENODEV - failure.
3610 **/
3611int
3612lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3613{
3614	switch (dev_grp) {
3615	case LPFC_PCI_DEV_LP:
3616		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3617		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3618		phba->lpfc_stop_port = lpfc_stop_port_s3;
3619		break;
3620	case LPFC_PCI_DEV_OC:
3621		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3622		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3623		phba->lpfc_stop_port = lpfc_stop_port_s4;
3624		break;
3625	default:
3626		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3627				"1431 Invalid HBA PCI-device group: 0x%x\n",
3628				dev_grp);
3629		return -ENODEV;
3630		break;
3631	}
3632	return 0;
3633}
3634
3635/**
3636 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
3637 * @phba: pointer to lpfc hba data structure.
3638 *
3639 * This routine is invoked to set up the driver internal resources before the
3640 * device specific resource setup to support the HBA device it attached to.
3641 *
3642 * Return codes
3643 *	0 - sucessful
3644 *	other values - error
3645 **/
3646static int
3647lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3648{
3649	/*
3650	 * Driver resources common to all SLI revisions
3651	 */
3652	atomic_set(&phba->fast_event_count, 0);
3653	spin_lock_init(&phba->hbalock);
3654
3655	/* Initialize ndlp management spinlock */
3656	spin_lock_init(&phba->ndlp_lock);
3657
3658	INIT_LIST_HEAD(&phba->port_list);
3659	INIT_LIST_HEAD(&phba->work_list);
3660	init_waitqueue_head(&phba->wait_4_mlo_m_q);
3661
3662	/* Initialize the wait queue head for the kernel thread */
3663	init_waitqueue_head(&phba->work_waitq);
3664
3665	/* Initialize the scsi buffer list used by driver for scsi IO */
3666	spin_lock_init(&phba->scsi_buf_list_lock);
3667	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3668
3669	/* Initialize the fabric iocb list */
3670	INIT_LIST_HEAD(&phba->fabric_iocb_list);
3671
3672	/* Initialize list to save ELS buffers */
3673	INIT_LIST_HEAD(&phba->elsbuf);
3674
3675	/* Initialize FCF connection rec list */
3676	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3677
3678	return 0;
3679}
3680
3681/**
3682 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3683 * @phba: pointer to lpfc hba data structure.
3684 *
3685 * This routine is invoked to set up the driver internal resources after the
3686 * device specific resource setup to support the HBA device it attached to.
3687 *
3688 * Return codes
3689 * 	0 - sucessful
3690 * 	other values - error
3691 **/
3692static int
3693lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3694{
3695	int error;
3696
3697	/* Startup the kernel thread for this host adapter. */
3698	phba->worker_thread = kthread_run(lpfc_do_work, phba,
3699					  "lpfc_worker_%d", phba->brd_no);
3700	if (IS_ERR(phba->worker_thread)) {
3701		error = PTR_ERR(phba->worker_thread);
3702		return error;
3703	}
3704
3705	return 0;
3706}
3707
3708/**
3709 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3710 * @phba: pointer to lpfc hba data structure.
3711 *
3712 * This routine is invoked to unset the driver internal resources set up after
3713 * the device specific resource setup for supporting the HBA device it
3714 * attached to.
3715 **/
3716static void
3717lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3718{
3719	/* Stop kernel worker thread */
3720	kthread_stop(phba->worker_thread);
3721}
3722
3723/**
3724 * lpfc_free_iocb_list - Free iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to free the driver's IOCB list and memory.
3728 **/
3729static void
3730lpfc_free_iocb_list(struct lpfc_hba *phba)
3731{
3732	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3733
3734	spin_lock_irq(&phba->hbalock);
3735	list_for_each_entry_safe(iocbq_entry, iocbq_next,
3736				 &phba->lpfc_iocb_list, list) {
3737		list_del(&iocbq_entry->list);
3738		kfree(iocbq_entry);
3739		phba->total_iocbq_bufs--;
3740	}
3741	spin_unlock_irq(&phba->hbalock);
3742
3743	return;
3744}
3745
3746/**
3747 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3748 * @phba: pointer to lpfc hba data structure.
3749 *
3750 * This routine is invoked to allocate and initizlize the driver's IOCB
3751 * list and set up the IOCB tag array accordingly.
3752 *
3753 * Return codes
3754 *	0 - sucessful
3755 *	other values - error
3756 **/
3757static int
3758lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3759{
3760	struct lpfc_iocbq *iocbq_entry = NULL;
3761	uint16_t iotag;
3762	int i;
3763
3764	/* Initialize and populate the iocb list per host.  */
3765	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3766	for (i = 0; i < iocb_count; i++) {
3767		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3768		if (iocbq_entry == NULL) {
3769			printk(KERN_ERR "%s: only allocated %d iocbs of "
3770				"expected %d count. Unloading driver.\n",
3771				__func__, i, LPFC_IOCB_LIST_CNT);
3772			goto out_free_iocbq;
3773		}
3774
3775		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3776		if (iotag == 0) {
3777			kfree(iocbq_entry);
3778			printk(KERN_ERR "%s: failed to allocate IOTAG. "
3779				"Unloading driver.\n", __func__);
3780			goto out_free_iocbq;
3781		}
3782		iocbq_entry->sli4_xritag = NO_XRI;
3783
3784		spin_lock_irq(&phba->hbalock);
3785		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3786		phba->total_iocbq_bufs++;
3787		spin_unlock_irq(&phba->hbalock);
3788	}
3789
3790	return 0;
3791
3792out_free_iocbq:
3793	lpfc_free_iocb_list(phba);
3794
3795	return -ENOMEM;
3796}
3797
3798/**
3799 * lpfc_free_sgl_list - Free sgl list.
3800 * @phba: pointer to lpfc hba data structure.
3801 *
3802 * This routine is invoked to free the driver's sgl list and memory.
3803 **/
3804static void
3805lpfc_free_sgl_list(struct lpfc_hba *phba)
3806{
3807	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3808	LIST_HEAD(sglq_list);
3809	int rc = 0;
3810
3811	spin_lock_irq(&phba->hbalock);
3812	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3813	spin_unlock_irq(&phba->hbalock);
3814
3815	list_for_each_entry_safe(sglq_entry, sglq_next,
3816				 &sglq_list, list) {
3817		list_del(&sglq_entry->list);
3818		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3819		kfree(sglq_entry);
3820		phba->sli4_hba.total_sglq_bufs--;
3821	}
3822	rc = lpfc_sli4_remove_all_sgl_pages(phba);
3823	if (rc) {
3824		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3825			"2005 Unable to deregister pages from HBA: %x", rc);
3826	}
3827	kfree(phba->sli4_hba.lpfc_els_sgl_array);
3828}
3829
3830/**
3831 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3832 * @phba: pointer to lpfc hba data structure.
3833 *
3834 * This routine is invoked to allocate the driver's active sgl memory.
3835 * This array will hold the sglq_entry's for active IOs.
3836 **/
3837static int
3838lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3839{
3840	int size;
3841	size = sizeof(struct lpfc_sglq *);
3842	size *= phba->sli4_hba.max_cfg_param.max_xri;
3843
3844	phba->sli4_hba.lpfc_sglq_active_list =
3845		kzalloc(size, GFP_KERNEL);
3846	if (!phba->sli4_hba.lpfc_sglq_active_list)
3847		return -ENOMEM;
3848	return 0;
3849}
3850
3851/**
3852 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3853 * @phba: pointer to lpfc hba data structure.
3854 *
3855 * This routine is invoked to walk through the array of active sglq entries
3856 * and free all of the resources.
3857 * This is just a place holder for now.
3858 **/
3859static void
3860lpfc_free_active_sgl(struct lpfc_hba *phba)
3861{
3862	kfree(phba->sli4_hba.lpfc_sglq_active_list);
3863}
3864
3865/**
3866 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3867 * @phba: pointer to lpfc hba data structure.
3868 *
3869 * This routine is invoked to allocate and initizlize the driver's sgl
3870 * list and set up the sgl xritag tag array accordingly.
3871 *
3872 * Return codes
3873 *	0 - sucessful
3874 *	other values - error
3875 **/
3876static int
3877lpfc_init_sgl_list(struct lpfc_hba *phba)
3878{
3879	struct lpfc_sglq *sglq_entry = NULL;
3880	int i;
3881	int els_xri_cnt;
3882
3883	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3884	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3885				"2400 lpfc_init_sgl_list els %d.\n",
3886				els_xri_cnt);
3887	/* Initialize and populate the sglq list per host/VF. */
3888	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3889	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3890
3891	/* Sanity check on XRI management */
3892	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3893		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3894				"2562 No room left for SCSI XRI allocation: "
3895				"max_xri=%d, els_xri=%d\n",
3896				phba->sli4_hba.max_cfg_param.max_xri,
3897				els_xri_cnt);
3898		return -ENOMEM;
3899	}
3900
3901	/* Allocate memory for the ELS XRI management array */
3902	phba->sli4_hba.lpfc_els_sgl_array =
3903			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3904			GFP_KERNEL);
3905
3906	if (!phba->sli4_hba.lpfc_els_sgl_array) {
3907		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3908				"2401 Failed to allocate memory for ELS "
3909				"XRI management array of size %d.\n",
3910				els_xri_cnt);
3911		return -ENOMEM;
3912	}
3913
3914	/* Keep the SCSI XRI into the XRI management array */
3915	phba->sli4_hba.scsi_xri_max =
3916			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3917	phba->sli4_hba.scsi_xri_cnt = 0;
3918
3919	phba->sli4_hba.lpfc_scsi_psb_array =
3920			kzalloc((sizeof(struct lpfc_scsi_buf *) *
3921			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3922
3923	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3924		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3925				"2563 Failed to allocate memory for SCSI "
3926				"XRI management array of size %d.\n",
3927				phba->sli4_hba.scsi_xri_max);
3928		kfree(phba->sli4_hba.lpfc_els_sgl_array);
3929		return -ENOMEM;
3930	}
3931
3932	for (i = 0; i < els_xri_cnt; i++) {
3933		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3934		if (sglq_entry == NULL) {
3935			printk(KERN_ERR "%s: only allocated %d sgls of "
3936				"expected %d count. Unloading driver.\n",
3937				__func__, i, els_xri_cnt);
3938			goto out_free_mem;
3939		}
3940
3941		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3942		if (sglq_entry->sli4_xritag == NO_XRI) {
3943			kfree(sglq_entry);
3944			printk(KERN_ERR "%s: failed to allocate XRI.\n"
3945				"Unloading driver.\n", __func__);
3946			goto out_free_mem;
3947		}
3948		sglq_entry->buff_type = GEN_BUFF_TYPE;
3949		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3950		if (sglq_entry->virt == NULL) {
3951			kfree(sglq_entry);
3952			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3953				"Unloading driver.\n", __func__);
3954			goto out_free_mem;
3955		}
3956		sglq_entry->sgl = sglq_entry->virt;
3957		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3958
3959		/* The list order is used by later block SGL registraton */
3960		spin_lock_irq(&phba->hbalock);
3961		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3962		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3963		phba->sli4_hba.total_sglq_bufs++;
3964		spin_unlock_irq(&phba->hbalock);
3965	}
3966	return 0;
3967
3968out_free_mem:
3969	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3970	lpfc_free_sgl_list(phba);
3971	return -ENOMEM;
3972}
3973
3974/**
3975 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3976 * @phba: pointer to lpfc hba data structure.
3977 *
3978 * This routine is invoked to post rpi header templates to the
3979 * HBA consistent with the SLI-4 interface spec.  This routine
3980 * posts a PAGE_SIZE memory region to the port to hold up to
3981 * PAGE_SIZE modulo 64 rpi context headers.
3982 * No locks are held here because this is an initialization routine
3983 * called only from probe or lpfc_online when interrupts are not
3984 * enabled and the driver is reinitializing the device.
3985 *
3986 * Return codes
3987 * 	0 - sucessful
3988 * 	ENOMEM - No availble memory
3989 *      EIO - The mailbox failed to complete successfully.
3990 **/
3991int
3992lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3993{
3994	int rc = 0;
3995	int longs;
3996	uint16_t rpi_count;
3997	struct lpfc_rpi_hdr *rpi_hdr;
3998
3999	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4000
4001	/*
4002	 * Provision an rpi bitmask range for discovery. The total count
4003	 * is the difference between max and base + 1.
4004	 */
4005	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4006		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4007
4008	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4009	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4010					   GFP_KERNEL);
4011	if (!phba->sli4_hba.rpi_bmask)
4012		return -ENOMEM;
4013
4014	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4015	if (!rpi_hdr) {
4016		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4017				"0391 Error during rpi post operation\n");
4018		lpfc_sli4_remove_rpis(phba);
4019		rc = -ENODEV;
4020	}
4021
4022	return rc;
4023}
4024
4025/**
4026 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4027 * @phba: pointer to lpfc hba data structure.
4028 *
4029 * This routine is invoked to allocate a single 4KB memory region to
4030 * support rpis and stores them in the phba.  This single region
4031 * provides support for up to 64 rpis.  The region is used globally
4032 * by the device.
4033 *
4034 * Returns:
4035 *   A valid rpi hdr on success.
4036 *   A NULL pointer on any failure.
4037 **/
4038struct lpfc_rpi_hdr *
4039lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4040{
4041	uint16_t rpi_limit, curr_rpi_range;
4042	struct lpfc_dmabuf *dmabuf;
4043	struct lpfc_rpi_hdr *rpi_hdr;
4044
4045	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4046		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4047
4048	spin_lock_irq(&phba->hbalock);
4049	curr_rpi_range = phba->sli4_hba.next_rpi;
4050	spin_unlock_irq(&phba->hbalock);
4051
4052	/*
4053	 * The port has a limited number of rpis. The increment here
4054	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4055	 * and to allow the full max_rpi range per port.
4056	 */
4057	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4058		return NULL;
4059
4060	/*
4061	 * First allocate the protocol header region for the port.  The
4062	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4063	 */
4064	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4065	if (!dmabuf)
4066		return NULL;
4067
4068	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4069					  LPFC_HDR_TEMPLATE_SIZE,
4070					  &dmabuf->phys,
4071					  GFP_KERNEL);
4072	if (!dmabuf->virt) {
4073		rpi_hdr = NULL;
4074		goto err_free_dmabuf;
4075	}
4076
4077	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4078	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4079		rpi_hdr = NULL;
4080		goto err_free_coherent;
4081	}
4082
4083	/* Save the rpi header data for cleanup later. */
4084	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4085	if (!rpi_hdr)
4086		goto err_free_coherent;
4087
4088	rpi_hdr->dmabuf = dmabuf;
4089	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4090	rpi_hdr->page_count = 1;
4091	spin_lock_irq(&phba->hbalock);
4092	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4093	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4094
4095	/*
4096	 * The next_rpi stores the next module-64 rpi value to post
4097	 * in any subsequent rpi memory region postings.
4098	 */
4099	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4100	spin_unlock_irq(&phba->hbalock);
4101	return rpi_hdr;
4102
4103 err_free_coherent:
4104	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4105			  dmabuf->virt, dmabuf->phys);
4106 err_free_dmabuf:
4107	kfree(dmabuf);
4108	return NULL;
4109}
4110
4111/**
4112 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4113 * @phba: pointer to lpfc hba data structure.
4114 *
4115 * This routine is invoked to remove all memory resources allocated
4116 * to support rpis. This routine presumes the caller has released all
4117 * rpis consumed by fabric or port logins and is prepared to have
4118 * the header pages removed.
4119 **/
4120void
4121lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4122{
4123	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4124
4125	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4126				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4127		list_del(&rpi_hdr->list);
4128		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4129				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4130		kfree(rpi_hdr->dmabuf);
4131		kfree(rpi_hdr);
4132	}
4133
4134	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4135	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4136}
4137
4138/**
4139 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4140 * @pdev: pointer to pci device data structure.
4141 *
4142 * This routine is invoked to allocate the driver hba data structure for an
4143 * HBA device. If the allocation is successful, the phba reference to the
4144 * PCI device data structure is set.
4145 *
4146 * Return codes
4147 *      pointer to @phba - sucessful
4148 *      NULL - error
4149 **/
4150static struct lpfc_hba *
4151lpfc_hba_alloc(struct pci_dev *pdev)
4152{
4153	struct lpfc_hba *phba;
4154
4155	/* Allocate memory for HBA structure */
4156	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4157	if (!phba) {
4158		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4159		return NULL;
4160	}
4161
4162	/* Set reference to PCI device in HBA structure */
4163	phba->pcidev = pdev;
4164
4165	/* Assign an unused board number */
4166	phba->brd_no = lpfc_get_instance();
4167	if (phba->brd_no < 0) {
4168		kfree(phba);
4169		return NULL;
4170	}
4171
4172	return phba;
4173}
4174
4175/**
4176 * lpfc_hba_free - Free driver hba data structure with a device.
4177 * @phba: pointer to lpfc hba data structure.
4178 *
4179 * This routine is invoked to free the driver hba data structure with an
4180 * HBA device.
4181 **/
4182static void
4183lpfc_hba_free(struct lpfc_hba *phba)
4184{
4185	/* Release the driver assigned board number */
4186	idr_remove(&lpfc_hba_index, phba->brd_no);
4187
4188	kfree(phba);
4189	return;
4190}
4191
4192/**
4193 * lpfc_create_shost - Create hba physical port with associated scsi host.
4194 * @phba: pointer to lpfc hba data structure.
4195 *
4196 * This routine is invoked to create HBA physical port and associate a SCSI
4197 * host with it.
4198 *
4199 * Return codes
4200 *      0 - sucessful
4201 *      other values - error
4202 **/
4203static int
4204lpfc_create_shost(struct lpfc_hba *phba)
4205{
4206	struct lpfc_vport *vport;
4207	struct Scsi_Host  *shost;
4208
4209	/* Initialize HBA FC structure */
4210	phba->fc_edtov = FF_DEF_EDTOV;
4211	phba->fc_ratov = FF_DEF_RATOV;
4212	phba->fc_altov = FF_DEF_ALTOV;
4213	phba->fc_arbtov = FF_DEF_ARBTOV;
4214
4215	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4216	if (!vport)
4217		return -ENODEV;
4218
4219	shost = lpfc_shost_from_vport(vport);
4220	phba->pport = vport;
4221	lpfc_debugfs_initialize(vport);
4222	/* Put reference to SCSI host to driver's device private data */
4223	pci_set_drvdata(phba->pcidev, shost);
4224
4225	return 0;
4226}
4227
4228/**
4229 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4230 * @phba: pointer to lpfc hba data structure.
4231 *
4232 * This routine is invoked to destroy HBA physical port and the associated
4233 * SCSI host.
4234 **/
4235static void
4236lpfc_destroy_shost(struct lpfc_hba *phba)
4237{
4238	struct lpfc_vport *vport = phba->pport;
4239
4240	/* Destroy physical port that associated with the SCSI host */
4241	destroy_port(vport);
4242
4243	return;
4244}
4245
4246/**
4247 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4248 * @phba: pointer to lpfc hba data structure.
4249 * @shost: the shost to be used to detect Block guard settings.
4250 *
4251 * This routine sets up the local Block guard protocol settings for @shost.
4252 * This routine also allocates memory for debugging bg buffers.
4253 **/
4254static void
4255lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4256{
4257	int pagecnt = 10;
4258	if (lpfc_prot_mask && lpfc_prot_guard) {
4259		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4260				"1478 Registering BlockGuard with the "
4261				"SCSI layer\n");
4262		scsi_host_set_prot(shost, lpfc_prot_mask);
4263		scsi_host_set_guard(shost, lpfc_prot_guard);
4264	}
4265	if (!_dump_buf_data) {
4266		while (pagecnt) {
4267			spin_lock_init(&_dump_buf_lock);
4268			_dump_buf_data =
4269				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4270			if (_dump_buf_data) {
4271				printk(KERN_ERR "BLKGRD allocated %d pages for "
4272				       "_dump_buf_data at 0x%p\n",
4273				       (1 << pagecnt), _dump_buf_data);
4274				_dump_buf_data_order = pagecnt;
4275				memset(_dump_buf_data, 0,
4276				       ((1 << PAGE_SHIFT) << pagecnt));
4277				break;
4278			} else
4279				--pagecnt;
4280		}
4281		if (!_dump_buf_data_order)
4282			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4283			       "memory for hexdump\n");
4284	} else
4285		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4286		       "\n", _dump_buf_data);
4287	if (!_dump_buf_dif) {
4288		while (pagecnt) {
4289			_dump_buf_dif =
4290				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4291			if (_dump_buf_dif) {
4292				printk(KERN_ERR "BLKGRD allocated %d pages for "
4293				       "_dump_buf_dif at 0x%p\n",
4294				       (1 << pagecnt), _dump_buf_dif);
4295				_dump_buf_dif_order = pagecnt;
4296				memset(_dump_buf_dif, 0,
4297				       ((1 << PAGE_SHIFT) << pagecnt));
4298				break;
4299			} else
4300				--pagecnt;
4301		}
4302		if (!_dump_buf_dif_order)
4303			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4304			       "memory for hexdump\n");
4305	} else
4306		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4307		       _dump_buf_dif);
4308}
4309
4310/**
4311 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4312 * @phba: pointer to lpfc hba data structure.
4313 *
4314 * This routine is invoked to perform all the necessary post initialization
4315 * setup for the device.
4316 **/
4317static void
4318lpfc_post_init_setup(struct lpfc_hba *phba)
4319{
4320	struct Scsi_Host  *shost;
4321	struct lpfc_adapter_event_header adapter_event;
4322
4323	/* Get the default values for Model Name and Description */
4324	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4325
4326	/*
4327	 * hba setup may have changed the hba_queue_depth so we need to
4328	 * adjust the value of can_queue.
4329	 */
4330	shost = pci_get_drvdata(phba->pcidev);
4331	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4332	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4333		lpfc_setup_bg(phba, shost);
4334
4335	lpfc_host_attrib_init(shost);
4336
4337	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4338		spin_lock_irq(shost->host_lock);
4339		lpfc_poll_start_timer(phba);
4340		spin_unlock_irq(shost->host_lock);
4341	}
4342
4343	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4344			"0428 Perform SCSI scan\n");
4345	/* Send board arrival event to upper layer */
4346	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4347	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4348	fc_host_post_vendor_event(shost, fc_get_event_number(),
4349				  sizeof(adapter_event),
4350				  (char *) &adapter_event,
4351				  LPFC_NL_VENDOR_ID);
4352	return;
4353}
4354
4355/**
4356 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4357 * @phba: pointer to lpfc hba data structure.
4358 *
4359 * This routine is invoked to set up the PCI device memory space for device
4360 * with SLI-3 interface spec.
4361 *
4362 * Return codes
4363 * 	0 - sucessful
4364 * 	other values - error
4365 **/
4366static int
4367lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4368{
4369	struct pci_dev *pdev;
4370	unsigned long bar0map_len, bar2map_len;
4371	int i, hbq_count;
4372	void *ptr;
4373	int error = -ENODEV;
4374
4375	/* Obtain PCI device reference */
4376	if (!phba->pcidev)
4377		return error;
4378	else
4379		pdev = phba->pcidev;
4380
4381	/* Set the device DMA mask size */
4382	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4383		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4384			return error;
4385
4386	/* Get the bus address of Bar0 and Bar2 and the number of bytes
4387	 * required by each mapping.
4388	 */
4389	phba->pci_bar0_map = pci_resource_start(pdev, 0);
4390	bar0map_len = pci_resource_len(pdev, 0);
4391
4392	phba->pci_bar2_map = pci_resource_start(pdev, 2);
4393	bar2map_len = pci_resource_len(pdev, 2);
4394
4395	/* Map HBA SLIM to a kernel virtual address. */
4396	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4397	if (!phba->slim_memmap_p) {
4398		dev_printk(KERN_ERR, &pdev->dev,
4399			   "ioremap failed for SLIM memory.\n");
4400		goto out;
4401	}
4402
4403	/* Map HBA Control Registers to a kernel virtual address. */
4404	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4405	if (!phba->ctrl_regs_memmap_p) {
4406		dev_printk(KERN_ERR, &pdev->dev,
4407			   "ioremap failed for HBA control registers.\n");
4408		goto out_iounmap_slim;
4409	}
4410
4411	/* Allocate memory for SLI-2 structures */
4412	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4413					       SLI2_SLIM_SIZE,
4414					       &phba->slim2p.phys,
4415					       GFP_KERNEL);
4416	if (!phba->slim2p.virt)
4417		goto out_iounmap;
4418
4419	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4420	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4421	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4422	phba->IOCBs = (phba->slim2p.virt +
4423		       offsetof(struct lpfc_sli2_slim, IOCBs));
4424
4425	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4426						 lpfc_sli_hbq_size(),
4427						 &phba->hbqslimp.phys,
4428						 GFP_KERNEL);
4429	if (!phba->hbqslimp.virt)
4430		goto out_free_slim;
4431
4432	hbq_count = lpfc_sli_hbq_count();
4433	ptr = phba->hbqslimp.virt;
4434	for (i = 0; i < hbq_count; ++i) {
4435		phba->hbqs[i].hbq_virt = ptr;
4436		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4437		ptr += (lpfc_hbq_defs[i]->entry_count *
4438			sizeof(struct lpfc_hbq_entry));
4439	}
4440	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4441	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4442
4443	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4444
4445	INIT_LIST_HEAD(&phba->rb_pend_list);
4446
4447	phba->MBslimaddr = phba->slim_memmap_p;
4448	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4449	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4450	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4451	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4452
4453	return 0;
4454
4455out_free_slim:
4456	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4457			  phba->slim2p.virt, phba->slim2p.phys);
4458out_iounmap:
4459	iounmap(phba->ctrl_regs_memmap_p);
4460out_iounmap_slim:
4461	iounmap(phba->slim_memmap_p);
4462out:
4463	return error;
4464}
4465
4466/**
4467 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4468 * @phba: pointer to lpfc hba data structure.
4469 *
4470 * This routine is invoked to unset the PCI device memory space for device
4471 * with SLI-3 interface spec.
4472 **/
4473static void
4474lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4475{
4476	struct pci_dev *pdev;
4477
4478	/* Obtain PCI device reference */
4479	if (!phba->pcidev)
4480		return;
4481	else
4482		pdev = phba->pcidev;
4483
4484	/* Free coherent DMA memory allocated */
4485	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4486			  phba->hbqslimp.virt, phba->hbqslimp.phys);
4487	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4488			  phba->slim2p.virt, phba->slim2p.phys);
4489
4490	/* I/O memory unmap */
4491	iounmap(phba->ctrl_regs_memmap_p);
4492	iounmap(phba->slim_memmap_p);
4493
4494	return;
4495}
4496
4497/**
4498 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4499 * @phba: pointer to lpfc hba data structure.
4500 *
4501 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4502 * done and check status.
4503 *
4504 * Return 0 if successful, otherwise -ENODEV.
4505 **/
4506int
4507lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4508{
4509	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4510	uint32_t onlnreg0, onlnreg1;
4511	int i, port_error = -ENODEV;
4512
4513	if (!phba->sli4_hba.STAregaddr)
4514		return -ENODEV;
4515
4516	/* Wait up to 30 seconds for the SLI Port POST done and ready */
4517	for (i = 0; i < 3000; i++) {
4518		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4519		/* Encounter fatal POST error, break out */
4520		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4521			port_error = -ENODEV;
4522			break;
4523		}
4524		if (LPFC_POST_STAGE_ARMFW_READY ==
4525		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4526			port_error = 0;
4527			break;
4528		}
4529		msleep(10);
4530	}
4531
4532	if (port_error)
4533		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4534			"1408 Failure HBA POST Status: sta_reg=0x%x, "
4535			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4536			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
4537			bf_get(lpfc_hst_state_perr, &sta_reg),
4538			bf_get(lpfc_hst_state_sfi, &sta_reg),
4539			bf_get(lpfc_hst_state_nip, &sta_reg),
4540			bf_get(lpfc_hst_state_ipc, &sta_reg),
4541			bf_get(lpfc_hst_state_xrom, &sta_reg),
4542			bf_get(lpfc_hst_state_dl, &sta_reg),
4543			bf_get(lpfc_hst_state_port_status, &sta_reg));
4544
4545	/* Log device information */
4546	scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
4547	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4548			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4549			"FeatureL1=0x%x, FeatureL2=0x%x\n",
4550			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4551			bf_get(lpfc_scratchpad_slirev, &scratchpad),
4552			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4553			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4554
4555	/* With uncoverable error, log the error message and return error */
4556	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4557	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4558	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4559		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4560		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4561		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4562			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4563					"1422 HBA Unrecoverable error: "
4564					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4565					"online0_reg=0x%x, online1_reg=0x%x\n",
4566					uerrlo_reg.word0, uerrhi_reg.word0,
4567					onlnreg0, onlnreg1);
4568		}
4569		return -ENODEV;
4570	}
4571
4572	return port_error;
4573}
4574
4575/**
4576 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4578 *
4579 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4580 * memory map.
4581 **/
4582static void
4583lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4584{
4585	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4586					LPFC_UERR_STATUS_LO;
4587	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4588					LPFC_UERR_STATUS_HI;
4589	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4590					LPFC_ONLINE0;
4591	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4592					LPFC_ONLINE1;
4593	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4594					LPFC_SCRATCHPAD;
4595}
4596
4597/**
4598 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4599 * @phba: pointer to lpfc hba data structure.
4600 *
4601 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4602 * memory map.
4603 **/
4604static void
4605lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4606{
4607
4608	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4609				    LPFC_HST_STATE;
4610	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4611				    LPFC_HST_ISR0;
4612	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4613				    LPFC_HST_IMR0;
4614	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4615				     LPFC_HST_ISCR0;
4616	return;
4617}
4618
4619/**
4620 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4621 * @phba: pointer to lpfc hba data structure.
4622 * @vf: virtual function number
4623 *
4624 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4625 * based on the given viftual function number, @vf.
4626 *
4627 * Return 0 if successful, otherwise -ENODEV.
4628 **/
4629static int
4630lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4631{
4632	if (vf > LPFC_VIR_FUNC_MAX)
4633		return -ENODEV;
4634
4635	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4636				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4637	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4638				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4639	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4640				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4641	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4642				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4643	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4644				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4645	return 0;
4646}
4647
4648/**
4649 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4650 * @phba: pointer to lpfc hba data structure.
4651 *
4652 * This routine is invoked to create the bootstrap mailbox
4653 * region consistent with the SLI-4 interface spec.  This
4654 * routine allocates all memory necessary to communicate
4655 * mailbox commands to the port and sets up all alignment
4656 * needs.  No locks are expected to be held when calling
4657 * this routine.
4658 *
4659 * Return codes
4660 * 	0 - sucessful
4661 * 	ENOMEM - could not allocated memory.
4662 **/
4663static int
4664lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4665{
4666	uint32_t bmbx_size;
4667	struct lpfc_dmabuf *dmabuf;
4668	struct dma_address *dma_address;
4669	uint32_t pa_addr;
4670	uint64_t phys_addr;
4671
4672	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4673	if (!dmabuf)
4674		return -ENOMEM;
4675
4676	/*
4677	 * The bootstrap mailbox region is comprised of 2 parts
4678	 * plus an alignment restriction of 16 bytes.
4679	 */
4680	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4681	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4682					  bmbx_size,
4683					  &dmabuf->phys,
4684					  GFP_KERNEL);
4685	if (!dmabuf->virt) {
4686		kfree(dmabuf);
4687		return -ENOMEM;
4688	}
4689	memset(dmabuf->virt, 0, bmbx_size);
4690
4691	/*
4692	 * Initialize the bootstrap mailbox pointers now so that the register
4693	 * operations are simple later.  The mailbox dma address is required
4694	 * to be 16-byte aligned.  Also align the virtual memory as each
4695	 * maibox is copied into the bmbx mailbox region before issuing the
4696	 * command to the port.
4697	 */
4698	phba->sli4_hba.bmbx.dmabuf = dmabuf;
4699	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4700
4701	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4702					      LPFC_ALIGN_16_BYTE);
4703	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4704					      LPFC_ALIGN_16_BYTE);
4705
4706	/*
4707	 * Set the high and low physical addresses now.  The SLI4 alignment
4708	 * requirement is 16 bytes and the mailbox is posted to the port
4709	 * as two 30-bit addresses.  The other data is a bit marking whether
4710	 * the 30-bit address is the high or low address.
4711	 * Upcast bmbx aphys to 64bits so shift instruction compiles
4712	 * clean on 32 bit machines.
4713	 */
4714	dma_address = &phba->sli4_hba.bmbx.dma_address;
4715	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4716	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4717	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4718					   LPFC_BMBX_BIT1_ADDR_HI);
4719
4720	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4721	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4722					   LPFC_BMBX_BIT1_ADDR_LO);
4723	return 0;
4724}
4725
4726/**
4727 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4728 * @phba: pointer to lpfc hba data structure.
4729 *
4730 * This routine is invoked to teardown the bootstrap mailbox
4731 * region and release all host resources. This routine requires
4732 * the caller to ensure all mailbox commands recovered, no
4733 * additional mailbox comands are sent, and interrupts are disabled
4734 * before calling this routine.
4735 *
4736 **/
4737static void
4738lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4739{
4740	dma_free_coherent(&phba->pcidev->dev,
4741			  phba->sli4_hba.bmbx.bmbx_size,
4742			  phba->sli4_hba.bmbx.dmabuf->virt,
4743			  phba->sli4_hba.bmbx.dmabuf->phys);
4744
4745	kfree(phba->sli4_hba.bmbx.dmabuf);
4746	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4747}
4748
4749/**
4750 * lpfc_sli4_read_config - Get the config parameters.
4751 * @phba: pointer to lpfc hba data structure.
4752 *
4753 * This routine is invoked to read the configuration parameters from the HBA.
4754 * The configuration parameters are used to set the base and maximum values
4755 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
4756 * allocation for the port.
4757 *
4758 * Return codes
4759 * 	0 - sucessful
4760 * 	ENOMEM - No availble memory
4761 *      EIO - The mailbox failed to complete successfully.
4762 **/
4763static int
4764lpfc_sli4_read_config(struct lpfc_hba *phba)
4765{
4766	LPFC_MBOXQ_t *pmb;
4767	struct lpfc_mbx_read_config *rd_config;
4768	uint32_t rc = 0;
4769
4770	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4771	if (!pmb) {
4772		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4773				"2011 Unable to allocate memory for issuing "
4774				"SLI_CONFIG_SPECIAL mailbox command\n");
4775		return -ENOMEM;
4776	}
4777
4778	lpfc_read_config(phba, pmb);
4779
4780	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4781	if (rc != MBX_SUCCESS) {
4782		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4783			"2012 Mailbox failed , mbxCmd x%x "
4784			"READ_CONFIG, mbxStatus x%x\n",
4785			bf_get(lpfc_mqe_command, &pmb->u.mqe),
4786			bf_get(lpfc_mqe_status, &pmb->u.mqe));
4787		rc = -EIO;
4788	} else {
4789		rd_config = &pmb->u.mqe.un.rd_config;
4790		phba->sli4_hba.max_cfg_param.max_xri =
4791			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4792		phba->sli4_hba.max_cfg_param.xri_base =
4793			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4794		phba->sli4_hba.max_cfg_param.max_vpi =
4795			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4796		phba->sli4_hba.max_cfg_param.vpi_base =
4797			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4798		phba->sli4_hba.max_cfg_param.max_rpi =
4799			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4800		phba->sli4_hba.max_cfg_param.rpi_base =
4801			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4802		phba->sli4_hba.max_cfg_param.max_vfi =
4803			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4804		phba->sli4_hba.max_cfg_param.vfi_base =
4805			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4806		phba->sli4_hba.max_cfg_param.max_fcfi =
4807			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4808		phba->sli4_hba.max_cfg_param.fcfi_base =
4809			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4810		phba->sli4_hba.max_cfg_param.max_eq =
4811			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4812		phba->sli4_hba.max_cfg_param.max_rq =
4813			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4814		phba->sli4_hba.max_cfg_param.max_wq =
4815			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4816		phba->sli4_hba.max_cfg_param.max_cq =
4817			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4818		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4819		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4820		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4821		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4822		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4823		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4824		phba->max_vports = phba->max_vpi;
4825		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4826				"2003 cfg params XRI(B:%d M:%d), "
4827				"VPI(B:%d M:%d) "
4828				"VFI(B:%d M:%d) "
4829				"RPI(B:%d M:%d) "
4830				"FCFI(B:%d M:%d)\n",
4831				phba->sli4_hba.max_cfg_param.xri_base,
4832				phba->sli4_hba.max_cfg_param.max_xri,
4833				phba->sli4_hba.max_cfg_param.vpi_base,
4834				phba->sli4_hba.max_cfg_param.max_vpi,
4835				phba->sli4_hba.max_cfg_param.vfi_base,
4836				phba->sli4_hba.max_cfg_param.max_vfi,
4837				phba->sli4_hba.max_cfg_param.rpi_base,
4838				phba->sli4_hba.max_cfg_param.max_rpi,
4839				phba->sli4_hba.max_cfg_param.fcfi_base,
4840				phba->sli4_hba.max_cfg_param.max_fcfi);
4841	}
4842	mempool_free(pmb, phba->mbox_mem_pool);
4843
4844	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
4845	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4846		phba->cfg_hba_queue_depth =
4847				phba->sli4_hba.max_cfg_param.max_xri;
4848	return rc;
4849}
4850
4851/**
4852 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
4853 * @phba: pointer to lpfc hba data structure.
4854 *
4855 * This routine is invoked to setup the host-side endian order to the
4856 * HBA consistent with the SLI-4 interface spec.
4857 *
4858 * Return codes
4859 * 	0 - sucessful
4860 * 	ENOMEM - No availble memory
4861 *      EIO - The mailbox failed to complete successfully.
4862 **/
4863static int
4864lpfc_setup_endian_order(struct lpfc_hba *phba)
4865{
4866	LPFC_MBOXQ_t *mboxq;
4867	uint32_t rc = 0;
4868	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4869				      HOST_ENDIAN_HIGH_WORD1};
4870
4871	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4872	if (!mboxq) {
4873		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4874				"0492 Unable to allocate memory for issuing "
4875				"SLI_CONFIG_SPECIAL mailbox command\n");
4876		return -ENOMEM;
4877	}
4878
4879	/*
4880	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4881	 * words to contain special data values and no other data.
4882	 */
4883	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4884	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4885	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4886	if (rc != MBX_SUCCESS) {
4887		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4888				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
4889				"status x%x\n",
4890				rc);
4891		rc = -EIO;
4892	}
4893
4894	mempool_free(mboxq, phba->mbox_mem_pool);
4895	return rc;
4896}
4897
4898/**
4899 * lpfc_sli4_queue_create - Create all the SLI4 queues
4900 * @phba: pointer to lpfc hba data structure.
4901 *
4902 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4903 * operation. For each SLI4 queue type, the parameters such as queue entry
4904 * count (queue depth) shall be taken from the module parameter. For now,
4905 * we just use some constant number as place holder.
4906 *
4907 * Return codes
4908 *      0 - sucessful
4909 *      ENOMEM - No availble memory
4910 *      EIO - The mailbox failed to complete successfully.
4911 **/
4912static int
4913lpfc_sli4_queue_create(struct lpfc_hba *phba)
4914{
4915	struct lpfc_queue *qdesc;
4916	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4917	int cfg_fcp_wq_count;
4918	int cfg_fcp_eq_count;
4919
4920	/*
4921	 * Sanity check for confiugred queue parameters against the run-time
4922	 * device parameters
4923	 */
4924
4925	/* Sanity check on FCP fast-path WQ parameters */
4926	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4927	if (cfg_fcp_wq_count >
4928	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4929		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4930				   LPFC_SP_WQN_DEF;
4931		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4932			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4933					"2581 Not enough WQs (%d) from "
4934					"the pci function for supporting "
4935					"FCP WQs (%d)\n",
4936					phba->sli4_hba.max_cfg_param.max_wq,
4937					phba->cfg_fcp_wq_count);
4938			goto out_error;
4939		}
4940		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4941				"2582 Not enough WQs (%d) from the pci "
4942				"function for supporting the requested "
4943				"FCP WQs (%d), the actual FCP WQs can "
4944				"be supported: %d\n",
4945				phba->sli4_hba.max_cfg_param.max_wq,
4946				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4947	}
4948	/* The actual number of FCP work queues adopted */
4949	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4950
4951	/* Sanity check on FCP fast-path EQ parameters */
4952	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4953	if (cfg_fcp_eq_count >
4954	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4955		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4956				   LPFC_SP_EQN_DEF;
4957		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4958			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4959					"2574 Not enough EQs (%d) from the "
4960					"pci function for supporting FCP "
4961					"EQs (%d)\n",
4962					phba->sli4_hba.max_cfg_param.max_eq,
4963					phba->cfg_fcp_eq_count);
4964			goto out_error;
4965		}
4966		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4967				"2575 Not enough EQs (%d) from the pci "
4968				"function for supporting the requested "
4969				"FCP EQs (%d), the actual FCP EQs can "
4970				"be supported: %d\n",
4971				phba->sli4_hba.max_cfg_param.max_eq,
4972				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4973	}
4974	/* It does not make sense to have more EQs than WQs */
4975	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4976		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4977				"2593 The number of FCP EQs (%d) is more "
4978				"than the number of FCP WQs (%d), take "
4979				"the number of FCP EQs same as than of "
4980				"WQs (%d)\n", cfg_fcp_eq_count,
4981				phba->cfg_fcp_wq_count,
4982				phba->cfg_fcp_wq_count);
4983		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4984	}
4985	/* The actual number of FCP event queues adopted */
4986	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4987	/* The overall number of event queues used */
4988	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4989
4990	/*
4991	 * Create Event Queues (EQs)
4992	 */
4993
4994	/* Get EQ depth from module parameter, fake the default for now */
4995	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4996	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4997
4998	/* Create slow path event queue */
4999	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5000				      phba->sli4_hba.eq_ecount);
5001	if (!qdesc) {
5002		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5003				"0496 Failed allocate slow-path EQ\n");
5004		goto out_error;
5005	}
5006	phba->sli4_hba.sp_eq = qdesc;
5007
5008	/* Create fast-path FCP Event Queue(s) */
5009	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5010			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5011	if (!phba->sli4_hba.fp_eq) {
5012		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5013				"2576 Failed allocate memory for fast-path "
5014				"EQ record array\n");
5015		goto out_free_sp_eq;
5016	}
5017	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5018		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5019					      phba->sli4_hba.eq_ecount);
5020		if (!qdesc) {
5021			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5022					"0497 Failed allocate fast-path EQ\n");
5023			goto out_free_fp_eq;
5024		}
5025		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5026	}
5027
5028	/*
5029	 * Create Complete Queues (CQs)
5030	 */
5031
5032	/* Get CQ depth from module parameter, fake the default for now */
5033	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5034	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5035
5036	/* Create slow-path Mailbox Command Complete Queue */
5037	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5038				      phba->sli4_hba.cq_ecount);
5039	if (!qdesc) {
5040		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5041				"0500 Failed allocate slow-path mailbox CQ\n");
5042		goto out_free_fp_eq;
5043	}
5044	phba->sli4_hba.mbx_cq = qdesc;
5045
5046	/* Create slow-path ELS Complete Queue */
5047	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5048				      phba->sli4_hba.cq_ecount);
5049	if (!qdesc) {
5050		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5051				"0501 Failed allocate slow-path ELS CQ\n");
5052		goto out_free_mbx_cq;
5053	}
5054	phba->sli4_hba.els_cq = qdesc;
5055
5056	/* Create slow-path Unsolicited Receive Complete Queue */
5057	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5058				      phba->sli4_hba.cq_ecount);
5059	if (!qdesc) {
5060		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5061				"0502 Failed allocate slow-path USOL RX CQ\n");
5062		goto out_free_els_cq;
5063	}
5064	phba->sli4_hba.rxq_cq = qdesc;
5065
5066	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5067	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5068				phba->cfg_fcp_eq_count), GFP_KERNEL);
5069	if (!phba->sli4_hba.fcp_cq) {
5070		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5071				"2577 Failed allocate memory for fast-path "
5072				"CQ record array\n");
5073		goto out_free_rxq_cq;
5074	}
5075	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5076		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5077					      phba->sli4_hba.cq_ecount);
5078		if (!qdesc) {
5079			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5080					"0499 Failed allocate fast-path FCP "
5081					"CQ (%d)\n", fcp_cqidx);
5082			goto out_free_fcp_cq;
5083		}
5084		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5085	}
5086
5087	/* Create Mailbox Command Queue */
5088	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5089	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5090
5091	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5092				      phba->sli4_hba.mq_ecount);
5093	if (!qdesc) {
5094		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5095				"0505 Failed allocate slow-path MQ\n");
5096		goto out_free_fcp_cq;
5097	}
5098	phba->sli4_hba.mbx_wq = qdesc;
5099
5100	/*
5101	 * Create all the Work Queues (WQs)
5102	 */
5103	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5104	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5105
5106	/* Create slow-path ELS Work Queue */
5107	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5108				      phba->sli4_hba.wq_ecount);
5109	if (!qdesc) {
5110		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5111				"0504 Failed allocate slow-path ELS WQ\n");
5112		goto out_free_mbx_wq;
5113	}
5114	phba->sli4_hba.els_wq = qdesc;
5115
5116	/* Create fast-path FCP Work Queue(s) */
5117	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5118				phba->cfg_fcp_wq_count), GFP_KERNEL);
5119	if (!phba->sli4_hba.fcp_wq) {
5120		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5121				"2578 Failed allocate memory for fast-path "
5122				"WQ record array\n");
5123		goto out_free_els_wq;
5124	}
5125	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5126		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5127					      phba->sli4_hba.wq_ecount);
5128		if (!qdesc) {
5129			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5130					"0503 Failed allocate fast-path FCP "
5131					"WQ (%d)\n", fcp_wqidx);
5132			goto out_free_fcp_wq;
5133		}
5134		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5135	}
5136
5137	/*
5138	 * Create Receive Queue (RQ)
5139	 */
5140	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5141	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5142
5143	/* Create Receive Queue for header */
5144	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5145				      phba->sli4_hba.rq_ecount);
5146	if (!qdesc) {
5147		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5148				"0506 Failed allocate receive HRQ\n");
5149		goto out_free_fcp_wq;
5150	}
5151	phba->sli4_hba.hdr_rq = qdesc;
5152
5153	/* Create Receive Queue for data */
5154	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5155				      phba->sli4_hba.rq_ecount);
5156	if (!qdesc) {
5157		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5158				"0507 Failed allocate receive DRQ\n");
5159		goto out_free_hdr_rq;
5160	}
5161	phba->sli4_hba.dat_rq = qdesc;
5162
5163	return 0;
5164
5165out_free_hdr_rq:
5166	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5167	phba->sli4_hba.hdr_rq = NULL;
5168out_free_fcp_wq:
5169	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5170		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5171		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5172	}
5173	kfree(phba->sli4_hba.fcp_wq);
5174out_free_els_wq:
5175	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5176	phba->sli4_hba.els_wq = NULL;
5177out_free_mbx_wq:
5178	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5179	phba->sli4_hba.mbx_wq = NULL;
5180out_free_fcp_cq:
5181	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5182		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5183		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5184	}
5185	kfree(phba->sli4_hba.fcp_cq);
5186out_free_rxq_cq:
5187	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5188	phba->sli4_hba.rxq_cq = NULL;
5189out_free_els_cq:
5190	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5191	phba->sli4_hba.els_cq = NULL;
5192out_free_mbx_cq:
5193	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5194	phba->sli4_hba.mbx_cq = NULL;
5195out_free_fp_eq:
5196	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5197		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5198		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5199	}
5200	kfree(phba->sli4_hba.fp_eq);
5201out_free_sp_eq:
5202	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5203	phba->sli4_hba.sp_eq = NULL;
5204out_error:
5205	return -ENOMEM;
5206}
5207
5208/**
5209 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5210 * @phba: pointer to lpfc hba data structure.
5211 *
5212 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5213 * operation.
5214 *
5215 * Return codes
5216 *      0 - sucessful
5217 *      ENOMEM - No availble memory
5218 *      EIO - The mailbox failed to complete successfully.
5219 **/
5220static void
5221lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5222{
5223	int fcp_qidx;
5224
5225	/* Release mailbox command work queue */
5226	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5227	phba->sli4_hba.mbx_wq = NULL;
5228
5229	/* Release ELS work queue */
5230	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5231	phba->sli4_hba.els_wq = NULL;
5232
5233	/* Release FCP work queue */
5234	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5235		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5236	kfree(phba->sli4_hba.fcp_wq);
5237	phba->sli4_hba.fcp_wq = NULL;
5238
5239	/* Release unsolicited receive queue */
5240	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5241	phba->sli4_hba.hdr_rq = NULL;
5242	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5243	phba->sli4_hba.dat_rq = NULL;
5244
5245	/* Release unsolicited receive complete queue */
5246	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5247	phba->sli4_hba.rxq_cq = NULL;
5248
5249	/* Release ELS complete queue */
5250	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5251	phba->sli4_hba.els_cq = NULL;
5252
5253	/* Release mailbox command complete queue */
5254	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5255	phba->sli4_hba.mbx_cq = NULL;
5256
5257	/* Release FCP response complete queue */
5258	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5259		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5260	kfree(phba->sli4_hba.fcp_cq);
5261	phba->sli4_hba.fcp_cq = NULL;
5262
5263	/* Release fast-path event queue */
5264	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5265		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5266	kfree(phba->sli4_hba.fp_eq);
5267	phba->sli4_hba.fp_eq = NULL;
5268
5269	/* Release slow-path event queue */
5270	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5271	phba->sli4_hba.sp_eq = NULL;
5272
5273	return;
5274}
5275
5276/**
5277 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5278 * @phba: pointer to lpfc hba data structure.
5279 *
5280 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5281 * operation.
5282 *
5283 * Return codes
5284 *      0 - sucessful
5285 *      ENOMEM - No availble memory
5286 *      EIO - The mailbox failed to complete successfully.
5287 **/
5288int
5289lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5290{
5291	int rc = -ENOMEM;
5292	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5293	int fcp_cq_index = 0;
5294
5295	/*
5296	 * Set up Event Queues (EQs)
5297	 */
5298
5299	/* Set up slow-path event queue */
5300	if (!phba->sli4_hba.sp_eq) {
5301		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5302				"0520 Slow-path EQ not allocated\n");
5303		goto out_error;
5304	}
5305	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5306			    LPFC_SP_DEF_IMAX);
5307	if (rc) {
5308		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5309				"0521 Failed setup of slow-path EQ: "
5310				"rc = 0x%x\n", rc);
5311		goto out_error;
5312	}
5313	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5314			"2583 Slow-path EQ setup: queue-id=%d\n",
5315			phba->sli4_hba.sp_eq->queue_id);
5316
5317	/* Set up fast-path event queue */
5318	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5319		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5320			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5321					"0522 Fast-path EQ (%d) not "
5322					"allocated\n", fcp_eqidx);
5323			goto out_destroy_fp_eq;
5324		}
5325		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5326				    phba->cfg_fcp_imax);
5327		if (rc) {
5328			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5329					"0523 Failed setup of fast-path EQ "
5330					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5331			goto out_destroy_fp_eq;
5332		}
5333		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5334				"2584 Fast-path EQ setup: "
5335				"queue[%d]-id=%d\n", fcp_eqidx,
5336				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5337	}
5338
5339	/*
5340	 * Set up Complete Queues (CQs)
5341	 */
5342
5343	/* Set up slow-path MBOX Complete Queue as the first CQ */
5344	if (!phba->sli4_hba.mbx_cq) {
5345		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5346				"0528 Mailbox CQ not allocated\n");
5347		goto out_destroy_fp_eq;
5348	}
5349	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5350			    LPFC_MCQ, LPFC_MBOX);
5351	if (rc) {
5352		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5353				"0529 Failed setup of slow-path mailbox CQ: "
5354				"rc = 0x%x\n", rc);
5355		goto out_destroy_fp_eq;
5356	}
5357	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5358			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5359			phba->sli4_hba.mbx_cq->queue_id,
5360			phba->sli4_hba.sp_eq->queue_id);
5361
5362	/* Set up slow-path ELS Complete Queue */
5363	if (!phba->sli4_hba.els_cq) {
5364		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5365				"0530 ELS CQ not allocated\n");
5366		goto out_destroy_mbx_cq;
5367	}
5368	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5369			    LPFC_WCQ, LPFC_ELS);
5370	if (rc) {
5371		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5372				"0531 Failed setup of slow-path ELS CQ: "
5373				"rc = 0x%x\n", rc);
5374		goto out_destroy_mbx_cq;
5375	}
5376	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5377			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5378			phba->sli4_hba.els_cq->queue_id,
5379			phba->sli4_hba.sp_eq->queue_id);
5380
5381	/* Set up slow-path Unsolicited Receive Complete Queue */
5382	if (!phba->sli4_hba.rxq_cq) {
5383		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5384				"0532 USOL RX CQ not allocated\n");
5385		goto out_destroy_els_cq;
5386	}
5387	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5388			    LPFC_RCQ, LPFC_USOL);
5389	if (rc) {
5390		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391				"0533 Failed setup of slow-path USOL RX CQ: "
5392				"rc = 0x%x\n", rc);
5393		goto out_destroy_els_cq;
5394	}
5395	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5397			phba->sli4_hba.rxq_cq->queue_id,
5398			phba->sli4_hba.sp_eq->queue_id);
5399
5400	/* Set up fast-path FCP Response Complete Queue */
5401	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5402		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5403			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5404					"0526 Fast-path FCP CQ (%d) not "
5405					"allocated\n", fcp_cqidx);
5406			goto out_destroy_fcp_cq;
5407		}
5408		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5409				    phba->sli4_hba.fp_eq[fcp_cqidx],
5410				    LPFC_WCQ, LPFC_FCP);
5411		if (rc) {
5412			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5413					"0527 Failed setup of fast-path FCP "
5414					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5415			goto out_destroy_fcp_cq;
5416		}
5417		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5418				"2588 FCP CQ setup: cq[%d]-id=%d, "
5419				"parent eq[%d]-id=%d\n",
5420				fcp_cqidx,
5421				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5422				fcp_cqidx,
5423				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5424	}
5425
5426	/*
5427	 * Set up all the Work Queues (WQs)
5428	 */
5429
5430	/* Set up Mailbox Command Queue */
5431	if (!phba->sli4_hba.mbx_wq) {
5432		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5433				"0538 Slow-path MQ not allocated\n");
5434		goto out_destroy_fcp_cq;
5435	}
5436	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5437			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
5438	if (rc) {
5439		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5440				"0539 Failed setup of slow-path MQ: "
5441				"rc = 0x%x\n", rc);
5442		goto out_destroy_fcp_cq;
5443	}
5444	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5445			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5446			phba->sli4_hba.mbx_wq->queue_id,
5447			phba->sli4_hba.mbx_cq->queue_id);
5448
5449	/* Set up slow-path ELS Work Queue */
5450	if (!phba->sli4_hba.els_wq) {
5451		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5452				"0536 Slow-path ELS WQ not allocated\n");
5453		goto out_destroy_mbx_wq;
5454	}
5455	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5456			    phba->sli4_hba.els_cq, LPFC_ELS);
5457	if (rc) {
5458		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5459				"0537 Failed setup of slow-path ELS WQ: "
5460				"rc = 0x%x\n", rc);
5461		goto out_destroy_mbx_wq;
5462	}
5463	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5464			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5465			phba->sli4_hba.els_wq->queue_id,
5466			phba->sli4_hba.els_cq->queue_id);
5467
5468	/* Set up fast-path FCP Work Queue */
5469	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5470		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5471			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5472					"0534 Fast-path FCP WQ (%d) not "
5473					"allocated\n", fcp_wqidx);
5474			goto out_destroy_fcp_wq;
5475		}
5476		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5477				    phba->sli4_hba.fcp_cq[fcp_cq_index],
5478				    LPFC_FCP);
5479		if (rc) {
5480			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5481					"0535 Failed setup of fast-path FCP "
5482					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5483			goto out_destroy_fcp_wq;
5484		}
5485		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5486				"2591 FCP WQ setup: wq[%d]-id=%d, "
5487				"parent cq[%d]-id=%d\n",
5488				fcp_wqidx,
5489				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5490				fcp_cq_index,
5491				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5492		/* Round robin FCP Work Queue's Completion Queue assignment */
5493		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5494	}
5495
5496	/*
5497	 * Create Receive Queue (RQ)
5498	 */
5499	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5500		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5501				"0540 Receive Queue not allocated\n");
5502		goto out_destroy_fcp_wq;
5503	}
5504	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5505			    phba->sli4_hba.rxq_cq, LPFC_USOL);
5506	if (rc) {
5507		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5508				"0541 Failed setup of Receive Queue: "
5509				"rc = 0x%x\n", rc);
5510		goto out_destroy_fcp_wq;
5511	}
5512	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5513			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5514			"parent cq-id=%d\n",
5515			phba->sli4_hba.hdr_rq->queue_id,
5516			phba->sli4_hba.dat_rq->queue_id,
5517			phba->sli4_hba.rxq_cq->queue_id);
5518	return 0;
5519
5520out_destroy_fcp_wq:
5521	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5522		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5523	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5524out_destroy_mbx_wq:
5525	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5526out_destroy_fcp_cq:
5527	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5528		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5529	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5530out_destroy_els_cq:
5531	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5532out_destroy_mbx_cq:
5533	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5534out_destroy_fp_eq:
5535	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5536		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5537	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5538out_error:
5539	return rc;
5540}
5541
5542/**
5543 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5544 * @phba: pointer to lpfc hba data structure.
5545 *
5546 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5547 * operation.
5548 *
5549 * Return codes
5550 *      0 - sucessful
5551 *      ENOMEM - No availble memory
5552 *      EIO - The mailbox failed to complete successfully.
5553 **/
5554void
5555lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5556{
5557	int fcp_qidx;
5558
5559	/* Unset mailbox command work queue */
5560	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5561	/* Unset ELS work queue */
5562	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5563	/* Unset unsolicited receive queue */
5564	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5565	/* Unset FCP work queue */
5566	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5567		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5568	/* Unset mailbox command complete queue */
5569	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5570	/* Unset ELS complete queue */
5571	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5572	/* Unset unsolicited receive complete queue */
5573	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5574	/* Unset FCP response complete queue */
5575	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5576		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5577	/* Unset fast-path event queue */
5578	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5579		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5580	/* Unset slow-path event queue */
5581	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5582}
5583
5584/**
5585 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5586 * @phba: pointer to lpfc hba data structure.
5587 *
5588 * This routine is invoked to allocate and set up a pool of completion queue
5589 * events. The body of the completion queue event is a completion queue entry
5590 * CQE. For now, this pool is used for the interrupt service routine to queue
5591 * the following HBA completion queue events for the worker thread to process:
5592 *   - Mailbox asynchronous events
5593 *   - Receive queue completion unsolicited events
5594 * Later, this can be used for all the slow-path events.
5595 *
5596 * Return codes
5597 *      0 - sucessful
5598 *      -ENOMEM - No availble memory
5599 **/
5600static int
5601lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5602{
5603	struct lpfc_cq_event *cq_event;
5604	int i;
5605
5606	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5607		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5608		if (!cq_event)
5609			goto out_pool_create_fail;
5610		list_add_tail(&cq_event->list,
5611			      &phba->sli4_hba.sp_cqe_event_pool);
5612	}
5613	return 0;
5614
5615out_pool_create_fail:
5616	lpfc_sli4_cq_event_pool_destroy(phba);
5617	return -ENOMEM;
5618}
5619
5620/**
5621 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5622 * @phba: pointer to lpfc hba data structure.
5623 *
5624 * This routine is invoked to free the pool of completion queue events at
5625 * driver unload time. Note that, it is the responsibility of the driver
5626 * cleanup routine to free all the outstanding completion-queue events
5627 * allocated from this pool back into the pool before invoking this routine
5628 * to destroy the pool.
5629 **/
5630static void
5631lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5632{
5633	struct lpfc_cq_event *cq_event, *next_cq_event;
5634
5635	list_for_each_entry_safe(cq_event, next_cq_event,
5636				 &phba->sli4_hba.sp_cqe_event_pool, list) {
5637		list_del(&cq_event->list);
5638		kfree(cq_event);
5639	}
5640}
5641
5642/**
5643 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5644 * @phba: pointer to lpfc hba data structure.
5645 *
5646 * This routine is the lock free version of the API invoked to allocate a
5647 * completion-queue event from the free pool.
5648 *
5649 * Return: Pointer to the newly allocated completion-queue event if successful
5650 *         NULL otherwise.
5651 **/
5652struct lpfc_cq_event *
5653__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5654{
5655	struct lpfc_cq_event *cq_event = NULL;
5656
5657	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5658			 struct lpfc_cq_event, list);
5659	return cq_event;
5660}
5661
5662/**
5663 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 *
5666 * This routine is the lock version of the API invoked to allocate a
5667 * completion-queue event from the free pool.
5668 *
5669 * Return: Pointer to the newly allocated completion-queue event if successful
5670 *         NULL otherwise.
5671 **/
5672struct lpfc_cq_event *
5673lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5674{
5675	struct lpfc_cq_event *cq_event;
5676	unsigned long iflags;
5677
5678	spin_lock_irqsave(&phba->hbalock, iflags);
5679	cq_event = __lpfc_sli4_cq_event_alloc(phba);
5680	spin_unlock_irqrestore(&phba->hbalock, iflags);
5681	return cq_event;
5682}
5683
5684/**
5685 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5686 * @phba: pointer to lpfc hba data structure.
5687 * @cq_event: pointer to the completion queue event to be freed.
5688 *
5689 * This routine is the lock free version of the API invoked to release a
5690 * completion-queue event back into the free pool.
5691 **/
5692void
5693__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5694			     struct lpfc_cq_event *cq_event)
5695{
5696	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5697}
5698
5699/**
5700 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5701 * @phba: pointer to lpfc hba data structure.
5702 * @cq_event: pointer to the completion queue event to be freed.
5703 *
5704 * This routine is the lock version of the API invoked to release a
5705 * completion-queue event back into the free pool.
5706 **/
5707void
5708lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5709			   struct lpfc_cq_event *cq_event)
5710{
5711	unsigned long iflags;
5712	spin_lock_irqsave(&phba->hbalock, iflags);
5713	__lpfc_sli4_cq_event_release(phba, cq_event);
5714	spin_unlock_irqrestore(&phba->hbalock, iflags);
5715}
5716
5717/**
5718 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5719 * @phba: pointer to lpfc hba data structure.
5720 *
5721 * This routine is to free all the pending completion-queue events to the
5722 * back into the free pool for device reset.
5723 **/
5724static void
5725lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5726{
5727	LIST_HEAD(cqelist);
5728	struct lpfc_cq_event *cqe;
5729	unsigned long iflags;
5730
5731	/* Retrieve all the pending WCQEs from pending WCQE lists */
5732	spin_lock_irqsave(&phba->hbalock, iflags);
5733	/* Pending FCP XRI abort events */
5734	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5735			 &cqelist);
5736	/* Pending ELS XRI abort events */
5737	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5738			 &cqelist);
5739	/* Pending asynnc events */
5740	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5741			 &cqelist);
5742	spin_unlock_irqrestore(&phba->hbalock, iflags);
5743
5744	while (!list_empty(&cqelist)) {
5745		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5746		lpfc_sli4_cq_event_release(phba, cqe);
5747	}
5748}
5749
5750/**
5751 * lpfc_pci_function_reset - Reset pci function.
5752 * @phba: pointer to lpfc hba data structure.
5753 *
5754 * This routine is invoked to request a PCI function reset. It will destroys
5755 * all resources assigned to the PCI function which originates this request.
5756 *
5757 * Return codes
5758 *      0 - sucessful
5759 *      ENOMEM - No availble memory
5760 *      EIO - The mailbox failed to complete successfully.
5761 **/
5762int
5763lpfc_pci_function_reset(struct lpfc_hba *phba)
5764{
5765	LPFC_MBOXQ_t *mboxq;
5766	uint32_t rc = 0;
5767	uint32_t shdr_status, shdr_add_status;
5768	union lpfc_sli4_cfg_shdr *shdr;
5769
5770	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5771	if (!mboxq) {
5772		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5773				"0494 Unable to allocate memory for issuing "
5774				"SLI_FUNCTION_RESET mailbox command\n");
5775		return -ENOMEM;
5776	}
5777
5778	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5779	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5780			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5781			 LPFC_SLI4_MBX_EMBED);
5782	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5783	shdr = (union lpfc_sli4_cfg_shdr *)
5784		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5785	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5786	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5787	if (rc != MBX_TIMEOUT)
5788		mempool_free(mboxq, phba->mbox_mem_pool);
5789	if (shdr_status || shdr_add_status || rc) {
5790		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5791				"0495 SLI_FUNCTION_RESET mailbox failed with "
5792				"status x%x add_status x%x, mbx status x%x\n",
5793				shdr_status, shdr_add_status, rc);
5794		rc = -ENXIO;
5795	}
5796	return rc;
5797}
5798
5799/**
5800 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5801 * @phba: pointer to lpfc hba data structure.
5802 * @cnt: number of nop mailbox commands to send.
5803 *
5804 * This routine is invoked to send a number @cnt of NOP mailbox command and
5805 * wait for each command to complete.
5806 *
5807 * Return: the number of NOP mailbox command completed.
5808 **/
5809static int
5810lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5811{
5812	LPFC_MBOXQ_t *mboxq;
5813	int length, cmdsent;
5814	uint32_t mbox_tmo;
5815	uint32_t rc = 0;
5816	uint32_t shdr_status, shdr_add_status;
5817	union lpfc_sli4_cfg_shdr *shdr;
5818
5819	if (cnt == 0) {
5820		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5821				"2518 Requested to send 0 NOP mailbox cmd\n");
5822		return cnt;
5823	}
5824
5825	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5826	if (!mboxq) {
5827		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5828				"2519 Unable to allocate memory for issuing "
5829				"NOP mailbox command\n");
5830		return 0;
5831	}
5832
5833	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5834	length = (sizeof(struct lpfc_mbx_nop) -
5835		  sizeof(struct lpfc_sli4_cfg_mhdr));
5836	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5837			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5838
5839	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5840	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5841		if (!phba->sli4_hba.intr_enable)
5842			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5843		else
5844			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5845		if (rc == MBX_TIMEOUT)
5846			break;
5847		/* Check return status */
5848		shdr = (union lpfc_sli4_cfg_shdr *)
5849			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5850		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5851		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5852					 &shdr->response);
5853		if (shdr_status || shdr_add_status || rc) {
5854			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5855					"2520 NOP mailbox command failed "
5856					"status x%x add_status x%x mbx "
5857					"status x%x\n", shdr_status,
5858					shdr_add_status, rc);
5859			break;
5860		}
5861	}
5862
5863	if (rc != MBX_TIMEOUT)
5864		mempool_free(mboxq, phba->mbox_mem_pool);
5865
5866	return cmdsent;
5867}
5868
5869/**
5870 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
5871 * @phba: pointer to lpfc hba data structure.
5872 * @fcfi: fcf index.
5873 *
5874 * This routine is invoked to unregister a FCFI from device.
5875 **/
5876void
5877lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5878{
5879	LPFC_MBOXQ_t *mbox;
5880	uint32_t mbox_tmo;
5881	int rc;
5882	unsigned long flags;
5883
5884	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5885
5886	if (!mbox)
5887		return;
5888
5889	lpfc_unreg_fcfi(mbox, fcfi);
5890
5891	if (!phba->sli4_hba.intr_enable)
5892		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5893	else {
5894		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5895		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5896	}
5897	if (rc != MBX_TIMEOUT)
5898		mempool_free(mbox, phba->mbox_mem_pool);
5899	if (rc != MBX_SUCCESS)
5900		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5901				"2517 Unregister FCFI command failed "
5902				"status %d, mbxStatus x%x\n", rc,
5903				bf_get(lpfc_mqe_status, &mbox->u.mqe));
5904	else {
5905		spin_lock_irqsave(&phba->hbalock, flags);
5906		/* Mark the FCFI is no longer registered */
5907		phba->fcf.fcf_flag &=
5908			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5909		spin_unlock_irqrestore(&phba->hbalock, flags);
5910	}
5911}
5912
5913/**
5914 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5915 * @phba: pointer to lpfc hba data structure.
5916 *
5917 * This routine is invoked to set up the PCI device memory space for device
5918 * with SLI-4 interface spec.
5919 *
5920 * Return codes
5921 * 	0 - sucessful
5922 * 	other values - error
5923 **/
5924static int
5925lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5926{
5927	struct pci_dev *pdev;
5928	unsigned long bar0map_len, bar1map_len, bar2map_len;
5929	int error = -ENODEV;
5930
5931	/* Obtain PCI device reference */
5932	if (!phba->pcidev)
5933		return error;
5934	else
5935		pdev = phba->pcidev;
5936
5937	/* Set the device DMA mask size */
5938	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5939		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5940			return error;
5941
5942	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5943	 * number of bytes required by each mapping. They are actually
5944	 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
5945	 */
5946	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5947	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5948
5949	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5950	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5951
5952	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5953	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5954
5955	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5956	phba->sli4_hba.conf_regs_memmap_p =
5957				ioremap(phba->pci_bar0_map, bar0map_len);
5958	if (!phba->sli4_hba.conf_regs_memmap_p) {
5959		dev_printk(KERN_ERR, &pdev->dev,
5960			   "ioremap failed for SLI4 PCI config registers.\n");
5961		goto out;
5962	}
5963
5964	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
5965	phba->sli4_hba.ctrl_regs_memmap_p =
5966				ioremap(phba->pci_bar1_map, bar1map_len);
5967	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5968		dev_printk(KERN_ERR, &pdev->dev,
5969			   "ioremap failed for SLI4 HBA control registers.\n");
5970		goto out_iounmap_conf;
5971	}
5972
5973	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5974	phba->sli4_hba.drbl_regs_memmap_p =
5975				ioremap(phba->pci_bar2_map, bar2map_len);
5976	if (!phba->sli4_hba.drbl_regs_memmap_p) {
5977		dev_printk(KERN_ERR, &pdev->dev,
5978			   "ioremap failed for SLI4 HBA doorbell registers.\n");
5979		goto out_iounmap_ctrl;
5980	}
5981
5982	/* Set up BAR0 PCI config space register memory map */
5983	lpfc_sli4_bar0_register_memmap(phba);
5984
5985	/* Set up BAR1 register memory map */
5986	lpfc_sli4_bar1_register_memmap(phba);
5987
5988	/* Set up BAR2 register memory map */
5989	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5990	if (error)
5991		goto out_iounmap_all;
5992
5993	return 0;
5994
5995out_iounmap_all:
5996	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5997out_iounmap_ctrl:
5998	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5999out_iounmap_conf:
6000	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6001out:
6002	return error;
6003}
6004
6005/**
6006 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6007 * @phba: pointer to lpfc hba data structure.
6008 *
6009 * This routine is invoked to unset the PCI device memory space for device
6010 * with SLI-4 interface spec.
6011 **/
6012static void
6013lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6014{
6015	struct pci_dev *pdev;
6016
6017	/* Obtain PCI device reference */
6018	if (!phba->pcidev)
6019		return;
6020	else
6021		pdev = phba->pcidev;
6022
6023	/* Free coherent DMA memory allocated */
6024
6025	/* Unmap I/O memory space */
6026	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6027	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6028	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6029
6030	return;
6031}
6032
6033/**
6034 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6035 * @phba: pointer to lpfc hba data structure.
6036 *
6037 * This routine is invoked to enable the MSI-X interrupt vectors to device
6038 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6039 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6040 * invoked, enables either all or nothing, depending on the current
6041 * availability of PCI vector resources. The device driver is responsible
6042 * for calling the individual request_irq() to register each MSI-X vector
6043 * with a interrupt handler, which is done in this function. Note that
6044 * later when device is unloading, the driver should always call free_irq()
6045 * on all MSI-X vectors it has done request_irq() on before calling
6046 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
6047 * will be left with MSI-X enabled and leaks its vectors.
6048 *
6049 * Return codes
6050 *   0 - sucessful
6051 *   other values - error
6052 **/
6053static int
6054lpfc_sli_enable_msix(struct lpfc_hba *phba)
6055{
6056	int rc, i;
6057	LPFC_MBOXQ_t *pmb;
6058
6059	/* Set up MSI-X multi-message vectors */
6060	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6061		phba->msix_entries[i].entry = i;
6062
6063	/* Configure MSI-X capability structure */
6064	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6065				ARRAY_SIZE(phba->msix_entries));
6066	if (rc) {
6067		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6068				"0420 PCI enable MSI-X failed (%d)\n", rc);
6069		goto msi_fail_out;
6070	}
6071	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6072		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6073				"0477 MSI-X entry[%d]: vector=x%x "
6074				"message=%d\n", i,
6075				phba->msix_entries[i].vector,
6076				phba->msix_entries[i].entry);
6077	/*
6078	 * Assign MSI-X vectors to interrupt handlers
6079	 */
6080
6081	/* vector-0 is associated to slow-path handler */
6082	rc = request_irq(phba->msix_entries[0].vector,
6083			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6084			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6085	if (rc) {
6086		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6087				"0421 MSI-X slow-path request_irq failed "
6088				"(%d)\n", rc);
6089		goto msi_fail_out;
6090	}
6091
6092	/* vector-1 is associated to fast-path handler */
6093	rc = request_irq(phba->msix_entries[1].vector,
6094			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6095			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6096
6097	if (rc) {
6098		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6099				"0429 MSI-X fast-path request_irq failed "
6100				"(%d)\n", rc);
6101		goto irq_fail_out;
6102	}
6103
6104	/*
6105	 * Configure HBA MSI-X attention conditions to messages
6106	 */
6107	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6108
6109	if (!pmb) {
6110		rc = -ENOMEM;
6111		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6112				"0474 Unable to allocate memory for issuing "
6113				"MBOX_CONFIG_MSI command\n");
6114		goto mem_fail_out;
6115	}
6116	rc = lpfc_config_msi(phba, pmb);
6117	if (rc)
6118		goto mbx_fail_out;
6119	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6120	if (rc != MBX_SUCCESS) {
6121		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6122				"0351 Config MSI mailbox command failed, "
6123				"mbxCmd x%x, mbxStatus x%x\n",
6124				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6125		goto mbx_fail_out;
6126	}
6127
6128	/* Free memory allocated for mailbox command */
6129	mempool_free(pmb, phba->mbox_mem_pool);
6130	return rc;
6131
6132mbx_fail_out:
6133	/* Free memory allocated for mailbox command */
6134	mempool_free(pmb, phba->mbox_mem_pool);
6135
6136mem_fail_out:
6137	/* free the irq already requested */
6138	free_irq(phba->msix_entries[1].vector, phba);
6139
6140irq_fail_out:
6141	/* free the irq already requested */
6142	free_irq(phba->msix_entries[0].vector, phba);
6143
6144msi_fail_out:
6145	/* Unconfigure MSI-X capability structure */
6146	pci_disable_msix(phba->pcidev);
6147	return rc;
6148}
6149
6150/**
6151 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6152 * @phba: pointer to lpfc hba data structure.
6153 *
6154 * This routine is invoked to release the MSI-X vectors and then disable the
6155 * MSI-X interrupt mode to device with SLI-3 interface spec.
6156 **/
6157static void
6158lpfc_sli_disable_msix(struct lpfc_hba *phba)
6159{
6160	int i;
6161
6162	/* Free up MSI-X multi-message vectors */
6163	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6164		free_irq(phba->msix_entries[i].vector, phba);
6165	/* Disable MSI-X */
6166	pci_disable_msix(phba->pcidev);
6167
6168	return;
6169}
6170
6171/**
6172 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6173 * @phba: pointer to lpfc hba data structure.
6174 *
6175 * This routine is invoked to enable the MSI interrupt mode to device with
6176 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6177 * enable the MSI vector. The device driver is responsible for calling the
6178 * request_irq() to register MSI vector with a interrupt the handler, which
6179 * is done in this function.
6180 *
6181 * Return codes
6182 * 	0 - sucessful
6183 * 	other values - error
6184 */
6185static int
6186lpfc_sli_enable_msi(struct lpfc_hba *phba)
6187{
6188	int rc;
6189
6190	rc = pci_enable_msi(phba->pcidev);
6191	if (!rc)
6192		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6193				"0462 PCI enable MSI mode success.\n");
6194	else {
6195		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6196				"0471 PCI enable MSI mode failed (%d)\n", rc);
6197		return rc;
6198	}
6199
6200	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6201			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6202	if (rc) {
6203		pci_disable_msi(phba->pcidev);
6204		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6205				"0478 MSI request_irq failed (%d)\n", rc);
6206	}
6207	return rc;
6208}
6209
6210/**
6211 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6212 * @phba: pointer to lpfc hba data structure.
6213 *
6214 * This routine is invoked to disable the MSI interrupt mode to device with
6215 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6216 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6217 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6218 * its vector.
6219 */
6220static void
6221lpfc_sli_disable_msi(struct lpfc_hba *phba)
6222{
6223	free_irq(phba->pcidev->irq, phba);
6224	pci_disable_msi(phba->pcidev);
6225	return;
6226}
6227
6228/**
6229 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6230 * @phba: pointer to lpfc hba data structure.
6231 *
6232 * This routine is invoked to enable device interrupt and associate driver's
6233 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6234 * spec. Depends on the interrupt mode configured to the driver, the driver
6235 * will try to fallback from the configured interrupt mode to an interrupt
6236 * mode which is supported by the platform, kernel, and device in the order
6237 * of:
6238 * MSI-X -> MSI -> IRQ.
6239 *
6240 * Return codes
6241 *   0 - sucessful
6242 *   other values - error
6243 **/
6244static uint32_t
6245lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6246{
6247	uint32_t intr_mode = LPFC_INTR_ERROR;
6248	int retval;
6249
6250	if (cfg_mode == 2) {
6251		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6252		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6253		if (!retval) {
6254			/* Now, try to enable MSI-X interrupt mode */
6255			retval = lpfc_sli_enable_msix(phba);
6256			if (!retval) {
6257				/* Indicate initialization to MSI-X mode */
6258				phba->intr_type = MSIX;
6259				intr_mode = 2;
6260			}
6261		}
6262	}
6263
6264	/* Fallback to MSI if MSI-X initialization failed */
6265	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6266		retval = lpfc_sli_enable_msi(phba);
6267		if (!retval) {
6268			/* Indicate initialization to MSI mode */
6269			phba->intr_type = MSI;
6270			intr_mode = 1;
6271		}
6272	}
6273
6274	/* Fallback to INTx if both MSI-X/MSI initalization failed */
6275	if (phba->intr_type == NONE) {
6276		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6277				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6278		if (!retval) {
6279			/* Indicate initialization to INTx mode */
6280			phba->intr_type = INTx;
6281			intr_mode = 0;
6282		}
6283	}
6284	return intr_mode;
6285}
6286
6287/**
6288 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6289 * @phba: pointer to lpfc hba data structure.
6290 *
6291 * This routine is invoked to disable device interrupt and disassociate the
6292 * driver's interrupt handler(s) from interrupt vector(s) to device with
6293 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6294 * release the interrupt vector(s) for the message signaled interrupt.
6295 **/
6296static void
6297lpfc_sli_disable_intr(struct lpfc_hba *phba)
6298{
6299	/* Disable the currently initialized interrupt mode */
6300	if (phba->intr_type == MSIX)
6301		lpfc_sli_disable_msix(phba);
6302	else if (phba->intr_type == MSI)
6303		lpfc_sli_disable_msi(phba);
6304	else if (phba->intr_type == INTx)
6305		free_irq(phba->pcidev->irq, phba);
6306
6307	/* Reset interrupt management states */
6308	phba->intr_type = NONE;
6309	phba->sli.slistat.sli_intr = 0;
6310
6311	return;
6312}
6313
6314/**
6315 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6316 * @phba: pointer to lpfc hba data structure.
6317 *
6318 * This routine is invoked to enable the MSI-X interrupt vectors to device
6319 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6320 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6321 * enables either all or nothing, depending on the current availability of
6322 * PCI vector resources. The device driver is responsible for calling the
6323 * individual request_irq() to register each MSI-X vector with a interrupt
6324 * handler, which is done in this function. Note that later when device is
6325 * unloading, the driver should always call free_irq() on all MSI-X vectors
6326 * it has done request_irq() on before calling pci_disable_msix(). Failure
6327 * to do so results in a BUG_ON() and a device will be left with MSI-X
6328 * enabled and leaks its vectors.
6329 *
6330 * Return codes
6331 * 0 - sucessful
6332 * other values - error
6333 **/
6334static int
6335lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6336{
6337	int rc, index;
6338
6339	/* Set up MSI-X multi-message vectors */
6340	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6341		phba->sli4_hba.msix_entries[index].entry = index;
6342
6343	/* Configure MSI-X capability structure */
6344	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6345			     phba->sli4_hba.cfg_eqn);
6346	if (rc) {
6347		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6348				"0484 PCI enable MSI-X failed (%d)\n", rc);
6349		goto msi_fail_out;
6350	}
6351	/* Log MSI-X vector assignment */
6352	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6353		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6354				"0489 MSI-X entry[%d]: vector=x%x "
6355				"message=%d\n", index,
6356				phba->sli4_hba.msix_entries[index].vector,
6357				phba->sli4_hba.msix_entries[index].entry);
6358	/*
6359	 * Assign MSI-X vectors to interrupt handlers
6360	 */
6361
6362	/* The first vector must associated to slow-path handler for MQ */
6363	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6364			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6365			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6366	if (rc) {
6367		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6368				"0485 MSI-X slow-path request_irq failed "
6369				"(%d)\n", rc);
6370		goto msi_fail_out;
6371	}
6372
6373	/* The rest of the vector(s) are associated to fast-path handler(s) */
6374	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6375		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6376		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6377		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6378				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6379				 LPFC_FP_DRIVER_HANDLER_NAME,
6380				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6381		if (rc) {
6382			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6383					"0486 MSI-X fast-path (%d) "
6384					"request_irq failed (%d)\n", index, rc);
6385			goto cfg_fail_out;
6386		}
6387	}
6388
6389	return rc;
6390
6391cfg_fail_out:
6392	/* free the irq already requested */
6393	for (--index; index >= 1; index--)
6394		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6395			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6396
6397	/* free the irq already requested */
6398	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6399
6400msi_fail_out:
6401	/* Unconfigure MSI-X capability structure */
6402	pci_disable_msix(phba->pcidev);
6403	return rc;
6404}
6405
6406/**
6407 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6408 * @phba: pointer to lpfc hba data structure.
6409 *
6410 * This routine is invoked to release the MSI-X vectors and then disable the
6411 * MSI-X interrupt mode to device with SLI-4 interface spec.
6412 **/
6413static void
6414lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6415{
6416	int index;
6417
6418	/* Free up MSI-X multi-message vectors */
6419	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6420
6421	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6422		free_irq(phba->sli4_hba.msix_entries[index].vector,
6423			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6424	/* Disable MSI-X */
6425	pci_disable_msix(phba->pcidev);
6426
6427	return;
6428}
6429
6430/**
6431 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6432 * @phba: pointer to lpfc hba data structure.
6433 *
6434 * This routine is invoked to enable the MSI interrupt mode to device with
6435 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6436 * to enable the MSI vector. The device driver is responsible for calling
6437 * the request_irq() to register MSI vector with a interrupt the handler,
6438 * which is done in this function.
6439 *
6440 * Return codes
6441 * 	0 - sucessful
6442 * 	other values - error
6443 **/
6444static int
6445lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6446{
6447	int rc, index;
6448
6449	rc = pci_enable_msi(phba->pcidev);
6450	if (!rc)
6451		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6452				"0487 PCI enable MSI mode success.\n");
6453	else {
6454		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6455				"0488 PCI enable MSI mode failed (%d)\n", rc);
6456		return rc;
6457	}
6458
6459	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6460			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6461	if (rc) {
6462		pci_disable_msi(phba->pcidev);
6463		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6464				"0490 MSI request_irq failed (%d)\n", rc);
6465	}
6466
6467	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6468		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6469		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6470	}
6471
6472	return rc;
6473}
6474
6475/**
6476 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6477 * @phba: pointer to lpfc hba data structure.
6478 *
6479 * This routine is invoked to disable the MSI interrupt mode to device with
6480 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6481 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6482 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6483 * its vector.
6484 **/
6485static void
6486lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6487{
6488	free_irq(phba->pcidev->irq, phba);
6489	pci_disable_msi(phba->pcidev);
6490	return;
6491}
6492
6493/**
6494 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6495 * @phba: pointer to lpfc hba data structure.
6496 *
6497 * This routine is invoked to enable device interrupt and associate driver's
6498 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
6499 * interface spec. Depends on the interrupt mode configured to the driver,
6500 * the driver will try to fallback from the configured interrupt mode to an
6501 * interrupt mode which is supported by the platform, kernel, and device in
6502 * the order of:
6503 * MSI-X -> MSI -> IRQ.
6504 *
6505 * Return codes
6506 * 	0 - sucessful
6507 * 	other values - error
6508 **/
6509static uint32_t
6510lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6511{
6512	uint32_t intr_mode = LPFC_INTR_ERROR;
6513	int retval, index;
6514
6515	if (cfg_mode == 2) {
6516		/* Preparation before conf_msi mbox cmd */
6517		retval = 0;
6518		if (!retval) {
6519			/* Now, try to enable MSI-X interrupt mode */
6520			retval = lpfc_sli4_enable_msix(phba);
6521			if (!retval) {
6522				/* Indicate initialization to MSI-X mode */
6523				phba->intr_type = MSIX;
6524				intr_mode = 2;
6525			}
6526		}
6527	}
6528
6529	/* Fallback to MSI if MSI-X initialization failed */
6530	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6531		retval = lpfc_sli4_enable_msi(phba);
6532		if (!retval) {
6533			/* Indicate initialization to MSI mode */
6534			phba->intr_type = MSI;
6535			intr_mode = 1;
6536		}
6537	}
6538
6539	/* Fallback to INTx if both MSI-X/MSI initalization failed */
6540	if (phba->intr_type == NONE) {
6541		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6542				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6543		if (!retval) {
6544			/* Indicate initialization to INTx mode */
6545			phba->intr_type = INTx;
6546			intr_mode = 0;
6547			for (index = 0; index < phba->cfg_fcp_eq_count;
6548			     index++) {
6549				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6550				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6551			}
6552		}
6553	}
6554	return intr_mode;
6555}
6556
6557/**
6558 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6559 * @phba: pointer to lpfc hba data structure.
6560 *
6561 * This routine is invoked to disable device interrupt and disassociate
6562 * the driver's interrupt handler(s) from interrupt vector(s) to device
6563 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6564 * will release the interrupt vector(s) for the message signaled interrupt.
6565 **/
6566static void
6567lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6568{
6569	/* Disable the currently initialized interrupt mode */
6570	if (phba->intr_type == MSIX)
6571		lpfc_sli4_disable_msix(phba);
6572	else if (phba->intr_type == MSI)
6573		lpfc_sli4_disable_msi(phba);
6574	else if (phba->intr_type == INTx)
6575		free_irq(phba->pcidev->irq, phba);
6576
6577	/* Reset interrupt management states */
6578	phba->intr_type = NONE;
6579	phba->sli.slistat.sli_intr = 0;
6580
6581	return;
6582}
6583
6584/**
6585 * lpfc_unset_hba - Unset SLI3 hba device initialization
6586 * @phba: pointer to lpfc hba data structure.
6587 *
6588 * This routine is invoked to unset the HBA device initialization steps to
6589 * a device with SLI-3 interface spec.
6590 **/
6591static void
6592lpfc_unset_hba(struct lpfc_hba *phba)
6593{
6594	struct lpfc_vport *vport = phba->pport;
6595	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6596
6597	spin_lock_irq(shost->host_lock);
6598	vport->load_flag |= FC_UNLOADING;
6599	spin_unlock_irq(shost->host_lock);
6600
6601	lpfc_stop_hba_timers(phba);
6602
6603	phba->pport->work_port_events = 0;
6604
6605	lpfc_sli_hba_down(phba);
6606
6607	lpfc_sli_brdrestart(phba);
6608
6609	lpfc_sli_disable_intr(phba);
6610
6611	return;
6612}
6613
6614/**
6615 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
6616 * @phba: pointer to lpfc hba data structure.
6617 *
6618 * This routine is invoked to unset the HBA device initialization steps to
6619 * a device with SLI-4 interface spec.
6620 **/
6621static void
6622lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6623{
6624	struct lpfc_vport *vport = phba->pport;
6625	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6626
6627	spin_lock_irq(shost->host_lock);
6628	vport->load_flag |= FC_UNLOADING;
6629	spin_unlock_irq(shost->host_lock);
6630
6631	phba->pport->work_port_events = 0;
6632
6633	lpfc_sli4_hba_down(phba);
6634
6635	lpfc_sli4_disable_intr(phba);
6636
6637	return;
6638}
6639
6640/**
6641 * lpfc_sli4_hba_unset - Unset the fcoe hba
6642 * @phba: Pointer to HBA context object.
6643 *
6644 * This function is called in the SLI4 code path to reset the HBA's FCoE
6645 * function. The caller is not required to hold any lock. This routine
6646 * issues PCI function reset mailbox command to reset the FCoE function.
6647 * At the end of the function, it calls lpfc_hba_down_post function to
6648 * free any pending commands.
6649 **/
6650static void
6651lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6652{
6653	int wait_cnt = 0;
6654	LPFC_MBOXQ_t *mboxq;
6655
6656	lpfc_stop_hba_timers(phba);
6657	phba->sli4_hba.intr_enable = 0;
6658
6659	/*
6660	 * Gracefully wait out the potential current outstanding asynchronous
6661	 * mailbox command.
6662	 */
6663
6664	/* First, block any pending async mailbox command from posted */
6665	spin_lock_irq(&phba->hbalock);
6666	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6667	spin_unlock_irq(&phba->hbalock);
6668	/* Now, trying to wait it out if we can */
6669	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6670		msleep(10);
6671		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6672			break;
6673	}
6674	/* Forcefully release the outstanding mailbox command if timed out */
6675	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6676		spin_lock_irq(&phba->hbalock);
6677		mboxq = phba->sli.mbox_active;
6678		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6679		__lpfc_mbox_cmpl_put(phba, mboxq);
6680		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6681		phba->sli.mbox_active = NULL;
6682		spin_unlock_irq(&phba->hbalock);
6683	}
6684
6685	/* Tear down the queues in the HBA */
6686	lpfc_sli4_queue_unset(phba);
6687
6688	/* Disable PCI subsystem interrupt */
6689	lpfc_sli4_disable_intr(phba);
6690
6691	/* Stop kthread signal shall trigger work_done one more time */
6692	kthread_stop(phba->worker_thread);
6693
6694	/* Stop the SLI4 device port */
6695	phba->pport->work_port_events = 0;
6696}
6697
6698/**
6699 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6700 * @pdev: pointer to PCI device
6701 * @pid: pointer to PCI device identifier
6702 *
6703 * This routine is to be called to attach a device with SLI-3 interface spec
6704 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6705 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6706 * information of the device and driver to see if the driver state that it can
6707 * support this kind of device. If the match is successful, the driver core
6708 * invokes this routine. If this routine determines it can claim the HBA, it
6709 * does all the initialization that it needs to do to handle the HBA properly.
6710 *
6711 * Return code
6712 * 	0 - driver can claim the device
6713 * 	negative value - driver can not claim the device
6714 **/
6715static int __devinit
6716lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6717{
6718	struct lpfc_hba   *phba;
6719	struct lpfc_vport *vport = NULL;
6720	int error;
6721	uint32_t cfg_mode, intr_mode;
6722
6723	/* Allocate memory for HBA structure */
6724	phba = lpfc_hba_alloc(pdev);
6725	if (!phba)
6726		return -ENOMEM;
6727
6728	/* Perform generic PCI device enabling operation */
6729	error = lpfc_enable_pci_dev(phba);
6730	if (error) {
6731		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6732				"1401 Failed to enable pci device.\n");
6733		goto out_free_phba;
6734	}
6735
6736	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
6737	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
6738	if (error)
6739		goto out_disable_pci_dev;
6740
6741	/* Set up SLI-3 specific device PCI memory space */
6742	error = lpfc_sli_pci_mem_setup(phba);
6743	if (error) {
6744		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6745				"1402 Failed to set up pci memory space.\n");
6746		goto out_disable_pci_dev;
6747	}
6748
6749	/* Set up phase-1 common device driver resources */
6750	error = lpfc_setup_driver_resource_phase1(phba);
6751	if (error) {
6752		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6753				"1403 Failed to set up driver resource.\n");
6754		goto out_unset_pci_mem_s3;
6755	}
6756
6757	/* Set up SLI-3 specific device driver resources */
6758	error = lpfc_sli_driver_resource_setup(phba);
6759	if (error) {
6760		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6761				"1404 Failed to set up driver resource.\n");
6762		goto out_unset_pci_mem_s3;
6763	}
6764
6765	/* Initialize and populate the iocb list per host */
6766	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
6767	if (error) {
6768		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6769				"1405 Failed to initialize iocb list.\n");
6770		goto out_unset_driver_resource_s3;
6771	}
6772
6773	/* Set up common device driver resources */
6774	error = lpfc_setup_driver_resource_phase2(phba);
6775	if (error) {
6776		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6777				"1406 Failed to set up driver resource.\n");
6778		goto out_free_iocb_list;
6779	}
6780
6781	/* Create SCSI host to the physical port */
6782	error = lpfc_create_shost(phba);
6783	if (error) {
6784		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6785				"1407 Failed to create scsi host.\n");
6786		goto out_unset_driver_resource;
6787	}
6788
6789	/* Configure sysfs attributes */
6790	vport = phba->pport;
6791	error = lpfc_alloc_sysfs_attr(vport);
6792	if (error) {
6793		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6794				"1476 Failed to allocate sysfs attr\n");
6795		goto out_destroy_shost;
6796	}
6797
6798	/* Now, trying to enable interrupt and bring up the device */
6799	cfg_mode = phba->cfg_use_msi;
6800	while (true) {
6801		/* Put device to a known state before enabling interrupt */
6802		lpfc_stop_port(phba);
6803		/* Configure and enable interrupt */
6804		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
6805		if (intr_mode == LPFC_INTR_ERROR) {
6806			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6807					"0431 Failed to enable interrupt.\n");
6808			error = -ENODEV;
6809			goto out_free_sysfs_attr;
6810		}
6811		/* SLI-3 HBA setup */
6812		if (lpfc_sli_hba_setup(phba)) {
6813			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6814					"1477 Failed to set up hba\n");
6815			error = -ENODEV;
6816			goto out_remove_device;
6817		}
6818
6819		/* Wait 50ms for the interrupts of previous mailbox commands */
6820		msleep(50);
6821		/* Check active interrupts on message signaled interrupts */
6822		if (intr_mode == 0 ||
6823		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6824			/* Log the current active interrupt mode */
6825			phba->intr_mode = intr_mode;
6826			lpfc_log_intr_mode(phba, intr_mode);
6827			break;
6828		} else {
6829			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6830					"0447 Configure interrupt mode (%d) "
6831					"failed active interrupt test.\n",
6832					intr_mode);
6833			/* Disable the current interrupt mode */
6834			lpfc_sli_disable_intr(phba);
6835			/* Try next level of interrupt mode */
6836			cfg_mode = --intr_mode;
6837		}
6838	}
6839
6840	/* Perform post initialization setup */
6841	lpfc_post_init_setup(phba);
6842
6843	/* Check if there are static vports to be created. */
6844	lpfc_create_static_vport(phba);
6845
6846	return 0;
6847
6848out_remove_device:
6849	lpfc_unset_hba(phba);
6850out_free_sysfs_attr:
6851	lpfc_free_sysfs_attr(vport);
6852out_destroy_shost:
6853	lpfc_destroy_shost(phba);
6854out_unset_driver_resource:
6855	lpfc_unset_driver_resource_phase2(phba);
6856out_free_iocb_list:
6857	lpfc_free_iocb_list(phba);
6858out_unset_driver_resource_s3:
6859	lpfc_sli_driver_resource_unset(phba);
6860out_unset_pci_mem_s3:
6861	lpfc_sli_pci_mem_unset(phba);
6862out_disable_pci_dev:
6863	lpfc_disable_pci_dev(phba);
6864out_free_phba:
6865	lpfc_hba_free(phba);
6866	return error;
6867}
6868
6869/**
6870 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6871 * @pdev: pointer to PCI device
6872 *
6873 * This routine is to be called to disattach a device with SLI-3 interface
6874 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6875 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6876 * device to be removed from the PCI subsystem properly.
6877 **/
6878static void __devexit
6879lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6880{
6881	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
6882	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6883	struct lpfc_vport **vports;
6884	struct lpfc_hba   *phba = vport->phba;
6885	int i;
6886	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6887
6888	spin_lock_irq(&phba->hbalock);
6889	vport->load_flag |= FC_UNLOADING;
6890	spin_unlock_irq(&phba->hbalock);
6891
6892	lpfc_free_sysfs_attr(vport);
6893
6894	/* Release all the vports against this physical port */
6895	vports = lpfc_create_vport_work_array(phba);
6896	if (vports != NULL)
6897		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6898			fc_vport_terminate(vports[i]->fc_vport);
6899	lpfc_destroy_vport_work_array(phba, vports);
6900
6901	/* Remove FC host and then SCSI host with the physical port */
6902	fc_remove_host(shost);
6903	scsi_remove_host(shost);
6904	lpfc_cleanup(vport);
6905
6906	/*
6907	 * Bring down the SLI Layer. This step disable all interrupts,
6908	 * clears the rings, discards all mailbox commands, and resets
6909	 * the HBA.
6910	 */
6911
6912	/* HBA interrupt will be diabled after this call */
6913	lpfc_sli_hba_down(phba);
6914	/* Stop kthread signal shall trigger work_done one more time */
6915	kthread_stop(phba->worker_thread);
6916	/* Final cleanup of txcmplq and reset the HBA */
6917	lpfc_sli_brdrestart(phba);
6918
6919	lpfc_stop_hba_timers(phba);
6920	spin_lock_irq(&phba->hbalock);
6921	list_del_init(&vport->listentry);
6922	spin_unlock_irq(&phba->hbalock);
6923
6924	lpfc_debugfs_terminate(vport);
6925
6926	/* Disable interrupt */
6927	lpfc_sli_disable_intr(phba);
6928
6929	pci_set_drvdata(pdev, NULL);
6930	scsi_host_put(shost);
6931
6932	/*
6933	 * Call scsi_free before mem_free since scsi bufs are released to their
6934	 * corresponding pools here.
6935	 */
6936	lpfc_scsi_free(phba);
6937	lpfc_mem_free_all(phba);
6938
6939	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6940			  phba->hbqslimp.virt, phba->hbqslimp.phys);
6941
6942	/* Free resources associated with SLI2 interface */
6943	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6944			  phba->slim2p.virt, phba->slim2p.phys);
6945
6946	/* unmap adapter SLIM and Control Registers */
6947	iounmap(phba->ctrl_regs_memmap_p);
6948	iounmap(phba->slim_memmap_p);
6949
6950	lpfc_hba_free(phba);
6951
6952	pci_release_selected_regions(pdev, bars);
6953	pci_disable_device(pdev);
6954}
6955
6956/**
6957 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
6958 * @pdev: pointer to PCI device
6959 * @msg: power management message
6960 *
6961 * This routine is to be called from the kernel's PCI subsystem to support
6962 * system Power Management (PM) to device with SLI-3 interface spec. When
6963 * PM invokes this method, it quiesces the device by stopping the driver's
6964 * worker thread for the device, turning off device's interrupt and DMA,
6965 * and bring the device offline. Note that as the driver implements the
6966 * minimum PM requirements to a power-aware driver's PM support for the
6967 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
6968 * to the suspend() method call will be treated as SUSPEND and the driver will
6969 * fully reinitialize its device during resume() method call, the driver will
6970 * set device to PCI_D3hot state in PCI config space instead of setting it
6971 * according to the @msg provided by the PM.
6972 *
6973 * Return code
6974 * 	0 - driver suspended the device
6975 * 	Error otherwise
6976 **/
6977static int
6978lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
6979{
6980	struct Scsi_Host *shost = pci_get_drvdata(pdev);
6981	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
6982
6983	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6984			"0473 PCI device Power Management suspend.\n");
6985
6986	/* Bring down the device */
6987	lpfc_offline_prep(phba);
6988	lpfc_offline(phba);
6989	kthread_stop(phba->worker_thread);
6990
6991	/* Disable interrupt from device */
6992	lpfc_sli_disable_intr(phba);
6993
6994	/* Save device state to PCI config space */
6995	pci_save_state(pdev);
6996	pci_set_power_state(pdev, PCI_D3hot);
6997
6998	return 0;
6999}
7000
7001/**
7002 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7003 * @pdev: pointer to PCI device
7004 *
7005 * This routine is to be called from the kernel's PCI subsystem to support
7006 * system Power Management (PM) to device with SLI-3 interface spec. When PM
7007 * invokes this method, it restores the device's PCI config space state and
7008 * fully reinitializes the device and brings it online. Note that as the
7009 * driver implements the minimum PM requirements to a power-aware driver's
7010 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
7011 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
7012 * driver will fully reinitialize its device during resume() method call,
7013 * the device will be set to PCI_D0 directly in PCI config space before
7014 * restoring the state.
7015 *
7016 * Return code
7017 * 	0 - driver suspended the device
7018 * 	Error otherwise
7019 **/
7020static int
7021lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7022{
7023	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7024	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7025	uint32_t intr_mode;
7026	int error;
7027
7028	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7029			"0452 PCI device Power Management resume.\n");
7030
7031	/* Restore device state from PCI config space */
7032	pci_set_power_state(pdev, PCI_D0);
7033	pci_restore_state(pdev);
7034	if (pdev->is_busmaster)
7035		pci_set_master(pdev);
7036
7037	/* Startup the kernel thread for this host adapter. */
7038	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7039					"lpfc_worker_%d", phba->brd_no);
7040	if (IS_ERR(phba->worker_thread)) {
7041		error = PTR_ERR(phba->worker_thread);
7042		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7043				"0434 PM resume failed to start worker "
7044				"thread: error=x%x.\n", error);
7045		return error;
7046	}
7047
7048	/* Configure and enable interrupt */
7049	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7050	if (intr_mode == LPFC_INTR_ERROR) {
7051		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7052				"0430 PM resume Failed to enable interrupt\n");
7053		return -EIO;
7054	} else
7055		phba->intr_mode = intr_mode;
7056
7057	/* Restart HBA and bring it online */
7058	lpfc_sli_brdrestart(phba);
7059	lpfc_online(phba);
7060
7061	/* Log the current active interrupt mode */
7062	lpfc_log_intr_mode(phba, phba->intr_mode);
7063
7064	return 0;
7065}
7066
7067/**
7068 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7069 * @pdev: pointer to PCI device.
7070 * @state: the current PCI connection state.
7071 *
7072 * This routine is called from the PCI subsystem for I/O error handling to
7073 * device with SLI-3 interface spec. This function is called by the PCI
7074 * subsystem after a PCI bus error affecting this device has been detected.
7075 * When this function is invoked, it will need to stop all the I/Os and
7076 * interrupt(s) to the device. Once that is done, it will return
7077 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7078 * as desired.
7079 *
7080 * Return codes
7081 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7082 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7083 **/
7084static pci_ers_result_t
7085lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7086{
7087	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7088	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7089	struct lpfc_sli *psli = &phba->sli;
7090	struct lpfc_sli_ring  *pring;
7091
7092	if (state == pci_channel_io_perm_failure) {
7093		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7094				"0472 PCI channel I/O permanent failure\n");
7095		/* Block all SCSI devices' I/Os on the host */
7096		lpfc_scsi_dev_block(phba);
7097		/* Clean up all driver's outstanding SCSI I/Os */
7098		lpfc_sli_flush_fcp_rings(phba);
7099		return PCI_ERS_RESULT_DISCONNECT;
7100	}
7101
7102	pci_disable_device(pdev);
7103	/*
7104	 * There may be I/Os dropped by the firmware.
7105	 * Error iocb (I/O) on txcmplq and let the SCSI layer
7106	 * retry it after re-establishing link.
7107	 */
7108	pring = &psli->ring[psli->fcp_ring];
7109	lpfc_sli_abort_iocb_ring(phba, pring);
7110
7111	/* Disable interrupt */
7112	lpfc_sli_disable_intr(phba);
7113
7114	/* Request a slot reset. */
7115	return PCI_ERS_RESULT_NEED_RESET;
7116}
7117
7118/**
7119 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7120 * @pdev: pointer to PCI device.
7121 *
7122 * This routine is called from the PCI subsystem for error handling to
7123 * device with SLI-3 interface spec. This is called after PCI bus has been
7124 * reset to restart the PCI card from scratch, as if from a cold-boot.
7125 * During the PCI subsystem error recovery, after driver returns
7126 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7127 * recovery and then call this routine before calling the .resume method
7128 * to recover the device. This function will initialize the HBA device,
7129 * enable the interrupt, but it will just put the HBA to offline state
7130 * without passing any I/O traffic.
7131 *
7132 * Return codes
7133 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7134 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7135 */
7136static pci_ers_result_t
7137lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7138{
7139	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7140	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7141	struct lpfc_sli *psli = &phba->sli;
7142	uint32_t intr_mode;
7143
7144	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7145	if (pci_enable_device_mem(pdev)) {
7146		printk(KERN_ERR "lpfc: Cannot re-enable "
7147			"PCI device after reset.\n");
7148		return PCI_ERS_RESULT_DISCONNECT;
7149	}
7150
7151	pci_restore_state(pdev);
7152	if (pdev->is_busmaster)
7153		pci_set_master(pdev);
7154
7155	spin_lock_irq(&phba->hbalock);
7156	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7157	spin_unlock_irq(&phba->hbalock);
7158
7159	/* Configure and enable interrupt */
7160	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7161	if (intr_mode == LPFC_INTR_ERROR) {
7162		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7163				"0427 Cannot re-enable interrupt after "
7164				"slot reset.\n");
7165		return PCI_ERS_RESULT_DISCONNECT;
7166	} else
7167		phba->intr_mode = intr_mode;
7168
7169	/* Take device offline; this will perform cleanup */
7170	lpfc_offline(phba);
7171	lpfc_sli_brdrestart(phba);
7172
7173	/* Log the current active interrupt mode */
7174	lpfc_log_intr_mode(phba, phba->intr_mode);
7175
7176	return PCI_ERS_RESULT_RECOVERED;
7177}
7178
7179/**
7180 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7181 * @pdev: pointer to PCI device
7182 *
7183 * This routine is called from the PCI subsystem for error handling to device
7184 * with SLI-3 interface spec. It is called when kernel error recovery tells
7185 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7186 * error recovery. After this call, traffic can start to flow from this device
7187 * again.
7188 */
7189static void
7190lpfc_io_resume_s3(struct pci_dev *pdev)
7191{
7192	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7193	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7194
7195	lpfc_online(phba);
7196}
7197
7198/**
7199 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7200 * @phba: pointer to lpfc hba data structure.
7201 *
7202 * returns the number of ELS/CT IOCBs to reserve
7203 **/
7204int
7205lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7206{
7207	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7208
7209	if (phba->sli_rev == LPFC_SLI_REV4) {
7210		if (max_xri <= 100)
7211			return 4;
7212		else if (max_xri <= 256)
7213			return 8;
7214		else if (max_xri <= 512)
7215			return 16;
7216		else if (max_xri <= 1024)
7217			return 32;
7218		else
7219			return 48;
7220	} else
7221		return 0;
7222}
7223
7224/**
7225 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7226 * @pdev: pointer to PCI device
7227 * @pid: pointer to PCI device identifier
7228 *
7229 * This routine is called from the kernel's PCI subsystem to device with
7230 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7231 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7232 * information of the device and driver to see if the driver state that it
7233 * can support this kind of device. If the match is successful, the driver
7234 * core invokes this routine. If this routine determines it can claim the HBA,
7235 * it does all the initialization that it needs to do to handle the HBA
7236 * properly.
7237 *
7238 * Return code
7239 * 	0 - driver can claim the device
7240 * 	negative value - driver can not claim the device
7241 **/
7242static int __devinit
7243lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7244{
7245	struct lpfc_hba   *phba;
7246	struct lpfc_vport *vport = NULL;
7247	int error;
7248	uint32_t cfg_mode, intr_mode;
7249	int mcnt;
7250
7251	/* Allocate memory for HBA structure */
7252	phba = lpfc_hba_alloc(pdev);
7253	if (!phba)
7254		return -ENOMEM;
7255
7256	/* Perform generic PCI device enabling operation */
7257	error = lpfc_enable_pci_dev(phba);
7258	if (error) {
7259		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7260				"1409 Failed to enable pci device.\n");
7261		goto out_free_phba;
7262	}
7263
7264	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
7265	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7266	if (error)
7267		goto out_disable_pci_dev;
7268
7269	/* Set up SLI-4 specific device PCI memory space */
7270	error = lpfc_sli4_pci_mem_setup(phba);
7271	if (error) {
7272		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7273				"1410 Failed to set up pci memory space.\n");
7274		goto out_disable_pci_dev;
7275	}
7276
7277	/* Set up phase-1 common device driver resources */
7278	error = lpfc_setup_driver_resource_phase1(phba);
7279	if (error) {
7280		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7281				"1411 Failed to set up driver resource.\n");
7282		goto out_unset_pci_mem_s4;
7283	}
7284
7285	/* Set up SLI-4 Specific device driver resources */
7286	error = lpfc_sli4_driver_resource_setup(phba);
7287	if (error) {
7288		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7289				"1412 Failed to set up driver resource.\n");
7290		goto out_unset_pci_mem_s4;
7291	}
7292
7293	/* Initialize and populate the iocb list per host */
7294	error = lpfc_init_iocb_list(phba,
7295			phba->sli4_hba.max_cfg_param.max_xri);
7296	if (error) {
7297		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7298				"1413 Failed to initialize iocb list.\n");
7299		goto out_unset_driver_resource_s4;
7300	}
7301
7302	/* Set up common device driver resources */
7303	error = lpfc_setup_driver_resource_phase2(phba);
7304	if (error) {
7305		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7306				"1414 Failed to set up driver resource.\n");
7307		goto out_free_iocb_list;
7308	}
7309
7310	/* Create SCSI host to the physical port */
7311	error = lpfc_create_shost(phba);
7312	if (error) {
7313		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7314				"1415 Failed to create scsi host.\n");
7315		goto out_unset_driver_resource;
7316	}
7317
7318	/* Configure sysfs attributes */
7319	vport = phba->pport;
7320	error = lpfc_alloc_sysfs_attr(vport);
7321	if (error) {
7322		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7323				"1416 Failed to allocate sysfs attr\n");
7324		goto out_destroy_shost;
7325	}
7326
7327	/* Now, trying to enable interrupt and bring up the device */
7328	cfg_mode = phba->cfg_use_msi;
7329	while (true) {
7330		/* Put device to a known state before enabling interrupt */
7331		lpfc_stop_port(phba);
7332		/* Configure and enable interrupt */
7333		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7334		if (intr_mode == LPFC_INTR_ERROR) {
7335			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7336					"0426 Failed to enable interrupt.\n");
7337			error = -ENODEV;
7338			goto out_free_sysfs_attr;
7339		}
7340		/* Set up SLI-4 HBA */
7341		if (lpfc_sli4_hba_setup(phba)) {
7342			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7343					"1421 Failed to set up hba\n");
7344			error = -ENODEV;
7345			goto out_disable_intr;
7346		}
7347
7348		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
7349		if (intr_mode != 0)
7350			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7351							    LPFC_ACT_INTR_CNT);
7352
7353		/* Check active interrupts received only for MSI/MSI-X */
7354		if (intr_mode == 0 ||
7355		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7356			/* Log the current active interrupt mode */
7357			phba->intr_mode = intr_mode;
7358			lpfc_log_intr_mode(phba, intr_mode);
7359			break;
7360		}
7361		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7362				"0451 Configure interrupt mode (%d) "
7363				"failed active interrupt test.\n",
7364				intr_mode);
7365		/* Unset the preivous SLI-4 HBA setup */
7366		lpfc_sli4_unset_hba(phba);
7367		/* Try next level of interrupt mode */
7368		cfg_mode = --intr_mode;
7369	}
7370
7371	/* Perform post initialization setup */
7372	lpfc_post_init_setup(phba);
7373
7374	return 0;
7375
7376out_disable_intr:
7377	lpfc_sli4_disable_intr(phba);
7378out_free_sysfs_attr:
7379	lpfc_free_sysfs_attr(vport);
7380out_destroy_shost:
7381	lpfc_destroy_shost(phba);
7382out_unset_driver_resource:
7383	lpfc_unset_driver_resource_phase2(phba);
7384out_free_iocb_list:
7385	lpfc_free_iocb_list(phba);
7386out_unset_driver_resource_s4:
7387	lpfc_sli4_driver_resource_unset(phba);
7388out_unset_pci_mem_s4:
7389	lpfc_sli4_pci_mem_unset(phba);
7390out_disable_pci_dev:
7391	lpfc_disable_pci_dev(phba);
7392out_free_phba:
7393	lpfc_hba_free(phba);
7394	return error;
7395}
7396
7397/**
7398 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7399 * @pdev: pointer to PCI device
7400 *
7401 * This routine is called from the kernel's PCI subsystem to device with
7402 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7403 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7404 * device to be removed from the PCI subsystem properly.
7405 **/
7406static void __devexit
7407lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7408{
7409	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7410	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7411	struct lpfc_vport **vports;
7412	struct lpfc_hba *phba = vport->phba;
7413	int i;
7414
7415	/* Mark the device unloading flag */
7416	spin_lock_irq(&phba->hbalock);
7417	vport->load_flag |= FC_UNLOADING;
7418	spin_unlock_irq(&phba->hbalock);
7419
7420	/* Free the HBA sysfs attributes */
7421	lpfc_free_sysfs_attr(vport);
7422
7423	/* Release all the vports against this physical port */
7424	vports = lpfc_create_vport_work_array(phba);
7425	if (vports != NULL)
7426		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7427			fc_vport_terminate(vports[i]->fc_vport);
7428	lpfc_destroy_vport_work_array(phba, vports);
7429
7430	/* Remove FC host and then SCSI host with the physical port */
7431	fc_remove_host(shost);
7432	scsi_remove_host(shost);
7433
7434	/* Perform cleanup on the physical port */
7435	lpfc_cleanup(vport);
7436
7437	/*
7438	 * Bring down the SLI Layer. This step disables all interrupts,
7439	 * clears the rings, discards all mailbox commands, and resets
7440	 * the HBA FCoE function.
7441	 */
7442	lpfc_debugfs_terminate(vport);
7443	lpfc_sli4_hba_unset(phba);
7444
7445	spin_lock_irq(&phba->hbalock);
7446	list_del_init(&vport->listentry);
7447	spin_unlock_irq(&phba->hbalock);
7448
7449	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7450	 * buffers are released to their corresponding pools here.
7451	 */
7452	lpfc_scsi_free(phba);
7453	lpfc_sli4_driver_resource_unset(phba);
7454
7455	/* Unmap adapter Control and Doorbell registers */
7456	lpfc_sli4_pci_mem_unset(phba);
7457
7458	/* Release PCI resources and disable device's PCI function */
7459	scsi_host_put(shost);
7460	lpfc_disable_pci_dev(phba);
7461
7462	/* Finally, free the driver's device data structure */
7463	lpfc_hba_free(phba);
7464
7465	return;
7466}
7467
7468/**
7469 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7470 * @pdev: pointer to PCI device
7471 * @msg: power management message
7472 *
7473 * This routine is called from the kernel's PCI subsystem to support system
7474 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7475 * this method, it quiesces the device by stopping the driver's worker
7476 * thread for the device, turning off device's interrupt and DMA, and bring
7477 * the device offline. Note that as the driver implements the minimum PM
7478 * requirements to a power-aware driver's PM support for suspend/resume -- all
7479 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7480 * method call will be treated as SUSPEND and the driver will fully
7481 * reinitialize its device during resume() method call, the driver will set
7482 * device to PCI_D3hot state in PCI config space instead of setting it
7483 * according to the @msg provided by the PM.
7484 *
7485 * Return code
7486 * 	0 - driver suspended the device
7487 * 	Error otherwise
7488 **/
7489static int
7490lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
7491{
7492	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7493	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7494
7495	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7496			"0298 PCI device Power Management suspend.\n");
7497
7498	/* Bring down the device */
7499	lpfc_offline_prep(phba);
7500	lpfc_offline(phba);
7501	kthread_stop(phba->worker_thread);
7502
7503	/* Disable interrupt from device */
7504	lpfc_sli4_disable_intr(phba);
7505
7506	/* Save device state to PCI config space */
7507	pci_save_state(pdev);
7508	pci_set_power_state(pdev, PCI_D3hot);
7509
7510	return 0;
7511}
7512
7513/**
7514 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7515 * @pdev: pointer to PCI device
7516 *
7517 * This routine is called from the kernel's PCI subsystem to support system
7518 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
7519 * this method, it restores the device's PCI config space state and fully
7520 * reinitializes the device and brings it online. Note that as the driver
7521 * implements the minimum PM requirements to a power-aware driver's PM for
7522 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7523 * to the suspend() method call will be treated as SUSPEND and the driver
7524 * will fully reinitialize its device during resume() method call, the device
7525 * will be set to PCI_D0 directly in PCI config space before restoring the
7526 * state.
7527 *
7528 * Return code
7529 * 	0 - driver suspended the device
7530 * 	Error otherwise
7531 **/
7532static int
7533lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7534{
7535	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7536	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7537	uint32_t intr_mode;
7538	int error;
7539
7540	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7541			"0292 PCI device Power Management resume.\n");
7542
7543	/* Restore device state from PCI config space */
7544	pci_set_power_state(pdev, PCI_D0);
7545	pci_restore_state(pdev);
7546	if (pdev->is_busmaster)
7547		pci_set_master(pdev);
7548
7549	 /* Startup the kernel thread for this host adapter. */
7550	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7551					"lpfc_worker_%d", phba->brd_no);
7552	if (IS_ERR(phba->worker_thread)) {
7553		error = PTR_ERR(phba->worker_thread);
7554		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7555				"0293 PM resume failed to start worker "
7556				"thread: error=x%x.\n", error);
7557		return error;
7558	}
7559
7560	/* Configure and enable interrupt */
7561	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7562	if (intr_mode == LPFC_INTR_ERROR) {
7563		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7564				"0294 PM resume Failed to enable interrupt\n");
7565		return -EIO;
7566	} else
7567		phba->intr_mode = intr_mode;
7568
7569	/* Restart HBA and bring it online */
7570	lpfc_sli_brdrestart(phba);
7571	lpfc_online(phba);
7572
7573	/* Log the current active interrupt mode */
7574	lpfc_log_intr_mode(phba, phba->intr_mode);
7575
7576	return 0;
7577}
7578
7579/**
7580 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7581 * @pdev: pointer to PCI device.
7582 * @state: the current PCI connection state.
7583 *
7584 * This routine is called from the PCI subsystem for error handling to device
7585 * with SLI-4 interface spec. This function is called by the PCI subsystem
7586 * after a PCI bus error affecting this device has been detected. When this
7587 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7588 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7589 * for the PCI subsystem to perform proper recovery as desired.
7590 *
7591 * Return codes
7592 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7593 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7594 **/
7595static pci_ers_result_t
7596lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7597{
7598	return PCI_ERS_RESULT_NEED_RESET;
7599}
7600
7601/**
7602 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7603 * @pdev: pointer to PCI device.
7604 *
7605 * This routine is called from the PCI subsystem for error handling to device
7606 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7607 * restart the PCI card from scratch, as if from a cold-boot. During the
7608 * PCI subsystem error recovery, after the driver returns
7609 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7610 * recovery and then call this routine before calling the .resume method to
7611 * recover the device. This function will initialize the HBA device, enable
7612 * the interrupt, but it will just put the HBA to offline state without
7613 * passing any I/O traffic.
7614 *
7615 * Return codes
7616 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7617 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7618 */
7619static pci_ers_result_t
7620lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7621{
7622	return PCI_ERS_RESULT_RECOVERED;
7623}
7624
7625/**
7626 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7627 * @pdev: pointer to PCI device
7628 *
7629 * This routine is called from the PCI subsystem for error handling to device
7630 * with SLI-4 interface spec. It is called when kernel error recovery tells
7631 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7632 * error recovery. After this call, traffic can start to flow from this device
7633 * again.
7634 **/
7635static void
7636lpfc_io_resume_s4(struct pci_dev *pdev)
7637{
7638	return;
7639}
7640
7641/**
7642 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7643 * @pdev: pointer to PCI device
7644 * @pid: pointer to PCI device identifier
7645 *
7646 * This routine is to be registered to the kernel's PCI subsystem. When an
7647 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7648 * at PCI device-specific information of the device and driver to see if the
7649 * driver state that it can support this kind of device. If the match is
7650 * successful, the driver core invokes this routine. This routine dispatches
7651 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7652 * do all the initialization that it needs to do to handle the HBA device
7653 * properly.
7654 *
7655 * Return code
7656 * 	0 - driver can claim the device
7657 * 	negative value - driver can not claim the device
7658 **/
7659static int __devinit
7660lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7661{
7662	int rc;
7663	struct lpfc_sli_intf intf;
7664
7665	if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
7666		return -ENODEV;
7667
7668	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7669		(bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
7670		rc = lpfc_pci_probe_one_s4(pdev, pid);
7671	else
7672		rc = lpfc_pci_probe_one_s3(pdev, pid);
7673
7674	return rc;
7675}
7676
7677/**
7678 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7679 * @pdev: pointer to PCI device
7680 *
7681 * This routine is to be registered to the kernel's PCI subsystem. When an
7682 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7683 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7684 * remove routine, which will perform all the necessary cleanup for the
7685 * device to be removed from the PCI subsystem properly.
7686 **/
7687static void __devexit
7688lpfc_pci_remove_one(struct pci_dev *pdev)
7689{
7690	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7691	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7692
7693	switch (phba->pci_dev_grp) {
7694	case LPFC_PCI_DEV_LP:
7695		lpfc_pci_remove_one_s3(pdev);
7696		break;
7697	case LPFC_PCI_DEV_OC:
7698		lpfc_pci_remove_one_s4(pdev);
7699		break;
7700	default:
7701		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7702				"1424 Invalid PCI device group: 0x%x\n",
7703				phba->pci_dev_grp);
7704		break;
7705	}
7706	return;
7707}
7708
7709/**
7710 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7711 * @pdev: pointer to PCI device
7712 * @msg: power management message
7713 *
7714 * This routine is to be registered to the kernel's PCI subsystem to support
7715 * system Power Management (PM). When PM invokes this method, it dispatches
7716 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7717 * suspend the device.
7718 *
7719 * Return code
7720 * 	0 - driver suspended the device
7721 * 	Error otherwise
7722 **/
7723static int
7724lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7725{
7726	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7727	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7728	int rc = -ENODEV;
7729
7730	switch (phba->pci_dev_grp) {
7731	case LPFC_PCI_DEV_LP:
7732		rc = lpfc_pci_suspend_one_s3(pdev, msg);
7733		break;
7734	case LPFC_PCI_DEV_OC:
7735		rc = lpfc_pci_suspend_one_s4(pdev, msg);
7736		break;
7737	default:
7738		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7739				"1425 Invalid PCI device group: 0x%x\n",
7740				phba->pci_dev_grp);
7741		break;
7742	}
7743	return rc;
7744}
7745
7746/**
7747 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7748 * @pdev: pointer to PCI device
7749 *
7750 * This routine is to be registered to the kernel's PCI subsystem to support
7751 * system Power Management (PM). When PM invokes this method, it dispatches
7752 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7753 * resume the device.
7754 *
7755 * Return code
7756 * 	0 - driver suspended the device
7757 * 	Error otherwise
7758 **/
7759static int
7760lpfc_pci_resume_one(struct pci_dev *pdev)
7761{
7762	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7763	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7764	int rc = -ENODEV;
7765
7766	switch (phba->pci_dev_grp) {
7767	case LPFC_PCI_DEV_LP:
7768		rc = lpfc_pci_resume_one_s3(pdev);
7769		break;
7770	case LPFC_PCI_DEV_OC:
7771		rc = lpfc_pci_resume_one_s4(pdev);
7772		break;
7773	default:
7774		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7775				"1426 Invalid PCI device group: 0x%x\n",
7776				phba->pci_dev_grp);
7777		break;
7778	}
7779	return rc;
7780}
7781
7782/**
7783 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7784 * @pdev: pointer to PCI device.
7785 * @state: the current PCI connection state.
7786 *
7787 * This routine is registered to the PCI subsystem for error handling. This
7788 * function is called by the PCI subsystem after a PCI bus error affecting
7789 * this device has been detected. When this routine is invoked, it dispatches
7790 * the action to the proper SLI-3 or SLI-4 device error detected handling
7791 * routine, which will perform the proper error detected operation.
7792 *
7793 * Return codes
7794 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7795 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7796 **/
7797static pci_ers_result_t
7798lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7799{
7800	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7801	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7802	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7803
7804	switch (phba->pci_dev_grp) {
7805	case LPFC_PCI_DEV_LP:
7806		rc = lpfc_io_error_detected_s3(pdev, state);
7807		break;
7808	case LPFC_PCI_DEV_OC:
7809		rc = lpfc_io_error_detected_s4(pdev, state);
7810		break;
7811	default:
7812		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813				"1427 Invalid PCI device group: 0x%x\n",
7814				phba->pci_dev_grp);
7815		break;
7816	}
7817	return rc;
7818}
7819
7820/**
7821 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7822 * @pdev: pointer to PCI device.
7823 *
7824 * This routine is registered to the PCI subsystem for error handling. This
7825 * function is called after PCI bus has been reset to restart the PCI card
7826 * from scratch, as if from a cold-boot. When this routine is invoked, it
7827 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7828 * routine, which will perform the proper device reset.
7829 *
7830 * Return codes
7831 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7832 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7833 **/
7834static pci_ers_result_t
7835lpfc_io_slot_reset(struct pci_dev *pdev)
7836{
7837	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7838	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7839	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7840
7841	switch (phba->pci_dev_grp) {
7842	case LPFC_PCI_DEV_LP:
7843		rc = lpfc_io_slot_reset_s3(pdev);
7844		break;
7845	case LPFC_PCI_DEV_OC:
7846		rc = lpfc_io_slot_reset_s4(pdev);
7847		break;
7848	default:
7849		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7850				"1428 Invalid PCI device group: 0x%x\n",
7851				phba->pci_dev_grp);
7852		break;
7853	}
7854	return rc;
7855}
7856
7857/**
7858 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7859 * @pdev: pointer to PCI device
7860 *
7861 * This routine is registered to the PCI subsystem for error handling. It
7862 * is called when kernel error recovery tells the lpfc driver that it is
7863 * OK to resume normal PCI operation after PCI bus error recovery. When
7864 * this routine is invoked, it dispatches the action to the proper SLI-3
7865 * or SLI-4 device io_resume routine, which will resume the device operation.
7866 **/
7867static void
7868lpfc_io_resume(struct pci_dev *pdev)
7869{
7870	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7871	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7872
7873	switch (phba->pci_dev_grp) {
7874	case LPFC_PCI_DEV_LP:
7875		lpfc_io_resume_s3(pdev);
7876		break;
7877	case LPFC_PCI_DEV_OC:
7878		lpfc_io_resume_s4(pdev);
7879		break;
7880	default:
7881		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7882				"1429 Invalid PCI device group: 0x%x\n",
7883				phba->pci_dev_grp);
7884		break;
7885	}
7886	return;
7887}
7888
7889static struct pci_device_id lpfc_id_table[] = {
7890	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
7891		PCI_ANY_ID, PCI_ANY_ID, },
7892	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
7893		PCI_ANY_ID, PCI_ANY_ID, },
7894	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
7895		PCI_ANY_ID, PCI_ANY_ID, },
7896	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
7897		PCI_ANY_ID, PCI_ANY_ID, },
7898	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
7899		PCI_ANY_ID, PCI_ANY_ID, },
7900	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
7901		PCI_ANY_ID, PCI_ANY_ID, },
7902	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
7903		PCI_ANY_ID, PCI_ANY_ID, },
7904	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
7905		PCI_ANY_ID, PCI_ANY_ID, },
7906	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
7907		PCI_ANY_ID, PCI_ANY_ID, },
7908	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
7909		PCI_ANY_ID, PCI_ANY_ID, },
7910	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
7911		PCI_ANY_ID, PCI_ANY_ID, },
7912	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
7913		PCI_ANY_ID, PCI_ANY_ID, },
7914	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
7915		PCI_ANY_ID, PCI_ANY_ID, },
7916	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
7917		PCI_ANY_ID, PCI_ANY_ID, },
7918	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
7919		PCI_ANY_ID, PCI_ANY_ID, },
7920	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
7921		PCI_ANY_ID, PCI_ANY_ID, },
7922	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
7923		PCI_ANY_ID, PCI_ANY_ID, },
7924	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
7925		PCI_ANY_ID, PCI_ANY_ID, },
7926	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
7927		PCI_ANY_ID, PCI_ANY_ID, },
7928	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
7929		PCI_ANY_ID, PCI_ANY_ID, },
7930	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
7931		PCI_ANY_ID, PCI_ANY_ID, },
7932	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
7933		PCI_ANY_ID, PCI_ANY_ID, },
7934	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
7935		PCI_ANY_ID, PCI_ANY_ID, },
7936	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
7937		PCI_ANY_ID, PCI_ANY_ID, },
7938	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
7939		PCI_ANY_ID, PCI_ANY_ID, },
7940	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
7941		PCI_ANY_ID, PCI_ANY_ID, },
7942	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
7943		PCI_ANY_ID, PCI_ANY_ID, },
7944	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
7945		PCI_ANY_ID, PCI_ANY_ID, },
7946	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
7947		PCI_ANY_ID, PCI_ANY_ID, },
7948	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
7949		PCI_ANY_ID, PCI_ANY_ID, },
7950	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
7951		PCI_ANY_ID, PCI_ANY_ID, },
7952	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
7953		PCI_ANY_ID, PCI_ANY_ID, },
7954	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
7955		PCI_ANY_ID, PCI_ANY_ID, },
7956	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
7957		PCI_ANY_ID, PCI_ANY_ID, },
7958	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
7959		PCI_ANY_ID, PCI_ANY_ID, },
7960	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
7961		PCI_ANY_ID, PCI_ANY_ID, },
7962	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
7963		PCI_ANY_ID, PCI_ANY_ID, },
7964	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7965		PCI_ANY_ID, PCI_ANY_ID, },
7966	{ 0 }
7967};
7968
7969MODULE_DEVICE_TABLE(pci, lpfc_id_table);
7970
7971static struct pci_error_handlers lpfc_err_handler = {
7972	.error_detected = lpfc_io_error_detected,
7973	.slot_reset = lpfc_io_slot_reset,
7974	.resume = lpfc_io_resume,
7975};
7976
7977static struct pci_driver lpfc_driver = {
7978	.name		= LPFC_DRIVER_NAME,
7979	.id_table	= lpfc_id_table,
7980	.probe		= lpfc_pci_probe_one,
7981	.remove		= __devexit_p(lpfc_pci_remove_one),
7982	.suspend        = lpfc_pci_suspend_one,
7983	.resume		= lpfc_pci_resume_one,
7984	.err_handler    = &lpfc_err_handler,
7985};
7986
7987/**
7988 * lpfc_init - lpfc module initialization routine
7989 *
7990 * This routine is to be invoked when the lpfc module is loaded into the
7991 * kernel. The special kernel macro module_init() is used to indicate the
7992 * role of this routine to the kernel as lpfc module entry point.
7993 *
7994 * Return codes
7995 *   0 - successful
7996 *   -ENOMEM - FC attach transport failed
7997 *   all others - failed
7998 */
7999static int __init
8000lpfc_init(void)
8001{
8002	int error = 0;
8003
8004	printk(LPFC_MODULE_DESC "\n");
8005	printk(LPFC_COPYRIGHT "\n");
8006
8007	if (lpfc_enable_npiv) {
8008		lpfc_transport_functions.vport_create = lpfc_vport_create;
8009		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
8010	}
8011	lpfc_transport_template =
8012				fc_attach_transport(&lpfc_transport_functions);
8013	if (lpfc_transport_template == NULL)
8014		return -ENOMEM;
8015	if (lpfc_enable_npiv) {
8016		lpfc_vport_transport_template =
8017			fc_attach_transport(&lpfc_vport_transport_functions);
8018		if (lpfc_vport_transport_template == NULL) {
8019			fc_release_transport(lpfc_transport_template);
8020			return -ENOMEM;
8021		}
8022	}
8023	error = pci_register_driver(&lpfc_driver);
8024	if (error) {
8025		fc_release_transport(lpfc_transport_template);
8026		if (lpfc_enable_npiv)
8027			fc_release_transport(lpfc_vport_transport_template);
8028	}
8029
8030	return error;
8031}
8032
8033/**
8034 * lpfc_exit - lpfc module removal routine
8035 *
8036 * This routine is invoked when the lpfc module is removed from the kernel.
8037 * The special kernel macro module_exit() is used to indicate the role of
8038 * this routine to the kernel as lpfc module exit point.
8039 */
8040static void __exit
8041lpfc_exit(void)
8042{
8043	pci_unregister_driver(&lpfc_driver);
8044	fc_release_transport(lpfc_transport_template);
8045	if (lpfc_enable_npiv)
8046		fc_release_transport(lpfc_vport_transport_template);
8047	if (_dump_buf_data) {
8048		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
8049				"at 0x%p\n",
8050				(1L << _dump_buf_data_order), _dump_buf_data);
8051		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8052	}
8053
8054	if (_dump_buf_dif) {
8055		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
8056				"at 0x%p\n",
8057				(1L << _dump_buf_dif_order), _dump_buf_dif);
8058		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8059	}
8060}
8061
8062module_init(lpfc_init);
8063module_exit(lpfc_exit);
8064MODULE_LICENSE("GPL");
8065MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8066MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8067MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
8068