lpfc_init.c revision da0436e915a5c17ee79e72c1bf978a4ebb1cbf4d
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

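			/*
			 * One-time conversion: treat the 56-byte key as
			 * fourteen 32-bit words and store each in big-endian
			 * order before it is copied into the READ_NVPARM
			 * mailbox below (a no-op on big-endian hosts).
			 */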
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

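	/*
	 * Pull the VPD image out of the adapter with successive DUMP_MEM
	 * mailbox commands: each iteration copies the chunk the adapter
	 * returned (word_cnt) into lpfc_vpd_data at the running offset,
	 * until the adapter has no more data or DMP_VPD_SIZE is reached.
	 */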
	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it sets the internal async event support
 * flag to 1; otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

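	/*
	 * Overlay struct prog_id on the 32-bit wakeup parameter word and
	 * format it, e.g. ver=5, rev=2, lev=1, dist=1 ('a'), num=3 decodes
	 * to "5.21a3"; a dist of 3 with num 0 suppresses the suffix ("5.21").
	 */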
	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the config port completed correctly the HBA is not
	 * overheated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->mb;

	/* Get login parameters for NID.  */
	lpfc_read_sparam(phba, pmb, 0);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
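	/*
	 * Each of the 6 IEEE bytes of the WWNN yields two lowercase hex
	 * digits (e.g. 0x0A becomes '0' 'a'), giving a 12-character
	 * serial number string.
	 */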
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

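	/*
	 * Validate the requested link speed against the link-module type
	 * (lmt) capability bits reported by READ_CONFIG; any unsupported
	 * setting falls back to auto-negotiation.
	 */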
	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->mb.mbxCommand, pmb->mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	spin_lock_irq(&phba->hbalock);
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

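	/*
	 * Timer arithmetic below: mod_timer() takes an absolute expiry in
	 * jiffies, so "jiffies + HZ * n" arms a timer n seconds from now.
	 */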
	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

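	/* Scrub the state of each aborted SCSI buffer before handing the
	 * whole batch back to the free list.
	 */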
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expired
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset the heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the
 * HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
 * resets the timer for the next timeout period. If the lpfc heart-beat
 * mailbox command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding, the
 * HBA shall be taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

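	/* If an I/O completed within the last heart-beat interval there is
	 * no need to ping the adapter; just re-arm the timer and return.
	 */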
	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If the heart beat timeout is called with
			 * hb_outstanding set we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	lpfc_sli_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers an error attention. That could
	 * cause I/Os to be dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

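/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor event with subcategory
 * LPFC_EVENT_PORTINTERR through the fc_host of the physical port.
 **/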
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers an error attention with
		 * HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine through the API jump table function pointer in the lpfc_hba
 * struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

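	/* rc encodes the failure point for the 0300 log message in the
	 * error exit path: 1 = mailbox alloc failed, 2 = dmabuf alloc
	 * failed, 3 = mbuf alloc failed, 4 = READ_LA mailbox not issued.
	 */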
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
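	/*
	 * The VPD image is a sequence of resources: a tag byte (0x82 and
	 * 0x91 resources are skipped, 0x90 holds the read-only keyword
	 * data, 0x78 ends the list) followed by a 16-bit little-endian
	 * length and the data itself. Inside the 0x90 resource,
	 * two-character keywords ("SN", "V1".."V4") each carry a one-byte
	 * length and value.
	 */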
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of 256
 * chars. It shall be returned with the model name, maximum speed, and the
 * host bus type. The @mdp passed into this function points to an array of 80
 * chars. When the function returns, the @mdp will be filled with the model
 * name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		int   max_speed;
		char *bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100-F", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK_S:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100-F-S", max_speed, "PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

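	/*
	 * Example: a PCI_DEVICE_ID_SAT board with LMT_8Gb set in phba->lmt
	 * yields the description "Emulex LPe12000 8Gb PCIe Fibre Channel
	 * Adapter" from the formatting below.
	 */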
	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* oneConnect HBAs require special processing: they are all
	 * initiators and we put the port number on the end.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, FCoE Initiator, Port %s",
				m.name,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, m.max_speed,
				(GE) ? "GE" : "Gb",
				m.bus,
				(GE) ? "FCoE Adapter" :
					"Fibre Channel Adapter");
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

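	/* Include any buffers we failed to post on a previous call; on any
	 * failure below the outstanding count is parked in pring->missbufcnt
	 * and retried the next time this routine runs.
	 */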
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
1762
1763/**
1764 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to the ELS ring
1765 * @phba: pointer to lpfc hba data structure.
1766 *
1767 * This routine posts the initial receive IOCB buffers to the ELS ring. The
1768 * number of initial IOCB buffers is specified by LPFC_BUF_RING0, which is
1769 * currently set to 64 IOCBs.
1770 *
1771 * Return codes
1772 *   0 - success (currently always success)
1773 **/
1774static int
1775lpfc_post_rcv_buf(struct lpfc_hba *phba)
1776{
1777	struct lpfc_sli *psli = &phba->sli;
1778
1779	/* Ring 0, ELS / CT buffers */
1780	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1781	/* Ring 2 - FCP no buffers needed */
1782
1783	return 0;
1784}
1785
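/* S(N,V): rotate the 32-bit value V left by N bits (the SHA-1 ROTL primitive) */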
1786#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1787
1788/**
1789 * lpfc_sha_init - Set up initial array of hash table entries
1790 * @HashResultPointer: pointer to an array as hash table.
1791 *
1792 * This routine sets up the initial values in the array of hash table entries
1793 * for the LC HBAs.
1794 **/
1795static void
1796	lpfc_sha_init(uint32_t *HashResultPointer)
1797{
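	/* These are the standard SHA-1 initial digest values H0..H4 */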
1798	HashResultPointer[0] = 0x67452301;
1799	HashResultPointer[1] = 0xEFCDAB89;
1800	HashResultPointer[2] = 0x98BADCFE;
1801	HashResultPointer[3] = 0x10325476;
1802	HashResultPointer[4] = 0xC3D2E1F0;
1803}
1804
1805/**
1806 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1807 * @HashResultPointer: pointer to an initial/result hash table.
1808 * @HashWorkingPointer: pointer to an working hash table.
1809 * @HashWorkingPointer: pointer to a working hash table.
1810 *
1811 * This routine iterates an initial hash table pointed to by @HashResultPointer
1812 * with the values from the working hash table pointed to by @HashWorkingPointer.
1813 * The results are put back into the initial hash table, returned through
1814 **/
1815static void
1816	lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
1817{
1818	int t;
1819	uint32_t TEMP;
1820	uint32_t A, B, C, D, E;
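	/* Message schedule: W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]), t = 16..79 */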
1821	t = 16;
1822	do {
1823		HashWorkingPointer[t] =
1824		    S(1, HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
1827			 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1828	} while (++t <= 79);
1829	t = 0;
1830	A = HashResultPointer[0];
1831	B = HashResultPointer[1];
1832	C = HashResultPointer[2];
1833	D = HashResultPointer[3];
1834	E = HashResultPointer[4];
1835
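	/* 80 SHA-1 rounds: the Ch, Parity and Maj round functions paired
	 * with the standard K constants.
	 */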
1836	do {
1837		if (t < 20) {
1838			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1839		} else if (t < 40) {
1840			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1841		} else if (t < 60) {
1842			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1843		} else {
1844			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1845		}
1846		TEMP += S(5, A) + E + HashWorkingPointer[t];
1847		E = D;
1848		D = C;
1849		C = S(30, B);
1850		B = A;
1851		A = TEMP;
1852	} while (++t <= 79);
1853
1854	HashResultPointer[0] += A;
1855	HashResultPointer[1] += B;
1856	HashResultPointer[2] += C;
1857	HashResultPointer[3] += D;
1858	HashResultPointer[4] += E;
1859
1860}
1861
1862/**
1863 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1864 * @RandomChallenge: pointer to the entry of host challenge random number array.
1865 * @HashWorking: pointer to the entry of the working hash array.
1866 *
1867 * This routine calculates the working hash array referred by @HashWorking
1868 * from the challenge random numbers associated with the host, referred by
1869 * @RandomChallenge. The result is put into the entry of the working hash
1870 * array and returned by reference through @HashWorking.
1871 **/
1872static void
1873	lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
1874{
1875	*HashWorking = (*RandomChallenge ^ *HashWorking);
1876}
1877
1878/**
1879 * lpfc_hba_init - Perform special handling for LC HBA initialization
1880 * @phba: pointer to lpfc hba data structure.
1881 * @hbainit: pointer to an array of unsigned 32-bit integers.
1882 *
1883 * This routine performs the special handling for LC HBA initialization:
 * it derives a SHA-1 based digest from the HBA's WWNN and the host random
 * challenge data and returns the result through @hbainit.
1884 **/
1885void
1886lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1887{
1888	int t;
1889	uint32_t *HashWorking;
1890	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1891
1892	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1893	if (!HashWorking)
1894		return;
1895
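	/* Seed both ends of the working array with the two 32-bit words of the WWNN */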
1896	HashWorking[0] = HashWorking[78] = *pwwnn++;
1897	HashWorking[1] = HashWorking[79] = *pwwnn;
1898
1899	for (t = 0; t < 7; t++)
1900		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1901
1902	lpfc_sha_init(hbainit);
1903	lpfc_sha_iterate(hbainit, HashWorking);
1904	kfree(HashWorking);
1905}
1906
1907/**
1908 * lpfc_cleanup - Performs vport cleanups before deleting a vport
1909 * @vport: pointer to a virtual N_Port data structure.
1910 *
1911 * This routine performs the necessary cleanups before deleting the @vport.
1912 * It invokes the discovery state machine to perform necessary state
1913 * transitions and to release the ndlps associated with the @vport. Note,
1914 * the physical port is treated as @vport 0.
1915 **/
1916void
1917lpfc_cleanup(struct lpfc_vport *vport)
1918{
1919	struct lpfc_hba   *phba = vport->phba;
1920	struct lpfc_nodelist *ndlp, *next_ndlp;
1921	int i = 0;
1922
1923	if (phba->link_state > LPFC_LINK_DOWN)
1924		lpfc_port_link_failure(vport);
1925
1926	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1927		if (!NLP_CHK_NODE_ACT(ndlp)) {
1928			ndlp = lpfc_enable_node(vport, ndlp,
1929						NLP_STE_UNUSED_NODE);
1930			if (!ndlp)
1931				continue;
1932			spin_lock_irq(&phba->ndlp_lock);
1933			NLP_SET_FREE_REQ(ndlp);
1934			spin_unlock_irq(&phba->ndlp_lock);
1935			/* Trigger the release of the ndlp memory */
1936			lpfc_nlp_put(ndlp);
1937			continue;
1938		}
1939		spin_lock_irq(&phba->ndlp_lock);
1940		if (NLP_CHK_FREE_REQ(ndlp)) {
1941			/* The ndlp should not be in memory free mode already */
1942			spin_unlock_irq(&phba->ndlp_lock);
1943			continue;
1944		} else
1945			/* Indicate request for freeing ndlp memory */
1946			NLP_SET_FREE_REQ(ndlp);
1947		spin_unlock_irq(&phba->ndlp_lock);
1948
1949		if (vport->port_type != LPFC_PHYSICAL_PORT &&
1950		    ndlp->nlp_DID == Fabric_DID) {
1951			/* Just free up ndlp with Fabric_DID for vports */
1952			lpfc_nlp_put(ndlp);
1953			continue;
1954		}
1955
1956		if (ndlp->nlp_type & NLP_FABRIC)
1957			lpfc_disc_state_machine(vport, ndlp, NULL,
1958					NLP_EVT_DEVICE_RECOVERY);
1959
1960		lpfc_disc_state_machine(vport, ndlp, NULL,
1961					     NLP_EVT_DEVICE_RM);
1962
1963	}
1964
1965	/* At this point, ALL ndlps should be gone
1966	 * because of the previous NLP_EVT_DEVICE_RM.
1967	 * Let's wait for this to happen, if needed.
1968	 */
1969	while (!list_empty(&vport->fc_nodes)) {
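		/* Up to 3000 passes of a 10 ms sleep: roughly a 30 second grace period */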
1970		if (i++ > 3000) {
1971			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1972				"0233 Nodelist not empty\n");
1973			list_for_each_entry_safe(ndlp, next_ndlp,
1974						&vport->fc_nodes, nlp_listp) {
1975				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1976						LOG_NODE,
1977						"0282 did:x%x ndlp:x%p "
1978						"usgmap:x%x refcnt:%d\n",
1979						ndlp->nlp_DID, (void *)ndlp,
1980						ndlp->nlp_usg_map,
1981						atomic_read(
1982							&ndlp->kref.refcount));
1983			}
1984			break;
1985		}
1986
1987		/* Wait for any activity on ndlps to settle */
1988		msleep(10);
1989	}
1990}
1991
1992/**
1993 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
1994 * @vport: pointer to a virtual N_Port data structure.
1995 *
1996 * This routine stops all the timers associated with a @vport. This function
1997 * is invoked before disabling or deleting a @vport. Note that the physical
1998 * port is treated as @vport 0.
1999 **/
2000void
2001lpfc_stop_vport_timers(struct lpfc_vport *vport)
2002{
2003	del_timer_sync(&vport->els_tmofunc);
2004	del_timer_sync(&vport->fc_fdmitmo);
2005	lpfc_can_disctmo(vport);
2006	return;
2007}
2008
2009/**
2010 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2011 * @phba: pointer to lpfc hba data structure.
2012 *
2013 * This routine stops all the timers associated with an HBA. This function is
2014 * invoked before either putting an HBA offline or unloading the driver.
2015 **/
2016void
2017lpfc_stop_hba_timers(struct lpfc_hba *phba)
2018{
2019	lpfc_stop_vport_timers(phba->pport);
2020	del_timer_sync(&phba->sli.mbox_tmo);
2021	del_timer_sync(&phba->fabric_block_timer);
2022	del_timer_sync(&phba->eratt_poll);
2023	del_timer_sync(&phba->hb_tmofunc);
2024	phba->hb_outstanding = 0;
2025
2026	switch (phba->pci_dev_grp) {
2027	case LPFC_PCI_DEV_LP:
2028		/* Stop any LightPulse device specific driver timers */
2029		del_timer_sync(&phba->fcp_poll_timer);
2030		break;
2031	case LPFC_PCI_DEV_OC:
2032		/* Stop any OneConnect device specific driver timers */
2033		break;
2034	default:
2035		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2036				"0297 Invalid device group (x%x)\n",
2037				phba->pci_dev_grp);
2038		break;
2039	}
2040	return;
2041}
2042
2043/**
2044 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2045 * @phba: pointer to lpfc hba data structure.
2046 *
2047 * This routine marks an HBA's management interface as blocked. Once the HBA's
2048 * management interface is marked as blocked, all user space access to the
2049 * HBA, whether through the sysfs interface or the libdfc interface, will be
2050 * blocked. The HBA is set to block the management interface when the
2051 * driver prepares the HBA interface for online or offline.
2052 **/
2053static void
2054	lpfc_block_mgmt_io(struct lpfc_hba *phba)
2055{
2056	unsigned long iflag;
2057
2058	spin_lock_irqsave(&phba->hbalock, iflag);
2059	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2060	spin_unlock_irqrestore(&phba->hbalock, iflag);
2061}
2062
2063/**
2064 * lpfc_online - Initialize and bring a HBA online
2065 * @phba: pointer to lpfc hba data structure.
2066 *
2067 * This routine initializes the HBA and brings it online. During this
2068 * process, the management interface is blocked to prevent user space access
2069 * to the HBA from interfering with the driver initialization.
2070 *
2071 * Return codes
2072 *   0 - successful
2073 *   1 - failed
2074 **/
2075int
2076lpfc_online(struct lpfc_hba *phba)
2077{
2078	struct lpfc_vport *vport;
2079	struct lpfc_vport **vports;
2080	int i;
2081
2082	if (!phba)
2083		return 0;
2084	vport = phba->pport;
2085
2086	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2087		return 0;
2088
2089	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2090			"0458 Bring Adapter online\n");
2091
2092	lpfc_block_mgmt_io(phba);
2093
2094	if (!lpfc_sli_queue_setup(phba)) {
2095		lpfc_unblock_mgmt_io(phba);
2096		return 1;
2097	}
2098
2099	if (phba->sli_rev == LPFC_SLI_REV4) {
2100		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2101			lpfc_unblock_mgmt_io(phba);
2102			return 1;
2103		}
2104	} else {
2105		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2106			lpfc_unblock_mgmt_io(phba);
2107			return 1;
2108		}
2109	}
2110
2111	vports = lpfc_create_vport_work_array(phba);
2112	if (vports != NULL)
2113		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2114			struct Scsi_Host *shost;
2115			shost = lpfc_shost_from_vport(vports[i]);
2116			spin_lock_irq(shost->host_lock);
2117			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2118			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2119				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2120			spin_unlock_irq(shost->host_lock);
2121		}
2122	lpfc_destroy_vport_work_array(phba, vports);
2123
2124	lpfc_unblock_mgmt_io(phba);
2125	return 0;
2126}
2127
2128/**
2129 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2130 * @phba: pointer to lpfc hba data structure.
2131 *
2132 * This routine marks an HBA's management interface as not blocked. Once the
2133 * HBA's management interface is marked as not blocked, all user space
2134 * access to the HBA, whether through the sysfs interface or the libdfc
2135 * interface, will be allowed. The HBA is set to block the management interface
2136 * when the driver prepares the HBA interface for online or offline and then
2137 * set to unblock the management interface afterwards.
2138 **/
2139void
2140	lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
2141{
2142	unsigned long iflag;
2143
2144	spin_lock_irqsave(&phba->hbalock, iflag);
2145	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2146	spin_unlock_irqrestore(&phba->hbalock, iflag);
2147}
2148
2149/**
2150 * lpfc_offline_prep - Prepare an HBA to be brought offline
2151 * @phba: pointer to lpfc hba data structure.
2152 *
2153 * This routine is invoked to prepare an HBA to be brought offline. It issues
2154 * an unreg_login to all the nodes on all vports and flushes the mailbox
2155 * queue to make it ready to be brought offline.
2156 **/
2157void
2158	lpfc_offline_prep(struct lpfc_hba *phba)
2159{
2160	struct lpfc_vport *vport = phba->pport;
2161	struct lpfc_nodelist  *ndlp, *next_ndlp;
2162	struct lpfc_vport **vports;
2163	int i;
2164
2165	if (vport->fc_flag & FC_OFFLINE_MODE)
2166		return;
2167
2168	lpfc_block_mgmt_io(phba);
2169
2170	lpfc_linkdown(phba);
2171
2172	/* Issue an unreg_login to all nodes on all vports */
2173	vports = lpfc_create_vport_work_array(phba);
2174	if (vports != NULL) {
2175		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2176			struct Scsi_Host *shost;
2177
2178			if (vports[i]->load_flag & FC_UNLOADING)
2179				continue;
2180			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
2181			shost =	lpfc_shost_from_vport(vports[i]);
2182			list_for_each_entry_safe(ndlp, next_ndlp,
2183						 &vports[i]->fc_nodes,
2184						 nlp_listp) {
2185				if (!NLP_CHK_NODE_ACT(ndlp))
2186					continue;
2187				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2188					continue;
2189				if (ndlp->nlp_type & NLP_FABRIC) {
2190					lpfc_disc_state_machine(vports[i], ndlp,
2191						NULL, NLP_EVT_DEVICE_RECOVERY);
2192					lpfc_disc_state_machine(vports[i], ndlp,
2193						NULL, NLP_EVT_DEVICE_RM);
2194				}
2195				spin_lock_irq(shost->host_lock);
2196				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2197				spin_unlock_irq(shost->host_lock);
2198				lpfc_unreg_rpi(vports[i], ndlp);
2199			}
2200		}
2201	}
2202	lpfc_destroy_vport_work_array(phba, vports);
2203
2204	lpfc_sli_flush_mbox_queue(phba);
2205}
2206
2207/**
2208 * lpfc_offline - Bring an HBA offline
2209 * @phba: pointer to lpfc hba data structure.
2210 *
2211 * This routine actually brings an HBA offline. It stops all the timers
2212 * associated with the HBA, brings down the SLI layer, and eventually
2213 * marks the HBA as in offline state for the upper layer protocol.
2214 **/
2215void
2216lpfc_offline(struct lpfc_hba *phba)
2217{
2218	struct Scsi_Host  *shost;
2219	struct lpfc_vport **vports;
2220	int i;
2221
2222	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2223		return;
2224
2225	/* stop port and all timers associated with this hba */
2226	lpfc_stop_port(phba);
2227	vports = lpfc_create_vport_work_array(phba);
2228	if (vports != NULL)
2229		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2230			lpfc_stop_vport_timers(vports[i]);
2231	lpfc_destroy_vport_work_array(phba, vports);
2232	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2233			"0460 Bring Adapter offline\n");
2234	/* Bring down the SLI Layer and cleanup.  The HBA is offline now. */
2236	lpfc_sli_hba_down(phba);
2237	spin_lock_irq(&phba->hbalock);
2238	phba->work_ha = 0;
2239	spin_unlock_irq(&phba->hbalock);
2240	vports = lpfc_create_vport_work_array(phba);
2241	if (vports != NULL)
2242		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2243			shost = lpfc_shost_from_vport(vports[i]);
2244			spin_lock_irq(shost->host_lock);
2245			vports[i]->work_port_events = 0;
2246			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2247			spin_unlock_irq(shost->host_lock);
2248		}
2249	lpfc_destroy_vport_work_array(phba, vports);
2250}
2251
2252/**
2253 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2254 * @phba: pointer to lpfc hba data structure.
2255 *
2256 * This routine is to free all the SCSI buffers and IOCBs from the driver
2257 * list back to the kernel. It is called from lpfc_pci_remove_one to free
2258 * the internal resources before the device is removed from the system.
2259 *
2260 * Return codes
2261 *   0 - successful (for now, it always returns 0)
2262 **/
2263static int
2264lpfc_scsi_free(struct lpfc_hba *phba)
2265{
2266	struct lpfc_scsi_buf *sb, *sb_next;
2267	struct lpfc_iocbq *io, *io_next;
2268
2269	spin_lock_irq(&phba->hbalock);
2270	/* Release all the lpfc_scsi_bufs maintained by this host. */
2271	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2272		list_del(&sb->list);
2273		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2274			      sb->dma_handle);
2275		kfree(sb);
2276		phba->total_scsi_bufs--;
2277	}
2278
2279	/* Release all the lpfc_iocbq entries maintained by this host. */
2280	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2281		list_del(&io->list);
2282		kfree(io);
2283		phba->total_iocbq_bufs--;
2284	}
2285
2286	spin_unlock_irq(&phba->hbalock);
2287
2288	return 0;
2289}
2290
2291/**
2292 * lpfc_create_port - Create an FC port
2293 * @phba: pointer to lpfc hba data structure.
2294 * @instance: a unique integer ID for this FC port.
2295 * @dev: pointer to the device data structure.
2296 *
2297 * This routine creates an FC port for the upper layer protocol. The FC port
2298 * can be created on top of either a physical port or a virtual port provided
2299 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2300 * and associates it with the FC port before adding the shost to the SCSI
2301 * layer.
2302 *
2303 * Return codes
2304 *   @vport - pointer to the virtual N_Port data structure.
2305 *   NULL - port create failed.
2306 **/
2307struct lpfc_vport *
2308lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2309{
2310	struct lpfc_vport *vport;
2311	struct Scsi_Host  *shost;
2312	int error = 0;
2313
2314	if (dev != &phba->pcidev->dev)
2315		shost = scsi_host_alloc(&lpfc_vport_template,
2316					sizeof(struct lpfc_vport));
2317	else
2318		shost = scsi_host_alloc(&lpfc_template,
2319					sizeof(struct lpfc_vport));
2320	if (!shost)
2321		goto out;
2322
2323	vport = (struct lpfc_vport *) shost->hostdata;
2324	vport->phba = phba;
2325	vport->load_flag |= FC_LOADING;
2326	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2327	vport->fc_rscn_flush = 0;
2328
2329	lpfc_get_vport_cfgparam(vport);
2330	shost->unique_id = instance;
2331	shost->max_id = LPFC_MAX_TARGET;
2332	shost->max_lun = vport->cfg_max_luns;
2333	shost->this_id = -1;
2334	shost->max_cmd_len = 16;
2335	if (phba->sli_rev == LPFC_SLI_REV4) {
2336		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2337		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2338	}
2339
2340	/*
2341	 * Set initial can_queue value since 0 is no longer supported and
2342	 * scsi_add_host will fail. This will be adjusted later based on the
2343	 * max xri value determined in hba setup.
2344	 */
2345	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2346	if (dev != &phba->pcidev->dev) {
2347		shost->transportt = lpfc_vport_transport_template;
2348		vport->port_type = LPFC_NPIV_PORT;
2349	} else {
2350		shost->transportt = lpfc_transport_template;
2351		vport->port_type = LPFC_PHYSICAL_PORT;
2352	}
2353
2354	/* Initialize all internally managed lists. */
2355	INIT_LIST_HEAD(&vport->fc_nodes);
2356	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2357	spin_lock_init(&vport->work_port_lock);
2358
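	/* Discovery, FDMI and ELS timeout timers; each callback receives the owning vport */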
2359	init_timer(&vport->fc_disctmo);
2360	vport->fc_disctmo.function = lpfc_disc_timeout;
2361	vport->fc_disctmo.data = (unsigned long)vport;
2362
2363	init_timer(&vport->fc_fdmitmo);
2364	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2365	vport->fc_fdmitmo.data = (unsigned long)vport;
2366
2367	init_timer(&vport->els_tmofunc);
2368	vport->els_tmofunc.function = lpfc_els_timeout;
2369	vport->els_tmofunc.data = (unsigned long)vport;
2370
2371	error = scsi_add_host(shost, dev);
2372	if (error)
2373		goto out_put_shost;
2374
2375	spin_lock_irq(&phba->hbalock);
2376	list_add_tail(&vport->listentry, &phba->port_list);
2377	spin_unlock_irq(&phba->hbalock);
2378	return vport;
2379
2380out_put_shost:
2381	scsi_host_put(shost);
2382out:
2383	return NULL;
2384}
2385
2386/**
2387 * destroy_port - Destroy an FC port
2388 * @vport: pointer to an lpfc virtual N_Port data structure.
2389 *
2390 * This routine destroys an FC port from the upper layer protocol. All the
2391 * resources associated with the port are released.
2392 **/
2393void
2394destroy_port(struct lpfc_vport *vport)
2395{
2396	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2397	struct lpfc_hba  *phba = vport->phba;
2398
2399	lpfc_debugfs_terminate(vport);
2400	fc_remove_host(shost);
2401	scsi_remove_host(shost);
2402
2403	spin_lock_irq(&phba->hbalock);
2404	list_del_init(&vport->listentry);
2405	spin_unlock_irq(&phba->hbalock);
2406
2407	lpfc_cleanup(vport);
2408	return;
2409}
2410
2411/**
2412 * lpfc_get_instance - Get a unique integer ID
2413 *
2414 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2415 * uses the kernel idr facility to perform the task.
2416 *
2417 * Return codes:
2418 *   instance - a unique integer ID allocated as the new instance.
2419 *   -1 - lpfc get instance failed.
2420 **/
2421int
2422lpfc_get_instance(void)
2423{
2424	int instance = 0;
2425
2426	/* Assign an unused number */
2427	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2428		return -1;
2429	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2430		return -1;
2431	return instance;
2432}
2433
2434/**
2435 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2436 * @shost: pointer to SCSI host data structure.
2437 * @time: elapsed time of the scan in jiffies.
2438 *
2439 * This routine is called by the SCSI layer with a SCSI host to determine
2440 * whether the host scan is finished.
2441 *
2442 * Note: there is no scan_start function as adapter initialization will have
2443 * asynchronously kicked off the link initialization.
2444 *
2445 * Return codes
2446 *   0 - SCSI host scan is not over yet.
2447 *   1 - SCSI host scan is over.
2448 **/
2449int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2450{
2451	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2452	struct lpfc_hba   *phba = vport->phba;
2453	int stat = 0;
2454
2455	spin_lock_irq(shost->host_lock);
2456
2457	if (vport->load_flag & FC_UNLOADING) {
2458		stat = 1;
2459		goto finished;
2460	}
2461	if (time >= 30 * HZ) {
2462		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2463				"0461 Scanning longer than 30 "
2464				"seconds.  Continuing initialization\n");
2465		stat = 1;
2466		goto finished;
2467	}
2468	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2469		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2470				"0465 Link down longer than 15 "
2471				"seconds.  Continuing initialization\n");
2472		stat = 1;
2473		goto finished;
2474	}
2475
2476	if (vport->port_state != LPFC_VPORT_READY)
2477		goto finished;
2478	if (vport->num_disc_nodes || vport->fc_prli_sent)
2479		goto finished;
2480	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2481		goto finished;
2482	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2483		goto finished;
2484
2485	stat = 1;
2486
2487finished:
2488	spin_unlock_irq(shost->host_lock);
2489	return stat;
2490}
2491
2492/**
2493 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
2494 * @shost: pointer to SCSI host data structure.
2495 *
2496 * This routine initializes the SCSI host attributes of a given FC port. The
2497 * SCSI host can be either on top of a physical port or a virtual port.
2498 **/
2499void lpfc_host_attrib_init(struct Scsi_Host *shost)
2500{
2501	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2502	struct lpfc_hba   *phba = vport->phba;
2503	/*
2504	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2505	 */
2506
2507	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2508	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2509	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2510
2511	memset(fc_host_supported_fc4s(shost), 0,
2512	       sizeof(fc_host_supported_fc4s(shost)));
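	/* Bytes 2 and 7 of the FC-4 types mask advertise SCSI FCP and CT support */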
2513	fc_host_supported_fc4s(shost)[2] = 1;
2514	fc_host_supported_fc4s(shost)[7] = 1;
2515
2516	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2517				 sizeof fc_host_symbolic_name(shost));
2518
2519	fc_host_supported_speeds(shost) = 0;
2520	if (phba->lmt & LMT_10Gb)
2521		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2522	if (phba->lmt & LMT_8Gb)
2523		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2524	if (phba->lmt & LMT_4Gb)
2525		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2526	if (phba->lmt & LMT_2Gb)
2527		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2528	if (phba->lmt & LMT_1Gb)
2529		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2530
2531	fc_host_maxframe_size(shost) =
2532		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2533		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2534
2535	/* This value is also unchanging */
2536	memset(fc_host_active_fc4s(shost), 0,
2537	       sizeof(fc_host_active_fc4s(shost)));
2538	fc_host_active_fc4s(shost)[2] = 1;
2539	fc_host_active_fc4s(shost)[7] = 1;
2540
2541	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2542	spin_lock_irq(shost->host_lock);
2543	vport->load_flag &= ~FC_LOADING;
2544	spin_unlock_irq(shost->host_lock);
2545}
2546
2547/**
2548 * lpfc_stop_port_s3 - Stop SLI3 device port
2549 * @phba: pointer to lpfc hba data structure.
2550 *
2551 * This routine is invoked to stop an SLI3 device port. It stops the device
2552 * from generating interrupts and stops the device driver's timers for the
2553 * device.
2554 **/
2555static void
2556lpfc_stop_port_s3(struct lpfc_hba *phba)
2557{
2558	/* Clear all interrupt enable conditions */
2559	writel(0, phba->HCregaddr);
2560	readl(phba->HCregaddr); /* flush */
2561	/* Clear all pending interrupts */
2562	writel(0xffffffff, phba->HAregaddr);
2563	readl(phba->HAregaddr); /* flush */
2564
2565	/* Reset some HBA SLI setup states */
2566	lpfc_stop_hba_timers(phba);
2567	phba->pport->work_port_events = 0;
2568}
2569
2570/**
2571 * lpfc_stop_port_s4 - Stop SLI4 device port
2572 * @phba: pointer to lpfc hba data structure.
2573 *
2574 * This routine is invoked to stop an SLI4 device port. It stops the device
2575 * from generating interrupts and stops the device driver's timers for the
2576 * device.
2577 **/
2578static void
2579lpfc_stop_port_s4(struct lpfc_hba *phba)
2580{
2581	/* Reset some HBA SLI4 setup states */
2582	lpfc_stop_hba_timers(phba);
2583	phba->pport->work_port_events = 0;
2584	phba->sli4_hba.intr_enable = 0;
2585	/* Hard clear it for now; a more graceful wait mechanism may come later */
2586	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2587}
2588
2589/**
2590 * lpfc_stop_port - Wrapper function for stopping hba port
2591 * @phba: Pointer to HBA context object.
2592 *
2593 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2594 * the API jump table function pointer from the lpfc_hba struct.
2595 **/
2596void
2597lpfc_stop_port(struct lpfc_hba *phba)
2598{
2599	phba->lpfc_stop_port(phba);
2600}
2601
2602/**
2603 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2604 * @phba: pointer to lpfc hba data structure.
2605 *
2606 * This routine is invoked to remove the driver default fcf record from
2607 * the port.  This routine currently acts on FCF Index 0.
2608 *
2609 **/
2610void
2611lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2612{
2613	int rc = 0;
2614	LPFC_MBOXQ_t *mboxq;
2615	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2616	uint32_t mbox_tmo, req_len;
2617	uint32_t shdr_status, shdr_add_status;
2618
2619	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2620	if (!mboxq) {
2621		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2622			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2623		return;
2624	}
2625
2626	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2627		  sizeof(struct lpfc_sli4_cfg_mhdr);
2628	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2629			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2630			      req_len, LPFC_SLI4_MBX_EMBED);
2631	/*
2632	 * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
2633	 * supports multiple FCF indices.
2634	 */
2635	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2636	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2637	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2638	       phba->fcf.fcf_indx);
2639
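	/* Poll the mailbox when interrupts are not yet enabled; otherwise issue and wait */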
2640	if (!phba->sli4_hba.intr_enable)
2641		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2642	else {
2643		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2644		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2645	}
2646	/* The IOCTL status is embedded in the mailbox subheader. */
2647	shdr_status = bf_get(lpfc_mbox_hdr_status,
2648			     &del_fcf_record->header.cfg_shdr.response);
2649	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2650				 &del_fcf_record->header.cfg_shdr.response);
2651	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2652		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2653				"2516 DEL FCF of default FCF Index failed "
2654				"mbx status x%x, status x%x add_status x%x\n",
2655				rc, shdr_status, shdr_add_status);
2656	}
2657	if (rc != MBX_TIMEOUT)
2658		mempool_free(mboxq, phba->mbox_mem_pool);
2659}
2660
2661/**
2662 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2663 * @phba: pointer to lpfc hba data structure.
2664 * @acqe_link: pointer to the async link completion queue entry.
2665 *
2666 * This routine is to parse the SLI4 link-attention link fault code and
2667 * translate it into the base driver's read link attention mailbox command
2668 * status.
2669 *
2670 * Return: Link-attention status in terms of base driver's coding.
2671 **/
2672static uint16_t
2673lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2674			   struct lpfc_acqe_link *acqe_link)
2675{
2676	uint16_t latt_fault;
2677
2678	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2679	case LPFC_ASYNC_LINK_FAULT_NONE:
2680	case LPFC_ASYNC_LINK_FAULT_LOCAL:
2681	case LPFC_ASYNC_LINK_FAULT_REMOTE:
2682		latt_fault = 0;
2683		break;
2684	default:
2685		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2686				"0398 Invalid link fault code: x%x\n",
2687				bf_get(lpfc_acqe_link_fault, acqe_link));
2688		latt_fault = MBXERR_ERROR;
2689		break;
2690	}
2691	return latt_fault;
2692}
2693
2694/**
2695 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2696 * @phba: pointer to lpfc hba data structure.
2697 * @acqe_link: pointer to the async link completion queue entry.
2698 *
2699 * This routine is to parse the SLI4 link attention type and translate it
2700 * into the base driver's link attention type coding.
2701 *
2702 * Return: Link attention type in terms of base driver's coding.
2703 **/
2704static uint8_t
2705lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2706			  struct lpfc_acqe_link *acqe_link)
2707{
2708	uint8_t att_type;
2709
2710	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2711	case LPFC_ASYNC_LINK_STATUS_DOWN:
2712	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2713		att_type = AT_LINK_DOWN;
2714		break;
2715	case LPFC_ASYNC_LINK_STATUS_UP:
2716		/* Ignore physical link up events - wait for logical link up */
2717		att_type = AT_RESERVED;
2718		break;
2719	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2720		att_type = AT_LINK_UP;
2721		break;
2722	default:
2723		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2724				"0399 Invalid link attention type: x%x\n",
2725				bf_get(lpfc_acqe_link_status, acqe_link));
2726		att_type = AT_RESERVED;
2727		break;
2728	}
2729	return att_type;
2730}
2731
2732/**
2733 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2734 * @phba: pointer to lpfc hba data structure.
2735 * @acqe_link: pointer to the async link completion queue entry.
2736 *
2737 * This routine is to parse the SLI4 link-attention link speed and translate
2738 * it into the base driver's link-attention link speed coding.
2739 *
2740 * Return: Link-attention link speed in terms of base driver's coding.
2741 **/
2742static uint8_t
2743lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2744				struct lpfc_acqe_link *acqe_link)
2745{
2746	uint8_t link_speed;
2747
2748	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2749	case LPFC_ASYNC_LINK_SPEED_ZERO:
2752	case LPFC_ASYNC_LINK_SPEED_10MBPS:
2755	case LPFC_ASYNC_LINK_SPEED_100MBPS:
2756		link_speed = LA_UNKNW_LINK;
2757		break;
2758	case LPFC_ASYNC_LINK_SPEED_1GBPS:
2759		link_speed = LA_1GHZ_LINK;
2760		break;
2761	case LPFC_ASYNC_LINK_SPEED_10GBPS:
2762		link_speed = LA_10GHZ_LINK;
2763		break;
2764	default:
2765		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2766				"0483 Invalid link-attention link speed: x%x\n",
2767				bf_get(lpfc_acqe_link_speed, acqe_link));
2768		link_speed = LA_UNKNW_LINK;
2769		break;
2770	}
2771	return link_speed;
2772}
2773
2774/**
2775 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2776 * @phba: pointer to lpfc hba data structure.
2777 * @acqe_link: pointer to the async link completion queue entry.
2778 *
2779 * This routine is to handle the SLI4 asynchronous link event.
2780 **/
2781static void
2782lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2783			 struct lpfc_acqe_link *acqe_link)
2784{
2785	struct lpfc_dmabuf *mp;
2786	LPFC_MBOXQ_t *pmb;
2787	MAILBOX_t *mb;
2788	READ_LA_VAR *la;
2789	uint8_t att_type;
2790
2791	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2792	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2793		return;
2794	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2795	if (!pmb) {
2796		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2797				"0395 The mboxq allocation failed\n");
2798		return;
2799	}
2800	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2801	if (!mp) {
2802		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2803				"0396 The lpfc_dmabuf allocation failed\n");
2804		goto out_free_pmb;
2805	}
2806	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2807	if (!mp->virt) {
2808		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2809				"0397 The mbuf allocation failed\n");
2810		goto out_free_dmabuf;
2811	}
2812
2813	/* Cleanup any outstanding ELS commands */
2814	lpfc_els_flush_all_cmd(phba);
2815
2816	/* Block ELS IOCBs until we are done processing the link event */
2817	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2818
2819	/* Update link event statistics */
2820	phba->sli.slistat.link_event++;
2821
2822	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2823	lpfc_read_la(phba, pmb, mp);
2824	pmb->vport = phba->pport;
2825
2826	/* Parse and translate status field */
2827	mb = &pmb->u.mb;
2828	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2829
2830	/* Parse and translate link attention fields */
2831	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2832	la->eventTag = acqe_link->event_tag;
2833	la->attType = att_type;
2834	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2835
2836	/* Fake the following irrelevant fields */
2837	la->topology = TOPOLOGY_PT_PT;
2838	la->granted_AL_PA = 0;
2839	la->il = 0;
2840	la->pb = 0;
2841	la->fa = 0;
2842	la->mm = 0;
2843
2844	/* Keep the link status for extra SLI4 state machine reference */
2845	phba->sli4_hba.link_state.speed =
2846				bf_get(lpfc_acqe_link_speed, acqe_link);
2847	phba->sli4_hba.link_state.duplex =
2848				bf_get(lpfc_acqe_link_duplex, acqe_link);
2849	phba->sli4_hba.link_state.status =
2850				bf_get(lpfc_acqe_link_status, acqe_link);
2851	phba->sli4_hba.link_state.physical =
2852				bf_get(lpfc_acqe_link_physical, acqe_link);
2853	phba->sli4_hba.link_state.fault =
2854				bf_get(lpfc_acqe_link_fault, acqe_link);
2855
2856	/* Invoke the lpfc_handle_latt mailbox command callback function */
2857	lpfc_mbx_cmpl_read_la(phba, pmb);
2858
2859	return;
2860
2861out_free_dmabuf:
2862	kfree(mp);
2863out_free_pmb:
2864	mempool_free(pmb, phba->mbox_mem_pool);
2865}
2866
2867/**
2868 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2869 * @phba: pointer to lpfc hba data structure.
2870 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2871 *
2872 * This routine is to handle the SLI4 asynchronous fcoe event.
2873 **/
2874static void
2875lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2876			 struct lpfc_acqe_fcoe *acqe_fcoe)
2877{
2878	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2879	int rc;
2880
2881	switch (event_type) {
2882	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2883		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2884			"2546 New FCF found index 0x%x tag 0x%x\n",
2885			acqe_fcoe->fcf_index,
2886			acqe_fcoe->event_tag);
2887		/*
2888		 * If the current FCF is in discovered state,
2889		 * do nothing.
2890		 */
2891		spin_lock_irq(&phba->hbalock);
2892		if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2893			spin_unlock_irq(&phba->hbalock);
2894			break;
2895		}
2896		spin_unlock_irq(&phba->hbalock);
2897
2898		/* Read the FCF table and re-discover SAN. */
2899		rc = lpfc_sli4_read_fcf_record(phba,
2900			LPFC_FCOE_FCF_GET_FIRST);
2901		if (rc)
2902			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903				"2547 Read FCF record failed 0x%x\n",
2904				rc);
2905		break;
2906
2907	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2908		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2909			"2548 FCF Table full count 0x%x tag 0x%x\n",
2910			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2911			acqe_fcoe->event_tag);
2912		break;
2913
2914	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2915		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2916			"2549 FCF disconnected from network index 0x%x"
2917			" tag 0x%x\n", acqe_fcoe->fcf_index,
2918			acqe_fcoe->event_tag);
2919		/* If the event is not for currently used fcf do nothing */
2920		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2921			break;
2922		/*
2923		 * Currently, the driver supports only one FCF - so treat this as
2924		 * a link down.
2925		 */
2926		lpfc_linkdown(phba);
2927		/* Unregister FCF if no devices connected to it */
2928		lpfc_unregister_unused_fcf(phba);
2929		break;
2930
2931	default:
2932		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2933			"0288 Unknown FCoE event type 0x%x event tag "
2934			"0x%x\n", event_type, acqe_fcoe->event_tag);
2935		break;
2936	}
2937}
2938
2939/**
2940 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2941 * @phba: pointer to lpfc hba data structure.
2942 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2943 *
2944 * This routine is to handle the SLI4 asynchronous dcbx event.
2945 **/
2946static void
2947lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2948			 struct lpfc_acqe_dcbx *acqe_dcbx)
2949{
2950	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2951			"0290 The SLI4 DCBX asynchronous event is not "
2952			"handled yet\n");
2953}
2954
2955/**
2956 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2957 * @phba: pointer to lpfc hba data structure.
2958 *
2959 * This routine is invoked by the worker thread to process all the pending
2960 * SLI4 asynchronous events.
2961 **/
2962void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2963{
2964	struct lpfc_cq_event *cq_event;
2965
2966	/* First, declare the async event has been handled */
2967	spin_lock_irq(&phba->hbalock);
2968	phba->hba_flag &= ~ASYNC_EVENT;
2969	spin_unlock_irq(&phba->hbalock);
2970	/* Now, handle all the async events */
2971	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2972		/* Get the first event from the head of the event queue */
2973		spin_lock_irq(&phba->hbalock);
2974		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2975				 cq_event, struct lpfc_cq_event, list);
2976		spin_unlock_irq(&phba->hbalock);
2977		/* Process the asynchronous event */
2978		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2979		case LPFC_TRAILER_CODE_LINK:
2980			lpfc_sli4_async_link_evt(phba,
2981						 &cq_event->cqe.acqe_link);
2982			break;
2983		case LPFC_TRAILER_CODE_FCOE:
2984			lpfc_sli4_async_fcoe_evt(phba,
2985						 &cq_event->cqe.acqe_fcoe);
2986			break;
2987		case LPFC_TRAILER_CODE_DCBX:
2988			lpfc_sli4_async_dcbx_evt(phba,
2989						 &cq_event->cqe.acqe_dcbx);
2990			break;
2991		default:
2992			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2993					"1804 Invalid asynchronous event code: "
2994					"x%x\n", bf_get(lpfc_trailer_code,
2995					&cq_event->cqe.mcqe_cmpl));
2996			break;
2997		}
2998		/* Free the completion event processed to the free pool */
2999		lpfc_sli4_cq_event_release(phba, cq_event);
3000	}
3001}
3002
3003/**
3004 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3005 * @phba: pointer to lpfc hba data structure.
3006 * @dev_grp: The HBA PCI-Device group number.
3007 *
3008 * This routine is invoked to set up the per HBA PCI-Device group function
3009 * API jump table entries.
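 * Wrappers such as lpfc_stop_port() then dispatch through these per-group
 * function pointers without any further SLI-revision checks.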
3010 *
3011 * Return: 0 if success, otherwise -ENODEV
3012 **/
3013int
3014lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3015{
3016	int rc;
3017
3018	/* Set up lpfc PCI-device group */
3019	phba->pci_dev_grp = dev_grp;
3020
3021	/* The LPFC_PCI_DEV_OC uses SLI4 */
3022	if (dev_grp == LPFC_PCI_DEV_OC)
3023		phba->sli_rev = LPFC_SLI_REV4;
3024
3025	/* Set up device INIT API function jump table */
3026	rc = lpfc_init_api_table_setup(phba, dev_grp);
3027	if (rc)
3028		return -ENODEV;
3029	/* Set up SCSI API function jump table */
3030	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3031	if (rc)
3032		return -ENODEV;
3033	/* Set up SLI API function jump table */
3034	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3035	if (rc)
3036		return -ENODEV;
3037	/* Set up MBOX API function jump table */
3038	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3039	if (rc)
3040		return -ENODEV;
3041
3042	return 0;
3043}
3044
3045/**
3046 * lpfc_log_intr_mode - Log the active interrupt mode
3047 * @phba: pointer to lpfc hba data structure.
3048 * @intr_mode: active interrupt mode adopted.
3049 *
3050 * This routine is invoked to log the interrupt mode currently in use by
3051 * the device.
3052 **/
3053static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3054{
3055	switch (intr_mode) {
3056	case 0:
3057		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3058				"0470 Enabled INTx interrupt mode.\n");
3059		break;
3060	case 1:
3061		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3062				"0481 Enabled MSI interrupt mode.\n");
3063		break;
3064	case 2:
3065		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3066				"0480 Enabled MSI-X interrupt mode.\n");
3067		break;
3068	default:
3069		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3070				"0482 Illegal interrupt mode.\n");
3071		break;
3072	}
3073	return;
3074}
3075
3076/**
3077 * lpfc_enable_pci_dev - Enable a generic PCI device.
3078 * @phba: pointer to lpfc hba data structure.
3079 *
3080 * This routine is invoked to enable the PCI device that is common to all
3081 * PCI devices.
3082 *
3083 * Return codes
3084 * 	0 - successful
3085 * 	other values - error
3086 **/
3087static int
3088lpfc_enable_pci_dev(struct lpfc_hba *phba)
3089{
3090	struct pci_dev *pdev;
3091	int bars;
3092
3093	/* Obtain PCI device reference */
3094	if (!phba->pcidev)
3095		goto out_error;
3097	pdev = phba->pcidev;
3098	/* Select PCI BARs */
3099	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3100	/* Enable PCI device */
3101	if (pci_enable_device_mem(pdev))
3102		goto out_error;
3103	/* Request PCI resource for the device */
3104	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3105		goto out_disable_device;
3106	/* Set up device as PCI master and save state for EEH */
3107	pci_set_master(pdev);
3108	pci_try_set_mwi(pdev);
3109	pci_save_state(pdev);
3110
3111	return 0;
3112
3113out_disable_device:
3114	pci_disable_device(pdev);
3115out_error:
3116	return -ENODEV;
3117}
3118
3119/**
3120 * lpfc_disable_pci_dev - Disable a generic PCI device.
3121 * @phba: pointer to lpfc hba data structure.
3122 *
3123 * This routine is invoked to disable the PCI device that is common to all
3124 * PCI devices.
3125 **/
3126static void
3127lpfc_disable_pci_dev(struct lpfc_hba *phba)
3128{
3129	struct pci_dev *pdev;
3130	int bars;
3131
3132	/* Obtain PCI device reference */
3133	if (!phba->pcidev)
3134		return;
3136	pdev = phba->pcidev;
3137	/* Select PCI BARs */
3138	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3139	/* Release PCI resource and disable PCI device */
3140	pci_release_selected_regions(pdev, bars);
3141	pci_disable_device(pdev);
3142	/* Null out PCI private reference to driver */
3143	pci_set_drvdata(pdev, NULL);
3144
3145	return;
3146}
3147
3148/**
3149 * lpfc_reset_hba - Reset a hba
3150 * @phba: pointer to lpfc hba data structure.
3151 *
3152 * This routine is invoked to reset a hba device. It brings the HBA
3153 * offline, performs a board restart, and then brings the board back
3154 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3155 * any outstanding mailbox commands.
3156 **/
3157void
3158lpfc_reset_hba(struct lpfc_hba *phba)
3159{
3160	/* If resets are disabled then set error state and return. */
3161	if (!phba->cfg_enable_hba_reset) {
3162		phba->link_state = LPFC_HBA_ERROR;
3163		return;
3164	}
3165	lpfc_offline_prep(phba);
3166	lpfc_offline(phba);
3167	lpfc_sli_brdrestart(phba);
3168	lpfc_online(phba);
3169	lpfc_unblock_mgmt_io(phba);
3170}
3171
3172/**
3173 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3174 * @phba: pointer to lpfc hba data structure.
3175 *
3176 * This routine is invoked to set up the driver internal resources specific to
3177 * support the SLI-3 HBA device it is attached to.
3178 *
3179 * Return codes
3180 * 	0 - successful
3181 * 	other values - error
3182 **/
3183static int
3184lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3185{
3186	struct lpfc_sli *psli;
3187
3188	/*
3189	 * Initialize timers used by driver
3190	 */
3191
3192	/* Heartbeat timer */
3193	init_timer(&phba->hb_tmofunc);
3194	phba->hb_tmofunc.function = lpfc_hb_timeout;
3195	phba->hb_tmofunc.data = (unsigned long)phba;
3196
3197	psli = &phba->sli;
3198	/* MBOX heartbeat timer */
3199	init_timer(&psli->mbox_tmo);
3200	psli->mbox_tmo.function = lpfc_mbox_timeout;
3201	psli->mbox_tmo.data = (unsigned long) phba;
3202	/* FCP polling mode timer */
3203	init_timer(&phba->fcp_poll_timer);
3204	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3205	phba->fcp_poll_timer.data = (unsigned long) phba;
3206	/* Fabric block timer */
3207	init_timer(&phba->fabric_block_timer);
3208	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3209	phba->fabric_block_timer.data = (unsigned long) phba;
3210	/* EA polling mode timer */
3211	init_timer(&phba->eratt_poll);
3212	phba->eratt_poll.function = lpfc_poll_eratt;
3213	phba->eratt_poll.data = (unsigned long) phba;
3214
3215	/* Host attention work mask setup */
3216	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3217	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3218
3219	/* Get all the module params for configuring this host */
3220	lpfc_get_cfgparam(phba);
3221	/*
3222	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3223	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3224	 * 2 segments are added since the IOCB needs a command and response bde.
3225	 */
3226	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3227		sizeof(struct fcp_rsp) +
3228			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3229
3230	if (phba->cfg_enable_bg) {
3231		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3232		phba->cfg_sg_dma_buf_size +=
3233			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3234	}
3235
3236	/* Also reinitialize the host templates with new values. */
3237	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3238	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3239
3240	phba->max_vpi = LPFC_MAX_VPI;
3241	/* This will be set to correct value after config_port mbox */
3242	phba->max_vports = 0;
3243
3244	/*
3245	 * Initialize the SLI Layer to run with lpfc HBAs.
3246	 */
3247	lpfc_sli_setup(phba);
3248	lpfc_sli_queue_setup(phba);
3249
3250	/* Allocate device driver memory */
3251	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3252		return -ENOMEM;
3253
3254	return 0;
3255}
3256
3257/**
3258 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3259 * @phba: pointer to lpfc hba data structure.
3260 *
3261 * This routine is invoked to unset the driver internal resources set up
3262 * specific for supporting the SLI-3 HBA device it is attached to.
3263 **/
3264static void
3265lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3266{
3267	/* Free device driver memory allocated */
3268	lpfc_mem_free_all(phba);
3269
3270	return;
3271}
3272
3273/**
3274 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3275 * @phba: pointer to lpfc hba data structure.
3276 *
3277 * This routine is invoked to set up the driver internal resources specific to
3278 * support the SLI-4 HBA device it is attached to.
3279 *
3280 * Return codes
3281 * 	0 - successful
3282 * 	other values - error
3283 **/
3284static int
3285lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3286{
3287	struct lpfc_sli *psli;
3288	int rc;
3289	int i, hbq_count;
3290
3291	/* Before proceeding, wait for POST to complete and the device to be ready */
3292	rc = lpfc_sli4_post_status_check(phba);
3293	if (rc)
3294		return -ENODEV;
3295
3296	/*
3297	 * Initialize timers used by driver
3298	 */
3299
3300	/* Heartbeat timer */
3301	init_timer(&phba->hb_tmofunc);
3302	phba->hb_tmofunc.function = lpfc_hb_timeout;
3303	phba->hb_tmofunc.data = (unsigned long)phba;
3304
3305	psli = &phba->sli;
3306	/* MBOX heartbeat timer */
3307	init_timer(&psli->mbox_tmo);
3308	psli->mbox_tmo.function = lpfc_mbox_timeout;
3309	psli->mbox_tmo.data = (unsigned long) phba;
3310	/* Fabric block timer */
3311	init_timer(&phba->fabric_block_timer);
3312	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3313	phba->fabric_block_timer.data = (unsigned long) phba;
3314	/* EA polling mode timer */
3315	init_timer(&phba->eratt_poll);
3316	phba->eratt_poll.function = lpfc_poll_eratt;
3317	phba->eratt_poll.data = (unsigned long) phba;
3318	/*
3319	 * We need to do a READ_CONFIG mailbox command here before
3320	 * calling lpfc_get_cfgparam. For VFs this will report the
3321	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3322	 * All of the resources allocated for this port are tied to these values.
3324	 */
3325	/* Get all the module params for configuring this host */
3326	lpfc_get_cfgparam(phba);
3327	phba->max_vpi = LPFC_MAX_VPI;
3328	/* This will be set to correct value after the read_config mbox */
3329	phba->max_vports = 0;
3330
3331	/* Program the default value of vlan_id and fc_map */
3332	phba->valid_vlan = 0;
3333	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3334	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3335	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3336
3337	/*
3338	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3339	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3340	 * 2 segments are added since the IOCB needs a command and response bde.
3341	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3342	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3343	 * Table of sgl sizes and seg_cnt:
3344	 * sgl size, 	sg_seg_cnt	total seg
3345	 * 1k		50		52
3346	 * 2k		114		116
3347	 * 4k		242		244
3348	 * 8k		498		500
3349	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3350	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3351	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3352	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3353	 */
3354	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3355		phba->cfg_sg_seg_cnt = 50;
3356	else if (phba->cfg_sg_seg_cnt <= 114)
3357		phba->cfg_sg_seg_cnt = 114;
3358	else if (phba->cfg_sg_seg_cnt <= 242)
3359		phba->cfg_sg_seg_cnt = 242;
3360	else
3361		phba->cfg_sg_seg_cnt = 498;
3362
3363	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3364					+ sizeof(struct fcp_rsp);
3365	phba->cfg_sg_dma_buf_size +=
3366		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3367
3368	/* Initialize buffer queue management fields */
3369	hbq_count = lpfc_sli_hbq_count();
3370	for (i = 0; i < hbq_count; ++i)
3371		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3372	INIT_LIST_HEAD(&phba->rb_pend_list);
3373	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3374	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3375
3376	/*
3377	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3378	 */
3379	/* Initialize the Abort scsi buffer list used by driver */
3380	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3381	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3382	/* This abort list used by worker thread */
3383	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3384
3385	/*
3386	 * Initialize driver internal slow-path work queues
3387	 */
3388
3389	/* Driver internal slow-path CQ Event pool */
3390	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3391	/* Response IOCB work queue list */
3392	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3393	/* Asynchronous event CQ Event work queue list */
3394	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3395	/* Fast-path XRI aborted CQ Event work queue list */
3396	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3397	/* Slow-path XRI aborted CQ Event work queue list */
3398	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3399	/* Receive queue CQ Event work queue list */
3400	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3401
3402	/* Initialize the driver internal SLI layer lists. */
3403	lpfc_sli_setup(phba);
3404	lpfc_sli_queue_setup(phba);
3405
3406	/* Allocate device driver memory */
3407	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3408	if (rc)
3409		return -ENOMEM;
3410
3411	/* Create the bootstrap mailbox command */
3412	rc = lpfc_create_bootstrap_mbox(phba);
3413	if (unlikely(rc))
3414		goto out_free_mem;
3415
3416	/* Set up the host's endian order with the device. */
3417	rc = lpfc_setup_endian_order(phba);
3418	if (unlikely(rc))
3419		goto out_free_bsmbx;
3420
3421	/* Set up the hba's configuration parameters. */
3422	rc = lpfc_sli4_read_config(phba);
3423	if (unlikely(rc))
3424		goto out_free_bsmbx;
3425
3426	/* Perform a function reset */
3427	rc = lpfc_pci_function_reset(phba);
3428	if (unlikely(rc))
3429		goto out_free_bsmbx;
3430
3431	/* Create all the SLI4 queues */
3432	rc = lpfc_sli4_queue_create(phba);
3433	if (rc)
3434		goto out_free_bsmbx;
3435
3436	/* Create driver internal CQE event pool */
3437	rc = lpfc_sli4_cq_event_pool_create(phba);
3438	if (rc)
3439		goto out_destroy_queue;
3440
3441	/* Initialize and populate the sgl list per host */
3442	rc = lpfc_init_sgl_list(phba);
3443	if (rc) {
3444		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3445				"1400 Failed to initialize sgl list.\n");
3446		goto out_destroy_cq_event_pool;
3447	}
3448	rc = lpfc_init_active_sgl_array(phba);
3449	if (rc) {
3450		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3451				"1430 Failed to initialize active sgl array.\n");
3452		goto out_free_sgl_list;
3453	}
3454
3455	rc = lpfc_sli4_init_rpi_hdrs(phba);
3456	if (rc) {
3457		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3458				"1432 Failed to initialize rpi headers.\n");
3459		goto out_free_active_sgl;
3460	}
3461
3462	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3463				    phba->cfg_fcp_eq_count), GFP_KERNEL);
3464	if (!phba->sli4_hba.fcp_eq_hdl) {
3465		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3466				"2572 Failed allocate memory for fast-path "
3467				"per-EQ handle array\n");
3468		goto out_remove_rpi_hdrs;
3469	}
3470
3471	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3472				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3473	if (!phba->sli4_hba.msix_entries) {
3474		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3475				"2573 Failed allocate memory for msi-x "
3476				"interrupt vector entries\n");
3477		goto out_free_fcp_eq_hdl;
3478	}
3479
3480	return rc;
3481
3482out_free_fcp_eq_hdl:
3483	kfree(phba->sli4_hba.fcp_eq_hdl);
3484out_remove_rpi_hdrs:
3485	lpfc_sli4_remove_rpi_hdrs(phba);
3486out_free_active_sgl:
3487	lpfc_free_active_sgl(phba);
3488out_free_sgl_list:
3489	lpfc_free_sgl_list(phba);
3490out_destroy_cq_event_pool:
3491	lpfc_sli4_cq_event_pool_destroy(phba);
3492out_destroy_queue:
3493	lpfc_sli4_queue_destroy(phba);
3494out_free_bsmbx:
3495	lpfc_destroy_bootstrap_mbox(phba);
3496out_free_mem:
3497	lpfc_mem_free(phba);
3498	return rc;
3499}
3500
3501/**
3502 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3503 * @phba: pointer to lpfc hba data structure.
3504 *
3505 * This routine is invoked to unset the driver internal resources set up
3506 * specifically for supporting the SLI-4 HBA device it is attached to.
3507 **/
3508static void
3509lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3510{
3511	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3512
3513	/* unregister default FCFI from the HBA */
3514	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3515
3516	/* Free the default FCR table */
3517	lpfc_sli_remove_dflt_fcf(phba);
3518
3519	/* Free memory allocated for msi-x interrupt vector entries */
3520	kfree(phba->sli4_hba.msix_entries);
3521
3522	/* Free memory allocated for fast-path work queue handles */
3523	kfree(phba->sli4_hba.fcp_eq_hdl);
3524
3525	/* Free the allocated rpi headers. */
3526	lpfc_sli4_remove_rpi_hdrs(phba);
3527
3528	/* Free the ELS sgl list */
3529	lpfc_free_active_sgl(phba);
3530	lpfc_free_sgl_list(phba);
3531
3532	/* Free the SCSI sgl management array */
3533	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3534
3535	/* Free the SLI4 queues */
3536	lpfc_sli4_queue_destroy(phba);
3537
3538	/* Free the completion queue EQ event pool */
3539	lpfc_sli4_cq_event_release_all(phba);
3540	lpfc_sli4_cq_event_pool_destroy(phba);
3541
3542	/* Reset SLI4 HBA FCoE function */
3543	lpfc_pci_function_reset(phba);
3544
3545	/* Free the bsmbx region. */
3546	lpfc_destroy_bootstrap_mbox(phba);
3547
3548	/* Free the SLI Layer memory with SLI4 HBAs */
3549	lpfc_mem_free_all(phba);
3550
3551	/* Free the current connect table */
3552	list_for_each_entry_safe(conn_entry, next_conn_entry,
3553		&phba->fcf_conn_rec_list, list)
3554		kfree(conn_entry);
3555
3556	return;
3557}
3558
3559/**
3560 * lpfc_init_api_table_setup - Set up init API function jump table
3561 * @phba: The hba struct for which this call is being executed.
3562 * @dev_grp: The HBA PCI-Device group number.
3563 *
3564 * This routine sets up the device INIT interface API function jump table
3565 * in @phba struct.
3566 *
3567 * Returns: 0 - success, -ENODEV - failure.
3568 **/
3569int
3570lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3571{
3572	switch (dev_grp) {
3573	case LPFC_PCI_DEV_LP:
3574		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3575		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3576		phba->lpfc_stop_port = lpfc_stop_port_s3;
3577		break;
3578	case LPFC_PCI_DEV_OC:
3579		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3580		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3581		phba->lpfc_stop_port = lpfc_stop_port_s4;
3582		break;
3583	default:
3584		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3585				"1431 Invalid HBA PCI-device group: 0x%x\n",
3586				dev_grp);
3587		return -ENODEV;
3588		break;
3589	}
3590	return 0;
3591}
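
/*
 * Usage sketch (illustrative only, not a call made by this file): once the
 * jump table is populated, callers dispatch through the function pointers
 * instead of branching on the SLI revision, e.g.
 *
 *	if (!lpfc_init_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		phba->lpfc_stop_port(phba);	resolves to lpfc_stop_port_s4
 */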
3592
3593/**
3594 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3595 * @phba: pointer to lpfc hba data structure.
3596 *
3597 * This routine is invoked to set up the driver internal resources before the
3598 * device specific resource setup to support the HBA device it is attached to.
3599 *
3600 * Return codes
3601 *	0 - successful
3602 *	other values - error
3603 **/
3604static int
3605lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3606{
3607	/*
3608	 * Driver resources common to all SLI revisions
3609	 */
3610	atomic_set(&phba->fast_event_count, 0);
3611	spin_lock_init(&phba->hbalock);
3612
3613	/* Initialize ndlp management spinlock */
3614	spin_lock_init(&phba->ndlp_lock);
3615
3616	INIT_LIST_HEAD(&phba->port_list);
3617	INIT_LIST_HEAD(&phba->work_list);
3618	init_waitqueue_head(&phba->wait_4_mlo_m_q);
3619
3620	/* Initialize the wait queue head for the kernel thread */
3621	init_waitqueue_head(&phba->work_waitq);
3622
3623	/* Initialize the scsi buffer list used by driver for scsi IO */
3624	spin_lock_init(&phba->scsi_buf_list_lock);
3625	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3626
3627	/* Initialize the fabric iocb list */
3628	INIT_LIST_HEAD(&phba->fabric_iocb_list);
3629
3630	/* Initialize list to save ELS buffers */
3631	INIT_LIST_HEAD(&phba->elsbuf);
3632
3633	/* Initialize FCF connection rec list */
3634	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3635
3636	return 0;
3637}
3638
3639/**
3640 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3641 * @phba: pointer to lpfc hba data structure.
3642 *
3643 * This routine is invoked to set up the driver internal resources after the
3644 * device specific resource setup to support the HBA device it is attached to.
3645 *
3646 * Return codes
3647 * 	0 - successful
3648 * 	other values - error
3649 **/
3650static int
3651lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3652{
3653	int error;
3654
3655	/* Startup the kernel thread for this host adapter. */
3656	phba->worker_thread = kthread_run(lpfc_do_work, phba,
3657					  "lpfc_worker_%d", phba->brd_no);
3658	if (IS_ERR(phba->worker_thread)) {
3659		error = PTR_ERR(phba->worker_thread);
3660		return error;
3661	}
3662
3663	return 0;
3664}
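
/*
 * Note: kthread_run() never returns NULL; on failure it returns an
 * ERR_PTR()-encoded errno, which is why the IS_ERR()/PTR_ERR() pair above
 * is the correct check rather than a NULL test.
 */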
3665
3666/**
3667 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3668 * @phba: pointer to lpfc hba data structure.
3669 *
3670 * This routine is invoked to unset the driver internal resources set up after
3671 * the device specific resource setup for supporting the HBA device it is
3672 * attached to.
3673 **/
3674static void
3675lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3676{
3677	/* Stop kernel worker thread */
3678	kthread_stop(phba->worker_thread);
3679}
3680
3681/**
3682 * lpfc_free_iocb_list - Free iocb list.
3683 * @phba: pointer to lpfc hba data structure.
3684 *
3685 * This routine is invoked to free the driver's IOCB list and memory.
3686 **/
3687static void
3688lpfc_free_iocb_list(struct lpfc_hba *phba)
3689{
3690	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3691
3692	spin_lock_irq(&phba->hbalock);
3693	list_for_each_entry_safe(iocbq_entry, iocbq_next,
3694				 &phba->lpfc_iocb_list, list) {
3695		list_del(&iocbq_entry->list);
3696		kfree(iocbq_entry);
3697		phba->total_iocbq_bufs--;
3698	}
3699	spin_unlock_irq(&phba->hbalock);
3700
3701	return;
3702}
3703
3704/**
3705 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3706 * @phba: pointer to lpfc hba data structure.
3707 *
3708 * This routine is invoked to allocate and initialize the driver's IOCB
3709 * list and set up the IOCB tag array accordingly.
3710 *
3711 * Return codes
3712 *	0 - successful
3713 *	other values - error
3714 **/
3715static int
3716lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3717{
3718	struct lpfc_iocbq *iocbq_entry = NULL;
3719	uint16_t iotag;
3720	int i;
3721
3722	/* Initialize and populate the iocb list per host.  */
3723	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3724	for (i = 0; i < iocb_count; i++) {
3725		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3726		if (iocbq_entry == NULL) {
3727			printk(KERN_ERR "%s: only allocated %d iocbs of "
3728				"expected %d count. Unloading driver.\n",
3729				__func__, i, iocb_count);
3730			goto out_free_iocbq;
3731		}
3732
3733		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3734		if (iotag == 0) {
3735			kfree(iocbq_entry);
3736			printk(KERN_ERR "%s: failed to allocate IOTAG. "
3737				"Unloading driver.\n", __func__);
3738			goto out_free_iocbq;
3739		}
3740		iocbq_entry->sli4_xritag = NO_XRI;
3741
3742		spin_lock_irq(&phba->hbalock);
3743		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3744		phba->total_iocbq_bufs++;
3745		spin_unlock_irq(&phba->hbalock);
3746	}
3747
3748	return 0;
3749
3750out_free_iocbq:
3751	lpfc_free_iocb_list(phba);
3752
3753	return -ENOMEM;
3754}
3755
3756/**
3757 * lpfc_free_sgl_list - Free sgl list.
3758 * @phba: pointer to lpfc hba data structure.
3759 *
3760 * This routine is invoked to free the driver's sgl list and memory.
3761 **/
3762static void
3763lpfc_free_sgl_list(struct lpfc_hba *phba)
3764{
3765	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3766	LIST_HEAD(sglq_list);
3767	int rc = 0;
3768
3769	spin_lock_irq(&phba->hbalock);
3770	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3771	spin_unlock_irq(&phba->hbalock);
3772
3773	list_for_each_entry_safe(sglq_entry, sglq_next,
3774				 &sglq_list, list) {
3775		list_del(&sglq_entry->list);
3776		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3777		kfree(sglq_entry);
3778		phba->sli4_hba.total_sglq_bufs--;
3779	}
3780	rc = lpfc_sli4_remove_all_sgl_pages(phba);
3781	if (rc) {
3782		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3783			"2005 Unable to deregister pages from HBA: %x\n", rc);
3784	}
3785	kfree(phba->sli4_hba.lpfc_els_sgl_array);
3786}
3787
3788/**
3789 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3790 * @phba: pointer to lpfc hba data structure.
3791 *
3792 * This routine is invoked to allocate the driver's active sgl memory.
3793 * This array will hold the sglq_entry's for active IOs.
3794 **/
3795static int
3796lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3797{
3798	int size;
3799	size = sizeof(struct lpfc_sglq *);
3800	size *= phba->sli4_hba.max_cfg_param.max_xri;
3801
3802	phba->sli4_hba.lpfc_sglq_active_list =
3803		kzalloc(size, GFP_KERNEL);
3804	if (!phba->sli4_hba.lpfc_sglq_active_list)
3805		return -ENOMEM;
3806	return 0;
3807}
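
/*
 * Sizing sketch (assumed example value): with max_xri == 1024, the array
 * allocated above holds 1024 struct lpfc_sglq pointers (8 KB on a 64-bit
 * kernel), one slot per possible outstanding exchange.
 */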
3808
3809/**
3810 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3811 * @phba: pointer to lpfc hba data structure.
3812 *
3813 * This routine is invoked to walk through the array of active sglq entries
3814 * and free all of the resources.
3815 * This is just a place holder for now.
3816 **/
3817static void
3818lpfc_free_active_sgl(struct lpfc_hba *phba)
3819{
3820	kfree(phba->sli4_hba.lpfc_sglq_active_list);
3821}
3822
3823/**
3824 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3825 * @phba: pointer to lpfc hba data structure.
3826 *
3827 * This routine is invoked to allocate and initialize the driver's sgl
3828 * list and set up the sgl xritag tag array accordingly.
3829 *
3830 * Return codes
3831 *	0 - successful
3832 *	other values - error
3833 **/
3834static int
3835lpfc_init_sgl_list(struct lpfc_hba *phba)
3836{
3837	struct lpfc_sglq *sglq_entry = NULL;
3838	int i;
3839	int els_xri_cnt;
3840
3841	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3842	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3843				"2400 lpfc_init_sgl_list els %d.\n",
3844				els_xri_cnt);
3845	/* Initialize and populate the sglq list per host/VF. */
3846	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3847	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3848
3849	/* Sanity check on XRI management */
3850	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3851		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3852				"2562 No room left for SCSI XRI allocation: "
3853				"max_xri=%d, els_xri=%d\n",
3854				phba->sli4_hba.max_cfg_param.max_xri,
3855				els_xri_cnt);
3856		return -ENOMEM;
3857	}
3858
3859	/* Allocate memory for the ELS XRI management array */
3860	phba->sli4_hba.lpfc_els_sgl_array =
3861			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3862			GFP_KERNEL);
3863
3864	if (!phba->sli4_hba.lpfc_els_sgl_array) {
3865		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3866				"2401 Failed to allocate memory for ELS "
3867				"XRI management array of size %d.\n",
3868				els_xri_cnt);
3869		return -ENOMEM;
3870	}
3871
3872	/* Keep the SCSI XRI into the XRI management array */
3873	phba->sli4_hba.scsi_xri_max =
3874			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3875	phba->sli4_hba.scsi_xri_cnt = 0;
3876
3877	phba->sli4_hba.lpfc_scsi_psb_array =
3878			kzalloc((sizeof(struct lpfc_scsi_buf *) *
3879			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3880
3881	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3882		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3883				"2563 Failed to allocate memory for SCSI "
3884				"XRI management array of size %d.\n",
3885				phba->sli4_hba.scsi_xri_max);
3886		kfree(phba->sli4_hba.lpfc_els_sgl_array);
3887		return -ENOMEM;
3888	}
3889
3890	for (i = 0; i < els_xri_cnt; i++) {
3891		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3892		if (sglq_entry == NULL) {
3893			printk(KERN_ERR "%s: only allocated %d sgls of "
3894				"expected %d count. Unloading driver.\n",
3895				__func__, i, els_xri_cnt);
3896			goto out_free_mem;
3897		}
3898
3899		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3900		if (sglq_entry->sli4_xritag == NO_XRI) {
3901			kfree(sglq_entry);
3902			printk(KERN_ERR "%s: failed to allocate XRI.\n"
3903				"Unloading driver.\n", __func__);
3904			goto out_free_mem;
3905		}
3906		sglq_entry->buff_type = GEN_BUFF_TYPE;
3907		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3908		if (sglq_entry->virt == NULL) {
3909			kfree(sglq_entry);
3910			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3911				"Unloading driver.\n", __func__);
3912			goto out_free_mem;
3913		}
3914		sglq_entry->sgl = sglq_entry->virt;
3915		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3916
3917		/* The list order is used by later block SGL registration */
3918		spin_lock_irq(&phba->hbalock);
3919		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3920		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3921		phba->sli4_hba.total_sglq_bufs++;
3922		spin_unlock_irq(&phba->hbalock);
3923	}
3924	return 0;
3925
3926out_free_mem:
3927	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3928	lpfc_free_sgl_list(phba);
3929	return -ENOMEM;
3930}
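
/*
 * XRI budget sketch: of the max_xri exchanges reported by the port,
 * els_xri_cnt are claimed here for ELS sgls and the remainder
 * (scsi_xri_max) is left for SCSI I/O; the sanity check above guarantees
 * that remainder is at least one.
 */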
3931
3932/**
3933 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3934 * @phba: pointer to lpfc hba data structure.
3935 *
3936 * This routine is invoked to post rpi header templates to the
3937 * HBA consistent with the SLI-4 interface spec.  This routine
3938 * posts a PAGE_SIZE memory region to the port to hold up to
3939 * PAGE_SIZE modulo 64 rpi context headers.
3940 * PAGE_SIZE / 64 rpi context headers (each header is 64 bytes).
3941 * called only from probe or lpfc_online when interrupts are not
3942 * enabled and the driver is reinitializing the device.
3943 *
3944 * Return codes
3945 * 	0 - successful
3946 * 	ENOMEM - No available memory
3947 *      EIO - The mailbox failed to complete successfully.
3948 **/
3949int
3950lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3951{
3952	int rc = 0;
3953	int longs;
3954	uint16_t rpi_count;
3955	struct lpfc_rpi_hdr *rpi_hdr;
3956
3957	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3958
3959	/*
3960	 * Provision an rpi bitmask range for discovery, sized to cover every
3961	 * rpi up to the highest provisioned value, rpi_base + max_rpi - 1.
3962	 */
3963	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3964		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
3965
3966	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3967	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3968					   GFP_KERNEL);
3969	if (!phba->sli4_hba.rpi_bmask)
3970		return -ENOMEM;
3971
3972	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3973	if (!rpi_hdr) {
3974		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3975				"0391 Error during rpi post operation\n");
3976		lpfc_sli4_remove_rpis(phba);
3977		rc = -ENODEV;
3978	}
3979
3980	return rc;
3981}
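
/*
 * Worked example (assumed numbers): with rpi_base == 0 and max_rpi == 128,
 * rpi_count == 127, so the bitmask above needs (127 + 63) / 64 == 2
 * unsigned longs on a 64-bit kernel.
 */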
3982
3983/**
3984 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
3985 * @phba: pointer to lpfc hba data structure.
3986 *
3987 * This routine is invoked to allocate a single 4KB memory region to
3988 * support rpis and stores them in the phba.  This single region
3989 * provides support for up to 64 rpis.  The region is used globally
3990 * by the device.
3991 *
3992 * Returns:
3993 *   A valid rpi hdr on success.
3994 *   A NULL pointer on any failure.
3995 **/
3996struct lpfc_rpi_hdr *
3997lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
3998{
3999	uint16_t rpi_limit, curr_rpi_range;
4000	struct lpfc_dmabuf *dmabuf;
4001	struct lpfc_rpi_hdr *rpi_hdr;
4002
4003	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4004		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4005
4006	spin_lock_irq(&phba->hbalock);
4007	curr_rpi_range = phba->sli4_hba.next_rpi;
4008	spin_unlock_irq(&phba->hbalock);
4009
4010	/*
4011	 * The port has a limited number of rpis. The increment here
4012	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4013	 * and to allow the full max_rpi range per port.
4014	 */
4015	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4016		return NULL;
4017
4018	/*
4019	 * First allocate the protocol header region for the port.  The
4020	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4021	 */
4022	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4023	if (!dmabuf)
4024		return NULL;
4025
4026	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4027					  LPFC_HDR_TEMPLATE_SIZE,
4028					  &dmabuf->phys,
4029					  GFP_KERNEL);
4030	if (!dmabuf->virt) {
4031		rpi_hdr = NULL;
4032		goto err_free_dmabuf;
4033	}
4034
4035	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4036	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4037		rpi_hdr = NULL;
4038		goto err_free_coherent;
4039	}
4040
4041	/* Save the rpi header data for cleanup later. */
4042	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4043	if (!rpi_hdr)
4044		goto err_free_coherent;
4045
4046	rpi_hdr->dmabuf = dmabuf;
4047	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4048	rpi_hdr->page_count = 1;
4049	spin_lock_irq(&phba->hbalock);
4050	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4051	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4052
4053	/*
4054	 * The next_rpi stores the first rpi of the next 64-rpi region to post
4055	 * in any subsequent rpi memory region postings.
4056	 */
4057	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4058	spin_unlock_irq(&phba->hbalock);
4059	return rpi_hdr;
4060
4061 err_free_coherent:
4062	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4063			  dmabuf->virt, dmabuf->phys);
4064 err_free_dmabuf:
4065	kfree(dmabuf);
4066	return NULL;
4067}
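
/*
 * Alignment sketch: the region must be naturally aligned to its own size,
 * so assuming LPFC_HDR_TEMPLATE_SIZE == 4096, a dmabuf->phys of 0x7f000
 * passes the IS_ALIGNED() check above while 0x7f200 would be rejected and
 * the allocation unwound.
 */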
4068
4069/**
4070 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4071 * @phba: pointer to lpfc hba data structure.
4072 *
4073 * This routine is invoked to remove all memory resources allocated
4074 * to support rpis. This routine presumes the caller has released all
4075 * rpis consumed by fabric or port logins and is prepared to have
4076 * the header pages removed.
4077 **/
4078void
4079lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4080{
4081	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4082
4083	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4084				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4085		list_del(&rpi_hdr->list);
4086		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4087				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4088		kfree(rpi_hdr->dmabuf);
4089		kfree(rpi_hdr);
4090	}
4091
4092	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4093	/* Clear the entire rpi bitmask, matching its allocation size */
	memset(phba->sli4_hba.rpi_bmask, 0,
	       (((phba->sli4_hba.max_cfg_param.rpi_base +
		  phba->sli4_hba.max_cfg_param.max_rpi - 1) +
		 BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long));
4094}
4095
4096/**
4097 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4098 * @pdev: pointer to pci device data structure.
4099 *
4100 * This routine is invoked to allocate the driver hba data structure for an
4101 * HBA device. If the allocation is successful, the phba reference to the
4102 * PCI device data structure is set.
4103 *
4104 * Return codes
4105 *      pointer to @phba - successful
4106 *      NULL - error
4107 **/
4108static struct lpfc_hba *
4109lpfc_hba_alloc(struct pci_dev *pdev)
4110{
4111	struct lpfc_hba *phba;
4112
4113	/* Allocate memory for HBA structure */
4114	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4115	if (!phba) {
4116		dev_err(&pdev->dev,
4117			"1417 Failed to allocate hba struct.\n");
4118		return NULL;
4119	}
4120
4121	/* Set reference to PCI device in HBA structure */
4122	phba->pcidev = pdev;
4123
4124	/* Assign an unused board number */
4125	phba->brd_no = lpfc_get_instance();
4126	if (phba->brd_no < 0) {
4127		kfree(phba);
4128		return NULL;
4129	}
4130
4131	return phba;
4132}
4133
4134/**
4135 * lpfc_hba_free - Free driver hba data structure with a device.
4136 * @phba: pointer to lpfc hba data structure.
4137 *
4138 * This routine is invoked to free the driver hba data structure with an
4139 * HBA device.
4140 **/
4141static void
4142lpfc_hba_free(struct lpfc_hba *phba)
4143{
4144	/* Release the driver assigned board number */
4145	idr_remove(&lpfc_hba_index, phba->brd_no);
4146
4147	kfree(phba);
4148	return;
4149}
4150
4151/**
4152 * lpfc_create_shost - Create hba physical port with associated scsi host.
4153 * @phba: pointer to lpfc hba data structure.
4154 *
4155 * This routine is invoked to create HBA physical port and associate a SCSI
4156 * host with it.
4157 *
4158 * Return codes
4159 *      0 - successful
4160 *      other values - error
4161 **/
4162static int
4163lpfc_create_shost(struct lpfc_hba *phba)
4164{
4165	struct lpfc_vport *vport;
4166	struct Scsi_Host  *shost;
4167
4168	/* Initialize HBA FC structure */
4169	phba->fc_edtov = FF_DEF_EDTOV;
4170	phba->fc_ratov = FF_DEF_RATOV;
4171	phba->fc_altov = FF_DEF_ALTOV;
4172	phba->fc_arbtov = FF_DEF_ARBTOV;
4173
4174	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4175	if (!vport)
4176		return -ENODEV;
4177
4178	shost = lpfc_shost_from_vport(vport);
4179	phba->pport = vport;
4180	lpfc_debugfs_initialize(vport);
4181	/* Put reference to SCSI host to driver's device private data */
4182	pci_set_drvdata(phba->pcidev, shost);
4183
4184	return 0;
4185}
4186
4187/**
4188 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4189 * @phba: pointer to lpfc hba data structure.
4190 *
4191 * This routine is invoked to destroy HBA physical port and the associated
4192 * SCSI host.
4193 **/
4194static void
4195lpfc_destroy_shost(struct lpfc_hba *phba)
4196{
4197	struct lpfc_vport *vport = phba->pport;
4198
4199	/* Destroy physical port that associated with the SCSI host */
4200	destroy_port(vport);
4201
4202	return;
4203}
4204
4205/**
4206 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4207 * @phba: pointer to lpfc hba data structure.
4208 * @shost: the shost to be used to detect Block guard settings.
4209 *
4210 * This routine sets up the local Block guard protocol settings for @shost.
4211 * This routine also allocates memory for debugging bg buffers.
4212 **/
4213static void
4214lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4215{
4216	int pagecnt = 10;
4217	if (lpfc_prot_mask && lpfc_prot_guard) {
4218		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4219				"1478 Registering BlockGuard with the "
4220				"SCSI layer\n");
4221		scsi_host_set_prot(shost, lpfc_prot_mask);
4222		scsi_host_set_guard(shost, lpfc_prot_guard);
4223	}
4224	if (!_dump_buf_data) {
4225		spin_lock_init(&_dump_buf_lock);
4226		while (pagecnt) {
4227			_dump_buf_data =
4228				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4229			if (_dump_buf_data) {
4230				printk(KERN_ERR "BLKGRD allocated %d pages for "
4231				       "_dump_buf_data at 0x%p\n",
4232				       (1 << pagecnt), _dump_buf_data);
4233				_dump_buf_data_order = pagecnt;
4234				memset(_dump_buf_data, 0,
4235				       ((1 << PAGE_SHIFT) << pagecnt));
4236				break;
4237			} else
4238				--pagecnt;
4239		}
4240		if (!_dump_buf_data_order)
4241			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4242			       "memory for hexdump\n");
4243	} else
4244		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4245		       "\n", _dump_buf_data);
4246	if (!_dump_buf_dif) {
4247		while (pagecnt) {
4248			_dump_buf_dif =
4249				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4250			if (_dump_buf_dif) {
4251				printk(KERN_ERR "BLKGRD allocated %d pages for "
4252				       "_dump_buf_dif at 0x%p\n",
4253				       (1 << pagecnt), _dump_buf_dif);
4254				_dump_buf_dif_order = pagecnt;
4255				memset(_dump_buf_dif, 0,
4256				       ((1 << PAGE_SHIFT) << pagecnt));
4257				break;
4258			} else
4259				--pagecnt;
4260		}
4261		if (!_dump_buf_dif_order)
4262			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4263			       "memory for hexdump\n");
4264	} else
4265		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4266		       _dump_buf_dif);
4267}
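
/*
 * Allocation note: despite the name, "pagecnt" above is a page order;
 * __get_free_pages(GFP_KERNEL, n) returns 2^n contiguous pages, so each
 * loop retries with halving sizes from order 10 (1024 pages, 4 MB with
 * 4 KB pages) down to order 1 before giving up.
 */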
4268
4269/**
4270 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4271 * @phba: pointer to lpfc hba data structure.
4272 *
4273 * This routine is invoked to perform all the necessary post initialization
4274 * setup for the device.
4275 **/
4276static void
4277lpfc_post_init_setup(struct lpfc_hba *phba)
4278{
4279	struct Scsi_Host  *shost;
4280	struct lpfc_adapter_event_header adapter_event;
4281
4282	/* Get the default values for Model Name and Description */
4283	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4284
4285	/*
4286	 * hba setup may have changed the hba_queue_depth so we need to
4287	 * adjust the value of can_queue.
4288	 */
4289	shost = pci_get_drvdata(phba->pcidev);
4290	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4291	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4292		lpfc_setup_bg(phba, shost);
4293
4294	lpfc_host_attrib_init(shost);
4295
4296	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4297		spin_lock_irq(shost->host_lock);
4298		lpfc_poll_start_timer(phba);
4299		spin_unlock_irq(shost->host_lock);
4300	}
4301
4302	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4303			"0428 Perform SCSI scan\n");
4304	/* Send board arrival event to upper layer */
4305	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4306	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4307	fc_host_post_vendor_event(shost, fc_get_event_number(),
4308				  sizeof(adapter_event),
4309				  (char *) &adapter_event,
4310				  LPFC_NL_VENDOR_ID);
4311	return;
4312}
4313
4314/**
4315 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4316 * @phba: pointer to lpfc hba data structure.
4317 *
4318 * This routine is invoked to set up the PCI device memory space for device
4319 * with SLI-3 interface spec.
4320 *
4321 * Return codes
4322 * 	0 - successful
4323 * 	other values - error
4324 **/
4325static int
4326lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4327{
4328	struct pci_dev *pdev;
4329	unsigned long bar0map_len, bar2map_len;
4330	int i, hbq_count;
4331	void *ptr;
4332	int error = -ENODEV;
4333
4334	/* Obtain PCI device reference */
4335	if (!phba->pcidev)
4336		return error;
4337	else
4338		pdev = phba->pcidev;
4339
4340	/* Set the device DMA mask size */
4341	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4342		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4343			return error;
4344
4345	/* Get the bus address of Bar0 and Bar2 and the number of bytes
4346	 * required by each mapping.
4347	 */
4348	phba->pci_bar0_map = pci_resource_start(pdev, 0);
4349	bar0map_len = pci_resource_len(pdev, 0);
4350
4351	phba->pci_bar2_map = pci_resource_start(pdev, 2);
4352	bar2map_len = pci_resource_len(pdev, 2);
4353
4354	/* Map HBA SLIM to a kernel virtual address. */
4355	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4356	if (!phba->slim_memmap_p) {
4357		dev_printk(KERN_ERR, &pdev->dev,
4358			   "ioremap failed for SLIM memory.\n");
4359		goto out;
4360	}
4361
4362	/* Map HBA Control Registers to a kernel virtual address. */
4363	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4364	if (!phba->ctrl_regs_memmap_p) {
4365		dev_printk(KERN_ERR, &pdev->dev,
4366			   "ioremap failed for HBA control registers.\n");
4367		goto out_iounmap_slim;
4368	}
4369
4370	/* Allocate memory for SLI-2 structures */
4371	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4372					       SLI2_SLIM_SIZE,
4373					       &phba->slim2p.phys,
4374					       GFP_KERNEL);
4375	if (!phba->slim2p.virt)
4376		goto out_iounmap;
4377
4378	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4379	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4380	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4381	phba->IOCBs = (phba->slim2p.virt +
4382		       offsetof(struct lpfc_sli2_slim, IOCBs));
4383
4384	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4385						 lpfc_sli_hbq_size(),
4386						 &phba->hbqslimp.phys,
4387						 GFP_KERNEL);
4388	if (!phba->hbqslimp.virt)
4389		goto out_free_slim;
4390
4391	hbq_count = lpfc_sli_hbq_count();
4392	ptr = phba->hbqslimp.virt;
4393	for (i = 0; i < hbq_count; ++i) {
4394		phba->hbqs[i].hbq_virt = ptr;
4395		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4396		ptr += (lpfc_hbq_defs[i]->entry_count *
4397			sizeof(struct lpfc_hbq_entry));
4398	}
4399	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4400	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4401
4402	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4403
4404	INIT_LIST_HEAD(&phba->rb_pend_list);
4405
4406	phba->MBslimaddr = phba->slim_memmap_p;
4407	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4408	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4409	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4410	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4411
4412	return 0;
4413
4414out_free_slim:
4415	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4416			  phba->slim2p.virt, phba->slim2p.phys);
4417out_iounmap:
4418	iounmap(phba->ctrl_regs_memmap_p);
4419out_iounmap_slim:
4420	iounmap(phba->slim_memmap_p);
4421out:
4422	return error;
4423}
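
/*
 * DMA mask note: the setup above first requests a full 64-bit DMA mask
 * and quietly falls back to 32-bit addressing (which may force bounce
 * buffering for memory above 4 GB); only when both requests fail is the
 * PCI memory setup abandoned.
 */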
4424
4425/**
4426 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4427 * @phba: pointer to lpfc hba data structure.
4428 *
4429 * This routine is invoked to unset the PCI device memory space for device
4430 * with SLI-3 interface spec.
4431 **/
4432static void
4433lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4434{
4435	struct pci_dev *pdev;
4436
4437	/* Obtain PCI device reference */
4438	if (!phba->pcidev)
4439		return;
4440	else
4441		pdev = phba->pcidev;
4442
4443	/* Free coherent DMA memory allocated */
4444	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4445			  phba->hbqslimp.virt, phba->hbqslimp.phys);
4446	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4447			  phba->slim2p.virt, phba->slim2p.phys);
4448
4449	/* I/O memory unmap */
4450	iounmap(phba->ctrl_regs_memmap_p);
4451	iounmap(phba->slim_memmap_p);
4452
4453	return;
4454}
4455
4456/**
4457 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4458 * @phba: pointer to lpfc hba data structure.
4459 *
4460 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4461 * done and check status.
4462 *
4463 * Return 0 if successful, otherwise -ENODEV.
4464 **/
4465int
4466lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4467{
4468	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4469	uint32_t onlnreg0, onlnreg1;
4470	int i, port_error = -ENODEV;
4471
4472	if (!phba->sli4_hba.STAregaddr)
4473		return -ENODEV;
4474
4475	/* With uncoverable error, log the error message and return error */
4476	/* With unrecoverable error, log the error message and return error */
4477	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4478	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4479		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4480		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4481		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4482			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4483					"1422 HBA Unrecoverable error: "
4484					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4485					"online0_reg=0x%x, online1_reg=0x%x\n",
4486					uerrlo_reg.word0, uerrhi_reg.word0,
4487					onlnreg0, onlnreg1);
4488		}
4489		return -ENODEV;
4490	}
4491
4492	/* Wait up to 30 seconds for the SLI Port POST done and ready */
4493	for (i = 0; i < 3000; i++) {
4494		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4495		/* Encounter fatal POST error, break out */
4496		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4497			port_error = -ENODEV;
4498			break;
4499		}
4500		if (LPFC_POST_STAGE_ARMFW_READY ==
4501		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4502			port_error = 0;
4503			break;
4504		}
4505		msleep(10);
4506	}
4507
4508	if (port_error)
4509		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4510			"1408 Failure HBA POST Status: sta_reg=0x%x, "
4511			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4512			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
4513			bf_get(lpfc_hst_state_perr, &sta_reg),
4514			bf_get(lpfc_hst_state_sfi, &sta_reg),
4515			bf_get(lpfc_hst_state_nip, &sta_reg),
4516			bf_get(lpfc_hst_state_ipc, &sta_reg),
4517			bf_get(lpfc_hst_state_xrom, &sta_reg),
4518			bf_get(lpfc_hst_state_dl, &sta_reg),
4519			bf_get(lpfc_hst_state_port_status, &sta_reg));
4520
4521	/* Log device information */
4522	scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
4523	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4524			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4525			"FeatureL1=0x%x, FeatureL2=0x%x\n",
4526			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4527			bf_get(lpfc_scratchpad_slirev, &scratchpad),
4528			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4529			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4530
4531	return port_error;
4532}
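
/*
 * Timing note: the POST poll above makes at most 3000 passes with
 * msleep(10) between register reads, i.e. roughly the 30-second budget
 * promised by the in-function comment (msleep() may sleep longer than
 * requested, so the bound is approximate).
 */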
4533
4534/**
4535 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4536 * @phba: pointer to lpfc hba data structure.
4537 *
4538 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4539 * memory map.
4540 **/
4541static void
4542lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4543{
4544	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4545					LPFC_UERR_STATUS_LO;
4546	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4547					LPFC_UERR_STATUS_HI;
4548	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4549					LPFC_ONLINE0;
4550	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4551					LPFC_ONLINE1;
4552	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4553					LPFC_SCRATCHPAD;
4554}
4555
4556/**
4557 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4558 * @phba: pointer to lpfc hba data structure.
4559 *
4560 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4561 * memory map.
4562 **/
4563static void
4564lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4565{
4566
4567	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4568				    LPFC_HST_STATE;
4569	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4570				    LPFC_HST_ISR0;
4571	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4572				    LPFC_HST_IMR0;
4573	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4574				     LPFC_HST_ISCR0;
4575	return;
4576}
4577
4578/**
4579 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4580 * @phba: pointer to lpfc hba data structure.
4581 * @vf: virtual function number
4582 *
4583 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4584 * based on the given viftual function number, @vf.
4585 * based on the given virtual function number, @vf.
4586 * Return 0 if successful, otherwise -ENODEV.
4587 **/
4588static int
4589lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4590{
4591	if (vf > LPFC_VIR_FUNC_MAX)
4592		return -ENODEV;
4593
4594	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4595				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4596	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4597				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4598	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4599				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4600	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4601				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4602	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4603				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4604	return 0;
4605}
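
/*
 * Layout sketch: each virtual function owns one LPFC_VFR_PAGE_SIZE window
 * of doorbells in BAR2, so for vf == 2 the WQ doorbell sits at
 * drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL.
 */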
4606
4607/**
4608 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4609 * @phba: pointer to lpfc hba data structure.
4610 *
4611 * This routine is invoked to create the bootstrap mailbox
4612 * region consistent with the SLI-4 interface spec.  This
4613 * routine allocates all memory necessary to communicate
4614 * mailbox commands to the port and sets up all alignment
4615 * needs.  No locks are expected to be held when calling
4616 * this routine.
4617 *
4618 * Return codes
4619 * 	0 - successful
4620 * 	ENOMEM - could not allocate memory.
4621 **/
4622static int
4623lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4624{
4625	uint32_t bmbx_size;
4626	struct lpfc_dmabuf *dmabuf;
4627	struct dma_address *dma_address;
4628	uint32_t pa_addr;
4629	uint64_t phys_addr;
4630
4631	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4632	if (!dmabuf)
4633		return -ENOMEM;
4634
4635	/*
4636	 * The bootstrap mailbox region is comprised of 2 parts
4637	 * plus an alignment restriction of 16 bytes.
4638	 */
4639	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4640	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4641					  bmbx_size,
4642					  &dmabuf->phys,
4643					  GFP_KERNEL);
4644	if (!dmabuf->virt) {
4645		kfree(dmabuf);
4646		return -ENOMEM;
4647	}
4648	memset(dmabuf->virt, 0, bmbx_size);
4649
4650	/*
4651	 * Initialize the bootstrap mailbox pointers now so that the register
4652	 * operations are simple later.  The mailbox dma address is required
4653	 * to be 16-byte aligned.  Also align the virtual memory as each
4654	 * mailbox is copied into the bmbx mailbox region before issuing the
4655	 * command to the port.
4656	 */
4657	phba->sli4_hba.bmbx.dmabuf = dmabuf;
4658	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4659
4660	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4661					      LPFC_ALIGN_16_BYTE);
4662	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4663					      LPFC_ALIGN_16_BYTE);
4664
4665	/*
4666	 * Set the high and low physical addresses now.  The SLI4 alignment
4667	 * requirement is 16 bytes and the mailbox is posted to the port
4668	 * as two 30-bit addresses.  The other data is a bit marking whether
4669	 * the 30-bit address is the high or low address.
4670	 * Upcast bmbx aphys to 64bits so shift instruction compiles
4671	 * clean on 32 bit machines.
4672	 */
4673	dma_address = &phba->sli4_hba.bmbx.dma_address;
4674	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4675	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4676	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4677					   LPFC_BMBX_BIT1_ADDR_HI);
4678
4679	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4680	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4681					   LPFC_BMBX_BIT1_ADDR_LO);
4682	return 0;
4683}
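
/*
 * Address-split sketch: for a 16-byte-aligned aphys, addr_lo carries bits
 * 4..33 ((aphys >> 4) & 0x3fffffff) and addr_hi carries bits 34..63, each
 * shifted left by 2 and tagged with a flag bit so the port knows which
 * half it received; together they reassemble the full 64-bit bootstrap
 * mailbox address.
 */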
4684
4685/**
4686 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4687 * @phba: pointer to lpfc hba data structure.
4688 *
4689 * This routine is invoked to teardown the bootstrap mailbox
4690 * region and release all host resources. This routine requires
4691 * the caller to ensure all mailbox commands recovered, no
4692 * the caller to ensure that all outstanding mailbox commands have been
4693 * completed, that no additional mailbox commands are issued, and that
4694 * interrupts are disabled before calling this routine.
4695 **/
4696static void
4697lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4698{
4699	dma_free_coherent(&phba->pcidev->dev,
4700			  phba->sli4_hba.bmbx.bmbx_size,
4701			  phba->sli4_hba.bmbx.dmabuf->virt,
4702			  phba->sli4_hba.bmbx.dmabuf->phys);
4703
4704	kfree(phba->sli4_hba.bmbx.dmabuf);
4705	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4706}
4707
4708/**
4709 * lpfc_sli4_read_config - Get the config parameters.
4710 * @phba: pointer to lpfc hba data structure.
4711 *
4712 * This routine is invoked to read the configuration parameters from the HBA.
4713 * The configuration parameters are used to set the base and maximum values
4714 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
4715 * allocation for the port.
4716 *
4717 * Return codes
4718 * 	0 - successful
4719 * 	ENOMEM - No available memory
4720 *      EIO - The mailbox failed to complete successfully.
4721 **/
4722static int
4723lpfc_sli4_read_config(struct lpfc_hba *phba)
4724{
4725	LPFC_MBOXQ_t *pmb;
4726	struct lpfc_mbx_read_config *rd_config;
4727	uint32_t rc = 0;
4728
4729	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4730	if (!pmb) {
4731		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4732				"2011 Unable to allocate memory for issuing "
4733				"SLI_CONFIG_SPECIAL mailbox command\n");
4734		return -ENOMEM;
4735	}
4736
4737	lpfc_read_config(phba, pmb);
4738
4739	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4740	if (rc != MBX_SUCCESS) {
4741		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4742			"2012 Mailbox failed, mbxCmd x%x "
4743			"READ_CONFIG, mbxStatus x%x\n",
4744			bf_get(lpfc_mqe_command, &pmb->u.mqe),
4745			bf_get(lpfc_mqe_status, &pmb->u.mqe));
4746		rc = -EIO;
4747	} else {
4748		rd_config = &pmb->u.mqe.un.rd_config;
4749		phba->sli4_hba.max_cfg_param.max_xri =
4750			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4751		phba->sli4_hba.max_cfg_param.xri_base =
4752			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4753		phba->sli4_hba.max_cfg_param.max_vpi =
4754			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4755		phba->sli4_hba.max_cfg_param.vpi_base =
4756			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4757		phba->sli4_hba.max_cfg_param.max_rpi =
4758			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4759		phba->sli4_hba.max_cfg_param.rpi_base =
4760			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4761		phba->sli4_hba.max_cfg_param.max_vfi =
4762			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4763		phba->sli4_hba.max_cfg_param.vfi_base =
4764			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4765		phba->sli4_hba.max_cfg_param.max_fcfi =
4766			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4767		phba->sli4_hba.max_cfg_param.fcfi_base =
4768			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4769		phba->sli4_hba.max_cfg_param.max_eq =
4770			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4771		phba->sli4_hba.max_cfg_param.max_rq =
4772			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4773		phba->sli4_hba.max_cfg_param.max_wq =
4774			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4775		phba->sli4_hba.max_cfg_param.max_cq =
4776			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4777		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4778		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4779		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4780		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4781		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4782		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4783		phba->max_vports = phba->max_vpi;
4784		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4785				"2003 cfg params XRI(B:%d M:%d), "
4786				"VPI(B:%d M:%d) "
4787				"VFI(B:%d M:%d) "
4788				"RPI(B:%d M:%d) "
4789				"FCFI(B:%d M:%d)\n",
4790				phba->sli4_hba.max_cfg_param.xri_base,
4791				phba->sli4_hba.max_cfg_param.max_xri,
4792				phba->sli4_hba.max_cfg_param.vpi_base,
4793				phba->sli4_hba.max_cfg_param.max_vpi,
4794				phba->sli4_hba.max_cfg_param.vfi_base,
4795				phba->sli4_hba.max_cfg_param.max_vfi,
4796				phba->sli4_hba.max_cfg_param.rpi_base,
4797				phba->sli4_hba.max_cfg_param.max_rpi,
4798				phba->sli4_hba.max_cfg_param.fcfi_base,
4799				phba->sli4_hba.max_cfg_param.max_fcfi);
4800	}
4801	mempool_free(pmb, phba->mbox_mem_pool);
4802
4803	/* Cap the configured HBA queue depth to the max XRI count */
4804	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4805		phba->cfg_hba_queue_depth =
4806				phba->sli4_hba.max_cfg_param.max_xri;
4807	return rc;
4808}
4809
4810/**
4811 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4812 * @phba: pointer to lpfc hba data structure.
4813 *
4814 * This routine is invoked to setup the host-side endian order to the
4815 * HBA consistent with the SLI-4 interface spec.
4816 *
4817 * Return codes
4818 * 	0 - successful
4819 * 	ENOMEM - No available memory
4820 *      EIO - The mailbox failed to complete successfully.
4821 **/
4822static int
4823lpfc_setup_endian_order(struct lpfc_hba *phba)
4824{
4825	LPFC_MBOXQ_t *mboxq;
4826	uint32_t rc = 0;
4827	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4828				      HOST_ENDIAN_HIGH_WORD1};
4829
4830	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4831	if (!mboxq) {
4832		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4833				"0492 Unable to allocate memory for issuing "
4834				"SLI_CONFIG_SPECIAL mailbox command\n");
4835		return -ENOMEM;
4836	}
4837
4838	/*
4839	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4840	 * words to contain special data values and no other data.
4841	 */
4842	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4843	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4844	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4845	if (rc != MBX_SUCCESS) {
4846		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4847				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
4848				"status x%x\n",
4849				rc);
4850		rc = -EIO;
4851	}
4852
4853	mempool_free(mboxq, phba->mbox_mem_pool);
4854	return rc;
4855}
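
/*
 * The two magic words are how the port infers the host byte order: a
 * little-endian and a big-endian host emit different byte streams for
 * HOST_ENDIAN_LOW_WORD0/HOST_ENDIAN_HIGH_WORD1, and the port is expected
 * to adjust its byte swapping for subsequent mailbox traffic accordingly.
 */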
4856
4857/**
4858 * lpfc_sli4_queue_create - Create all the SLI4 queues
4859 * @phba: pointer to lpfc hba data structure.
4860 *
4861 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4862 * operation. For each SLI4 queue type, the parameters such as queue entry
4863 * count (queue depth) shall be taken from the module parameter. For now,
4864 * we just use some constant number as a placeholder.
4865 *
4866 * Return codes
4867 *      0 - successful
4868 *      ENOMEM - No available memory
4869 *      EIO - The mailbox failed to complete successfully.
4870 **/
4871static int
4872lpfc_sli4_queue_create(struct lpfc_hba *phba)
4873{
4874	struct lpfc_queue *qdesc;
4875	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4876	int cfg_fcp_wq_count;
4877	int cfg_fcp_eq_count;
4878
4879	/*
4880	 * Sanity check for configured queue parameters against the run-time
4881	 * device parameters
4882	 */
4883
4884	/* Sanity check on FCP fast-path WQ parameters */
4885	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4886	if (cfg_fcp_wq_count >
4887	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4888		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4889				   LPFC_SP_WQN_DEF;
4890		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4891			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4892					"2581 Not enough WQs (%d) from "
4893					"the pci function for supporting "
4894					"FCP WQs (%d)\n",
4895					phba->sli4_hba.max_cfg_param.max_wq,
4896					phba->cfg_fcp_wq_count);
4897			goto out_error;
4898		}
4899		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4900				"2582 Not enough WQs (%d) from the pci "
4901				"function for supporting the requested "
4902				"FCP WQs (%d), the actual FCP WQs can "
4903				"be supported: %d\n",
4904				phba->sli4_hba.max_cfg_param.max_wq,
4905				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4906	}
4907	/* The actual number of FCP work queues adopted */
4908	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4909
4910	/* Sanity check on FCP fast-path EQ parameters */
4911	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4912	if (cfg_fcp_eq_count >
4913	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4914		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4915				   LPFC_SP_EQN_DEF;
4916		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4917			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4918					"2574 Not enough EQs (%d) from the "
4919					"pci function for supporting FCP "
4920					"EQs (%d)\n",
4921					phba->sli4_hba.max_cfg_param.max_eq,
4922					phba->cfg_fcp_eq_count);
4923			goto out_error;
4924		}
4925		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4926				"2575 Not enough EQs (%d) from the pci "
4927				"function for supporting the requested "
4928				"FCP EQs (%d), the actual FCP EQs can "
4929				"be supported: %d\n",
4930				phba->sli4_hba.max_cfg_param.max_eq,
4931				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4932	}
4933	/* It does not make sense to have more EQs than WQs */
4934	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4935		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4936				"2593 The number of FCP EQs (%d) is more "
4937				"than the number of FCP WQs (%d), limiting "
4938				"the number of FCP EQs to the number of "
4939				"WQs (%d)\n", cfg_fcp_eq_count,
4940				phba->cfg_fcp_wq_count,
4941				phba->cfg_fcp_wq_count);
4942		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4943	}
4944	/* The actual number of FCP event queues adopted */
4945	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4946	/* The overall number of event queues used */
4947	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4948
4949	/*
4950	 * Create Event Queues (EQs)
4951	 */
4952
4953	/* Get EQ depth from module parameter, fake the default for now */
4954	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4955	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4956
4957	/* Create slow path event queue */
4958	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4959				      phba->sli4_hba.eq_ecount);
4960	if (!qdesc) {
4961		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4962				"0496 Failed allocate slow-path EQ\n");
4963		goto out_error;
4964	}
4965	phba->sli4_hba.sp_eq = qdesc;
4966
4967	/* Create fast-path FCP Event Queue(s) */
4968	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4969			       phba->cfg_fcp_eq_count), GFP_KERNEL);
4970	if (!phba->sli4_hba.fp_eq) {
4971		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4972				"2576 Failed allocate memory for fast-path "
4973				"EQ record array\n");
4974		goto out_free_sp_eq;
4975	}
4976	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4977		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4978					      phba->sli4_hba.eq_ecount);
4979		if (!qdesc) {
4980			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4981					"0497 Failed allocate fast-path EQ\n");
4982			goto out_free_fp_eq;
4983		}
4984		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
4985	}
4986
4987	/*
4988	 * Create Complete Queues (CQs)
4989	 */
4990
4991	/* Get CQ depth from module parameter, fake the default for now */
4992	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
4993	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
4994
4995	/* Create slow-path Mailbox Command Complete Queue */
4996	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
4997				      phba->sli4_hba.cq_ecount);
4998	if (!qdesc) {
4999		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000				"0500 Failed allocate slow-path mailbox CQ\n");
5001		goto out_free_fp_eq;
5002	}
5003	phba->sli4_hba.mbx_cq = qdesc;
5004
5005	/* Create slow-path ELS Complete Queue */
5006	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5007				      phba->sli4_hba.cq_ecount);
5008	if (!qdesc) {
5009		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5010				"0501 Failed allocate slow-path ELS CQ\n");
5011		goto out_free_mbx_cq;
5012	}
5013	phba->sli4_hba.els_cq = qdesc;
5014
5015	/* Create slow-path Unsolicited Receive Complete Queue */
5016	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5017				      phba->sli4_hba.cq_ecount);
5018	if (!qdesc) {
5019		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5020				"0502 Failed allocate slow-path USOL RX CQ\n");
5021		goto out_free_els_cq;
5022	}
5023	phba->sli4_hba.rxq_cq = qdesc;
5024
5025	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5026	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5027				phba->cfg_fcp_eq_count), GFP_KERNEL);
5028	if (!phba->sli4_hba.fcp_cq) {
5029		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5030				"2577 Failed allocate memory for fast-path "
5031				"CQ record array\n");
5032		goto out_free_rxq_cq;
5033	}
5034	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5035		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5036					      phba->sli4_hba.cq_ecount);
5037		if (!qdesc) {
5038			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5039					"0499 Failed allocate fast-path FCP "
5040					"CQ (%d)\n", fcp_cqidx);
5041			goto out_free_fcp_cq;
5042		}
5043		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5044	}
5045
5046	/* Create Mailbox Command Queue */
5047	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5048	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5049
5050	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5051				      phba->sli4_hba.mq_ecount);
5052	if (!qdesc) {
5053		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5054				"0505 Failed allocate slow-path MQ\n");
5055		goto out_free_fcp_cq;
5056	}
5057	phba->sli4_hba.mbx_wq = qdesc;
5058
5059	/*
5060	 * Create all the Work Queues (WQs)
5061	 */
5062	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5063	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5064
5065	/* Create slow-path ELS Work Queue */
5066	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5067				      phba->sli4_hba.wq_ecount);
5068	if (!qdesc) {
5069		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5070				"0504 Failed allocate slow-path ELS WQ\n");
5071		goto out_free_mbx_wq;
5072	}
5073	phba->sli4_hba.els_wq = qdesc;
5074
5075	/* Create fast-path FCP Work Queue(s) */
5076	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5077				phba->cfg_fcp_wq_count), GFP_KERNEL);
5078	if (!phba->sli4_hba.fcp_wq) {
5079		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5080				"2578 Failed allocate memory for fast-path "
5081				"WQ record array\n");
5082		goto out_free_els_wq;
5083	}
5084	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5085		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5086					      phba->sli4_hba.wq_ecount);
5087		if (!qdesc) {
5088			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5089					"0503 Failed allocate fast-path FCP "
5090					"WQ (%d)\n", fcp_wqidx);
5091			goto out_free_fcp_wq;
5092		}
5093		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5094	}
5095
5096	/*
5097	 * Create Receive Queue (RQ)
5098	 */
5099	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5100	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5101
5102	/* Create Receive Queue for header */
5103	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5104				      phba->sli4_hba.rq_ecount);
5105	if (!qdesc) {
5106		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5107				"0506 Failed allocate receive HRQ\n");
5108		goto out_free_fcp_wq;
5109	}
5110	phba->sli4_hba.hdr_rq = qdesc;
5111
5112	/* Create Receive Queue for data */
5113	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5114				      phba->sli4_hba.rq_ecount);
5115	if (!qdesc) {
5116		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5117				"0507 Failed allocate receive DRQ\n");
5118		goto out_free_hdr_rq;
5119	}
5120	phba->sli4_hba.dat_rq = qdesc;
5121
5122	return 0;
5123
5124out_free_hdr_rq:
5125	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5126	phba->sli4_hba.hdr_rq = NULL;
5127out_free_fcp_wq:
5128	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5129		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5130		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5131	}
5132	kfree(phba->sli4_hba.fcp_wq);
5133out_free_els_wq:
5134	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5135	phba->sli4_hba.els_wq = NULL;
5136out_free_mbx_wq:
5137	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5138	phba->sli4_hba.mbx_wq = NULL;
5139out_free_fcp_cq:
5140	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5141		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5142		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5143	}
5144	kfree(phba->sli4_hba.fcp_cq);
5145out_free_rxq_cq:
5146	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5147	phba->sli4_hba.rxq_cq = NULL;
5148out_free_els_cq:
5149	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5150	phba->sli4_hba.els_cq = NULL;
5151out_free_mbx_cq:
5152	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5153	phba->sli4_hba.mbx_cq = NULL;
5154out_free_fp_eq:
5155	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5156		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5157		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5158	}
5159	kfree(phba->sli4_hba.fp_eq);
5160out_free_sp_eq:
5161	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5162	phba->sli4_hba.sp_eq = NULL;
5163out_error:
5164	return -ENOMEM;
5165}
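
/*
 * Note on the unwind labels above: each out_free_* label releases the
 * queues allocated before the failing step, in the reverse order of their
 * allocation, and NULLs the corresponding sli4_hba pointers, presumably so
 * that later teardown paths do not touch already-freed queues.
 */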
5166
5167/**
5168 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5169 * @phba: pointer to lpfc hba data structure.
5170 *
5171 * This routine is invoked to release the memory for all the SLI4 queues
5172 * allocated for the FCoE HBA operation.
5178 **/
5179static void
5180lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5181{
5182	int fcp_qidx;
5183
5184	/* Release mailbox command work queue */
5185	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5186	phba->sli4_hba.mbx_wq = NULL;
5187
5188	/* Release ELS work queue */
5189	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5190	phba->sli4_hba.els_wq = NULL;
5191
5192	/* Release FCP work queue */
5193	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5194		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5195	kfree(phba->sli4_hba.fcp_wq);
5196	phba->sli4_hba.fcp_wq = NULL;
5197
5198	/* Release unsolicited receive queue */
5199	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5200	phba->sli4_hba.hdr_rq = NULL;
5201	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5202	phba->sli4_hba.dat_rq = NULL;
5203
5204	/* Release unsolicited receive complete queue */
5205	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5206	phba->sli4_hba.rxq_cq = NULL;
5207
5208	/* Release ELS complete queue */
5209	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5210	phba->sli4_hba.els_cq = NULL;
5211
5212	/* Release mailbox command complete queue */
5213	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5214	phba->sli4_hba.mbx_cq = NULL;
5215
5216	/* Release FCP response complete queue */
5217	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5218		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5219	kfree(phba->sli4_hba.fcp_cq);
5220	phba->sli4_hba.fcp_cq = NULL;
5221
5222	/* Release fast-path event queue */
5223	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5224		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5225	kfree(phba->sli4_hba.fp_eq);
5226	phba->sli4_hba.fp_eq = NULL;
5227
5228	/* Release slow-path event queue */
5229	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5230	phba->sli4_hba.sp_eq = NULL;
5231
5232	return;
5233}
5234
5235/**
5236 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5237 * @phba: pointer to lpfc hba data structure.
5238 *
5239 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5240 * operation.
5241 *
5242 * Return codes
5243 *      0 - successful
5244 *      -ENOMEM - No available memory
5245 *      EIO - The mailbox failed to complete successfully.
5246 **/
5247int
5248lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5249{
5250	int rc = -ENOMEM;
5251	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5252	int fcp_cq_index = 0;
5253
5254	/*
5255	 * Set up Event Queues (EQs)
5256	 */
5257
5258	/* Set up slow-path event queue */
5259	if (!phba->sli4_hba.sp_eq) {
5260		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5261				"0520 Slow-path EQ not allocated\n");
5262		goto out_error;
5263	}
5264	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5265			    LPFC_SP_DEF_IMAX);
5266	if (rc) {
5267		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5268				"0521 Failed setup of slow-path EQ: "
5269				"rc = 0x%x\n", rc);
5270		goto out_error;
5271	}
5272	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5273			"2583 Slow-path EQ setup: queue-id=%d\n",
5274			phba->sli4_hba.sp_eq->queue_id);
5275
5276	/* Set up fast-path event queue */
5277	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5278		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5279			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5280					"0522 Fast-path EQ (%d) not "
5281					"allocated\n", fcp_eqidx);
5282			goto out_destroy_fp_eq;
5283		}
5284		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5285				    phba->cfg_fcp_imax);
5286		if (rc) {
5287			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5288					"0523 Failed setup of fast-path EQ "
5289					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5290			goto out_destroy_fp_eq;
5291		}
5292		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5293				"2584 Fast-path EQ setup: "
5294				"queue[%d]-id=%d\n", fcp_eqidx,
5295				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5296	}
5297
5298	/*
5299	 * Set up Complete Queues (CQs)
5300	 */
5301
5302	/* Set up slow-path MBOX Complete Queue as the first CQ */
5303	if (!phba->sli4_hba.mbx_cq) {
5304		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5305				"0528 Mailbox CQ not allocated\n");
5306		goto out_destroy_fp_eq;
5307	}
5308	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5309			    LPFC_MCQ, LPFC_MBOX);
5310	if (rc) {
5311		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5312				"0529 Failed setup of slow-path mailbox CQ: "
5313				"rc = 0x%x\n", rc);
5314		goto out_destroy_fp_eq;
5315	}
5316	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5317			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5318			phba->sli4_hba.mbx_cq->queue_id,
5319			phba->sli4_hba.sp_eq->queue_id);
5320
5321	/* Set up slow-path ELS Complete Queue */
5322	if (!phba->sli4_hba.els_cq) {
5323		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324				"0530 ELS CQ not allocated\n");
5325		goto out_destroy_mbx_cq;
5326	}
5327	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5328			    LPFC_WCQ, LPFC_ELS);
5329	if (rc) {
5330		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5331				"0531 Failed setup of slow-path ELS CQ: "
5332				"rc = 0x%x\n", rc);
5333		goto out_destroy_mbx_cq;
5334	}
5335	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5336			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5337			phba->sli4_hba.els_cq->queue_id,
5338			phba->sli4_hba.sp_eq->queue_id);
5339
5340	/* Set up slow-path Unsolicited Receive Complete Queue */
5341	if (!phba->sli4_hba.rxq_cq) {
5342		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5343				"0532 USOL RX CQ not allocated\n");
5344		goto out_destroy_els_cq;
5345	}
5346	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5347			    LPFC_RCQ, LPFC_USOL);
5348	if (rc) {
5349		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5350				"0533 Failed setup of slow-path USOL RX CQ: "
5351				"rc = 0x%x\n", rc);
5352		goto out_destroy_els_cq;
5353	}
5354	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5355			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5356			phba->sli4_hba.rxq_cq->queue_id,
5357			phba->sli4_hba.sp_eq->queue_id);
5358
5359	/* Set up fast-path FCP Response Complete Queue */
5360	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5361		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5362			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5363					"0526 Fast-path FCP CQ (%d) not "
5364					"allocated\n", fcp_cqidx);
5365			goto out_destroy_fcp_cq;
5366		}
5367		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5368				    phba->sli4_hba.fp_eq[fcp_cqidx],
5369				    LPFC_WCQ, LPFC_FCP);
5370		if (rc) {
5371			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5372					"0527 Failed setup of fast-path FCP "
5373					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5374			goto out_destroy_fcp_cq;
5375		}
5376		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5377				"2588 FCP CQ setup: cq[%d]-id=%d, "
5378				"parent eq[%d]-id=%d\n",
5379				fcp_cqidx,
5380				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5381				fcp_cqidx,
5382				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5383	}
5384
5385	/*
5386	 * Set up all the Work Queues (WQs)
5387	 */
5388
5389	/* Set up Mailbox Command Queue */
5390	if (!phba->sli4_hba.mbx_wq) {
5391		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5392				"0538 Slow-path MQ not allocated\n");
5393		goto out_destroy_fcp_cq;
5394	}
5395	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5396			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
5397	if (rc) {
5398		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5399				"0539 Failed setup of slow-path MQ: "
5400				"rc = 0x%x\n", rc);
5401		goto out_destroy_fcp_cq;
5402	}
5403	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5404			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5405			phba->sli4_hba.mbx_wq->queue_id,
5406			phba->sli4_hba.mbx_cq->queue_id);
5407
5408	/* Set up slow-path ELS Work Queue */
5409	if (!phba->sli4_hba.els_wq) {
5410		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411				"0536 Slow-path ELS WQ not allocated\n");
5412		goto out_destroy_mbx_wq;
5413	}
5414	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5415			    phba->sli4_hba.els_cq, LPFC_ELS);
5416	if (rc) {
5417		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418				"0537 Failed setup of slow-path ELS WQ: "
5419				"rc = 0x%x\n", rc);
5420		goto out_destroy_mbx_wq;
5421	}
5422	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5424			phba->sli4_hba.els_wq->queue_id,
5425			phba->sli4_hba.els_cq->queue_id);
5426
5427	/* Set up fast-path FCP Work Queue */
5428	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5429		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5430			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5431					"0534 Fast-path FCP WQ (%d) not "
5432					"allocated\n", fcp_wqidx);
5433			goto out_destroy_fcp_wq;
5434		}
5435		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5436				    phba->sli4_hba.fcp_cq[fcp_cq_index],
5437				    LPFC_FCP);
5438		if (rc) {
5439			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5440					"0535 Failed setup of fast-path FCP "
5441					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5442			goto out_destroy_fcp_wq;
5443		}
5444		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5445				"2591 FCP WQ setup: wq[%d]-id=%d, "
5446				"parent cq[%d]-id=%d\n",
5447				fcp_wqidx,
5448				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5449				fcp_cq_index,
5450				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5451		/* Round robin FCP Work Queue's Completion Queue assignment */
5452		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5453	}
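	/*
	 * Worked example of the round-robin assignment above: with
	 * cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2, fcp_cq_index
	 * cycles 0, 1, 0, 1, so WQ0/WQ2 post completions to CQ0 and
	 * WQ1/WQ3 post completions to CQ1.
	 */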
5454
5455	/*
5456	 * Create Receive Queue (RQ)
5457	 */
5458	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5459		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5460				"0540 Receive Queue not allocated\n");
5461		goto out_destroy_fcp_wq;
5462	}
5463	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5464			    phba->sli4_hba.rxq_cq, LPFC_USOL);
5465	if (rc) {
5466		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5467				"0541 Failed setup of Receive Queue: "
5468				"rc = 0x%x\n", rc);
5469		goto out_destroy_fcp_wq;
5470	}
5471	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5472			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5473			"parent cq-id=%d\n",
5474			phba->sli4_hba.hdr_rq->queue_id,
5475			phba->sli4_hba.dat_rq->queue_id,
5476			phba->sli4_hba.rxq_cq->queue_id);
5477	return 0;
5478
5479out_destroy_fcp_wq:
5480	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5481		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5482	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5483out_destroy_mbx_wq:
5484	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5485out_destroy_fcp_cq:
5486	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5487		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5488	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5489out_destroy_els_cq:
5490	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5491out_destroy_mbx_cq:
5492	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5493out_destroy_fp_eq:
5494	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5495		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5496	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5497out_error:
5498	return rc;
5499}
5500
5501/**
5502 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5503 * @phba: pointer to lpfc hba data structure.
5504 *
5505 * This routine is invoked to tear down all the SLI4 queues set up for the
5506 * FCoE HBA operation.
5512 **/
5513void
5514lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5515{
5516	int fcp_qidx;
5517
5518	/* Unset mailbox command work queue */
5519	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5520	/* Unset ELS work queue */
5521	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5522	/* Unset unsolicited receive queue */
5523	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5524	/* Unset FCP work queue */
5525	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5526		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5527	/* Unset mailbox command complete queue */
5528	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5529	/* Unset ELS complete queue */
5530	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5531	/* Unset unsolicited receive complete queue */
5532	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5533	/* Unset FCP response complete queue */
5534	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5535		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5536	/* Unset fast-path event queue */
5537	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5538		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5539	/* Unset slow-path event queue */
5540	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5541}
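
/*
 * Note the division of labor between this pair of routines:
 * lpfc_sli4_queue_unset() issues the mailbox commands that tear the queues
 * down on the HBA, while lpfc_sli4_queue_destroy() frees the host-side
 * queue structures allocated by lpfc_sli4_queue_create().
 */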
5542
5543/**
5544 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5545 * @phba: pointer to lpfc hba data structure.
5546 *
5547 * This routine is invoked to allocate and set up a pool of completion queue
5548 * events. The body of a completion queue event is a completion queue entry
5549 * (CQE). For now, this pool is used by the interrupt service routine to queue
5550 * the following HBA completion queue events for the worker thread to process:
5551 *   - Mailbox asynchronous events
5552 *   - Receive queue completion unsolicited events
5553 * Later, this can be used for all the slow-path events.
5554 *
5555 * Return codes
5556 *      0 - successful
5557 *      -ENOMEM - No available memory
5558 **/
5559static int
5560lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5561{
5562	struct lpfc_cq_event *cq_event;
5563	int i;
5564
5565	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5566		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5567		if (!cq_event)
5568			goto out_pool_create_fail;
5569		list_add_tail(&cq_event->list,
5570			      &phba->sli4_hba.sp_cqe_event_pool);
5571	}
5572	return 0;
5573
5574out_pool_create_fail:
5575	lpfc_sli4_cq_event_pool_destroy(phba);
5576	return -ENOMEM;
5577}
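
/*
 * Sizing note (an observation from the loop above, not a documented
 * requirement): the pool is pre-populated with 4 * cq_ecount entries so
 * that the interrupt service routine can draw events from the pool rather
 * than allocating memory in interrupt context.
 */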
5578
5579/**
5580 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5581 * @phba: pointer to lpfc hba data structure.
5582 *
5583 * This routine is invoked to free the pool of completion queue events at
5584 * driver unload time. Note that it is the responsibility of the driver
5585 * cleanup routine to return all outstanding completion-queue events
5586 * allocated from this pool back to the pool before invoking this routine
5587 * to destroy the pool.
5588 **/
5589static void
5590lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5591{
5592	struct lpfc_cq_event *cq_event, *next_cq_event;
5593
5594	list_for_each_entry_safe(cq_event, next_cq_event,
5595				 &phba->sli4_hba.sp_cqe_event_pool, list) {
5596		list_del(&cq_event->list);
5597		kfree(cq_event);
5598	}
5599}
5600
5601/**
5602 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5603 * @phba: pointer to lpfc hba data structure.
5604 *
5605 * This routine is the lock-free version of the API invoked to allocate a
5606 * completion-queue event from the free pool; the caller must hold hbalock.
5607 *
5608 * Return: Pointer to the newly allocated completion-queue event if successful
5609 *         NULL otherwise.
5610 **/
5611struct lpfc_cq_event *
5612__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5613{
5614	struct lpfc_cq_event *cq_event = NULL;
5615
5616	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5617			 struct lpfc_cq_event, list);
5618	return cq_event;
5619}
5620
5621/**
5622 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5623 * @phba: pointer to lpfc hba data structure.
5624 *
5625 * This routine is the locked version of the API invoked to allocate a
5626 * completion-queue event from the free pool.
5627 *
5628 * Return: Pointer to the newly allocated completion-queue event if successful
5629 *         NULL otherwise.
5630 **/
5631struct lpfc_cq_event *
5632lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5633{
5634	struct lpfc_cq_event *cq_event;
5635	unsigned long iflags;
5636
5637	spin_lock_irqsave(&phba->hbalock, iflags);
5638	cq_event = __lpfc_sli4_cq_event_alloc(phba);
5639	spin_unlock_irqrestore(&phba->hbalock, iflags);
5640	return cq_event;
5641}
5642
5643/**
5644 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5645 * @phba: pointer to lpfc hba data structure.
5646 * @cq_event: pointer to the completion queue event to be freed.
5647 *
5648 * This routine is the lock-free version of the API invoked to release a
5649 * completion-queue event back into the free pool; the caller must hold hbalock.
5650 **/
5651void
5652__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5653			     struct lpfc_cq_event *cq_event)
5654{
5655	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5656}
5657
5658/**
5659 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5660 * @phba: pointer to lpfc hba data structure.
5661 * @cq_event: pointer to the completion queue event to be freed.
5662 *
5663 * This routine is the locked version of the API invoked to release a
5664 * completion-queue event back into the free pool.
5665 **/
5666void
5667lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5668			   struct lpfc_cq_event *cq_event)
5669{
5670	unsigned long iflags;
5671	spin_lock_irqsave(&phba->hbalock, iflags);
5672	__lpfc_sli4_cq_event_release(phba, cq_event);
5673	spin_unlock_irqrestore(&phba->hbalock, iflags);
5674}
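
/*
 * Minimal usage sketch for the event pool (a hypothetical caller, not code
 * from this driver): an interrupt handler draws an event from the pool,
 * fills it, and queues it for the worker thread, which releases it once
 * the event has been processed:
 *
 *	struct lpfc_cq_event *cq_event;
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (cq_event) {
 *		// copy the CQE payload into cq_event and queue it on a
 *		// slow-path work list for the worker thread
 *	}
 *	// ... later, in the worker thread, after processing:
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */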
5675
5676/**
5677 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5678 * @phba: pointer to lpfc hba data structure.
5679 *
5680 * This routine releases all the pending completion-queue events back
5681 * into the free pool in preparation for a device reset.
5682 **/
5683static void
5684lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5685{
5686	LIST_HEAD(cqelist);
5687	struct lpfc_cq_event *cqe;
5688	unsigned long iflags;
5689
5690	/* Retrieve all the pending WCQEs from pending WCQE lists */
5691	spin_lock_irqsave(&phba->hbalock, iflags);
5692	/* Pending FCP XRI abort events */
5693	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5694			 &cqelist);
5695	/* Pending ELS XRI abort events */
5696	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5697			 &cqelist);
5698	/* Pending async events */
5699	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5700			 &cqelist);
5701	spin_unlock_irqrestore(&phba->hbalock, iflags);
5702
5703	while (!list_empty(&cqelist)) {
5704		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5705		lpfc_sli4_cq_event_release(phba, cqe);
5706	}
5707}
5708
5709/**
5710 * lpfc_pci_function_reset - Reset pci function.
5711 * @phba: pointer to lpfc hba data structure.
5712 *
5713 * This routine is invoked to request a PCI function reset. It destroys
5714 * all resources assigned to the PCI function that originates this request.
5715 *
5716 * Return codes
5717 *      0 - successful
5718 *      -ENOMEM - No available memory
5719 *      -ENXIO - The mailbox failed to complete successfully.
5720 **/
5721int
5722lpfc_pci_function_reset(struct lpfc_hba *phba)
5723{
5724	LPFC_MBOXQ_t *mboxq;
5725	uint32_t rc = 0;
5726	uint32_t shdr_status, shdr_add_status;
5727	union lpfc_sli4_cfg_shdr *shdr;
5728
5729	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5730	if (!mboxq) {
5731		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5732				"0494 Unable to allocate memory for issuing "
5733				"SLI_FUNCTION_RESET mailbox command\n");
5734		return -ENOMEM;
5735	}
5736
5737	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5738	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5739			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5740			 LPFC_SLI4_MBX_EMBED);
5741	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5742	shdr = (union lpfc_sli4_cfg_shdr *)
5743		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5744	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5745	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5746	if (rc != MBX_TIMEOUT)
5747		mempool_free(mboxq, phba->mbox_mem_pool);
5748	if (shdr_status || shdr_add_status || rc) {
5749		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5750				"0495 SLI_FUNCTION_RESET mailbox failed with "
5751				"status x%x add_status x%x, mbx status x%x\n",
5752				shdr_status, shdr_add_status, rc);
5753		rc = -ENXIO;
5754	}
5755	return rc;
5756}
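
/*
 * Note (inferred from the code above, not from any spec): on MBX_TIMEOUT
 * the mailbox buffer is deliberately not freed, since the firmware may
 * still complete the command later and the mailbox subsystem still owns
 * the buffer at that point.
 */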
5757
5758/**
5759 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5760 * @phba: pointer to lpfc hba data structure.
5761 * @cnt: number of nop mailbox commands to send.
5762 *
5763 * This routine is invoked to send @cnt NOP mailbox commands and wait for
5764 * each command to complete.
5765 *
5766 * Return: the number of NOP mailbox commands completed.
5767 **/
5768static int
5769lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5770{
5771	LPFC_MBOXQ_t *mboxq;
5772	int length, cmdsent;
5773	uint32_t mbox_tmo;
5774	uint32_t rc = 0;
5775	uint32_t shdr_status, shdr_add_status;
5776	union lpfc_sli4_cfg_shdr *shdr;
5777
5778	if (cnt == 0) {
5779		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5780				"2518 Requested to send 0 NOP mailbox cmd\n");
5781		return cnt;
5782	}
5783
5784	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5785	if (!mboxq) {
5786		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5787				"2519 Unable to allocate memory for issuing "
5788				"NOP mailbox command\n");
5789		return 0;
5790	}
5791
5792	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5793	length = (sizeof(struct lpfc_mbx_nop) -
5794		  sizeof(struct lpfc_sli4_cfg_mhdr));
5795	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5796			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5797
5798	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5799	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5800		if (!phba->sli4_hba.intr_enable)
5801			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5802		else
5803			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5804		if (rc == MBX_TIMEOUT)
5805			break;
5806		/* Check return status */
5807		shdr = (union lpfc_sli4_cfg_shdr *)
5808			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5809		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5810		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5811					 &shdr->response);
5812		if (shdr_status || shdr_add_status || rc) {
5813			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5814					"2520 NOP mailbox command failed "
5815					"status x%x add_status x%x mbx "
5816					"status x%x\n", shdr_status,
5817					shdr_add_status, rc);
5818			break;
5819		}
5820	}
5821
5822	if (rc != MBX_TIMEOUT)
5823		mempool_free(mboxq, phba->mbox_mem_pool);
5824
5825	return cmdsent;
5826}
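
/*
 * Hypothetical usage sketch (the count of 5 is illustrative only): probe
 * the health of the mailbox channel by sending a few NOPs and checking
 * that every one of them completed:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, 5) != 5)
 *		// the mailbox channel did not complete all NOPs
 */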
5827
5828/**
5829 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
5830 * @phba: pointer to lpfc hba data structure.
5831 * @fcfi: fcf index.
5832 *
5833 * This routine is invoked to unregister an FCFI from the device.
5834 **/
5835void
5836lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5837{
5838	LPFC_MBOXQ_t *mbox;
5839	uint32_t mbox_tmo;
5840	int rc;
5841	unsigned long flags;
5842
5843	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5844
5845	if (!mbox)
5846		return;
5847
5848	lpfc_unreg_fcfi(mbox, fcfi);
5849
5850	if (!phba->sli4_hba.intr_enable)
5851		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5852	else {
5853		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5854		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5855	}
5856	if (rc != MBX_TIMEOUT)
5857		mempool_free(mbox, phba->mbox_mem_pool);
5858	if (rc != MBX_SUCCESS)
5859		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5860				"2517 Unregister FCFI command failed "
5861				"status %d, mbxStatus x%x\n", rc,
5862				bf_get(lpfc_mqe_status, &mbox->u.mqe));
5863	else {
5864		spin_lock_irqsave(&phba->hbalock, flags);
5865		/* Mark the FCFI as no longer registered */
5866		phba->fcf.fcf_flag &=
5867			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5868		spin_unlock_irqrestore(&phba->hbalock, flags);
5869	}
5870}
5871
5872/**
5873 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5874 * @phba: pointer to lpfc hba data structure.
5875 *
5876 * This routine is invoked to set up the PCI device memory space for device
5877 * with SLI-4 interface spec.
5878 *
5879 * Return codes
5880 * 	0 - successful
5881 * 	other values - error
5882 **/
5883static int
5884lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5885{
5886	struct pci_dev *pdev;
5887	unsigned long bar0map_len, bar1map_len, bar2map_len;
5888	int error = -ENODEV;
5889
5890	/* Obtain PCI device reference */
5891	if (!phba->pcidev)
5892		return error;
5893	else
5894		pdev = phba->pcidev;
5895
5896	/* Try 64-bit DMA addressing first; fall back to 32-bit, else fail */
5897	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5898		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5899			return error;
5900
5901	/* Get the bus address of the SLI4 device BAR0, BAR1, and BAR2 and the
5902	 * number of bytes required by each mapping. These logical BARs are
5903	 * actually backed by PCI BAR regions 1, 2, and 4 on the SLI4 device.
5904	 */
5905	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5906	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5907
5908	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5909	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5910
5911	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5912	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5913
5914	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5915	phba->sli4_hba.conf_regs_memmap_p =
5916				ioremap(phba->pci_bar0_map, bar0map_len);
5917	if (!phba->sli4_hba.conf_regs_memmap_p) {
5918		dev_printk(KERN_ERR, &pdev->dev,
5919			   "ioremap failed for SLI4 PCI config registers.\n");
5920		goto out;
5921	}
5922
5923	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
5924	phba->sli4_hba.ctrl_regs_memmap_p =
5925				ioremap(phba->pci_bar1_map, bar1map_len);
5926	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5927		dev_printk(KERN_ERR, &pdev->dev,
5928			   "ioremap failed for SLI4 HBA control registers.\n");
5929		goto out_iounmap_conf;
5930	}
5931
5932	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5933	phba->sli4_hba.drbl_regs_memmap_p =
5934				ioremap(phba->pci_bar2_map, bar2map_len);
5935	if (!phba->sli4_hba.drbl_regs_memmap_p) {
5936		dev_printk(KERN_ERR, &pdev->dev,
5937			   "ioremap failed for SLI4 HBA doorbell registers.\n");
5938		goto out_iounmap_ctrl;
5939	}
5940
5941	/* Set up BAR0 PCI config space register memory map */
5942	lpfc_sli4_bar0_register_memmap(phba);
5943
5944	/* Set up BAR1 register memory map */
5945	lpfc_sli4_bar1_register_memmap(phba);
5946
5947	/* Set up BAR2 register memory map */
5948	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5949	if (error)
5950		goto out_iounmap_all;
5951
5952	return 0;
5953
5954out_iounmap_all:
5955	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5956out_iounmap_ctrl:
5957	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5958out_iounmap_conf:
5959	iounmap(phba->sli4_hba.conf_regs_memmap_p);
5960out:
5961	return error;
5962}
5963
5964/**
5965 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5966 * @phba: pointer to lpfc hba data structure.
5967 *
5968 * This routine is invoked to unset the PCI device memory space for device
5969 * with SLI-4 interface spec.
5970 **/
5971static void
5972lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5973{
5974	struct pci_dev *pdev;
5975
5976	/* Obtain PCI device reference */
5977	if (!phba->pcidev)
5978		return;
5979	else
5980		pdev = phba->pcidev;
5981
5984	/* Unmap I/O memory space */
5985	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5986	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5987	iounmap(phba->sli4_hba.conf_regs_memmap_p);
5988
5989	return;
5990}
5991
5992/**
5993 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
5994 * @phba: pointer to lpfc hba data structure.
5995 *
5996 * This routine is invoked to enable the MSI-X interrupt vectors to device
5997 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
5998 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
5999 * invoked, enables either all or nothing, depending on the current
6000 * availability of PCI vector resources. The device driver is responsible
6001 * for calling the individual request_irq() to register each MSI-X vector
6002 * with an interrupt handler, which is done in this function. Note that
6003 * later, when the device is unloading, the driver should always call
6004 * free_irq() on all MSI-X vectors it has done request_irq() on before
6005 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
6006 * the device will be left with MSI-X enabled, leaking its vectors.
6007 *
6008 * Return codes
6009 *   0 - successful
6010 *   other values - error
6011 **/
6012static int
6013lpfc_sli_enable_msix(struct lpfc_hba *phba)
6014{
6015	int rc, i;
6016	LPFC_MBOXQ_t *pmb;
6017
6018	/* Set up MSI-X multi-message vectors */
6019	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6020		phba->msix_entries[i].entry = i;
6021
6022	/* Configure MSI-X capability structure */
6023	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6024				ARRAY_SIZE(phba->msix_entries));
6025	if (rc) {
6026		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6027				"0420 PCI enable MSI-X failed (%d)\n", rc);
6028		goto msi_fail_out;
6029	}
6030	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6031		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6032				"0477 MSI-X entry[%d]: vector=x%x "
6033				"message=%d\n", i,
6034				phba->msix_entries[i].vector,
6035				phba->msix_entries[i].entry);
6036	/*
6037	 * Assign MSI-X vectors to interrupt handlers
6038	 */
6039
6040	/* vector-0 is associated to slow-path handler */
6041	rc = request_irq(phba->msix_entries[0].vector,
6042			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6043			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6044	if (rc) {
6045		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6046				"0421 MSI-X slow-path request_irq failed "
6047				"(%d)\n", rc);
6048		goto msi_fail_out;
6049	}
6050
6051	/* vector-1 is associated to fast-path handler */
6052	rc = request_irq(phba->msix_entries[1].vector,
6053			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6054			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6055
6056	if (rc) {
6057		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6058				"0429 MSI-X fast-path request_irq failed "
6059				"(%d)\n", rc);
6060		goto irq_fail_out;
6061	}
6062
6063	/*
6064	 * Configure HBA MSI-X attention conditions to messages
6065	 */
6066	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6067
6068	if (!pmb) {
6069		rc = -ENOMEM;
6070		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6071				"0474 Unable to allocate memory for issuing "
6072				"MBOX_CONFIG_MSI command\n");
6073		goto mem_fail_out;
6074	}
6075	rc = lpfc_config_msi(phba, pmb);
6076	if (rc)
6077		goto mbx_fail_out;
6078	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6079	if (rc != MBX_SUCCESS) {
6080		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6081				"0351 Config MSI mailbox command failed, "
6082				"mbxCmd x%x, mbxStatus x%x\n",
6083				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6084		goto mbx_fail_out;
6085	}
6086
6087	/* Free memory allocated for mailbox command */
6088	mempool_free(pmb, phba->mbox_mem_pool);
6089	return rc;
6090
6091mbx_fail_out:
6092	/* Free memory allocated for mailbox command */
6093	mempool_free(pmb, phba->mbox_mem_pool);
6094
6095mem_fail_out:
6096	/* free the irq already requested */
6097	free_irq(phba->msix_entries[1].vector, phba);
6098
6099irq_fail_out:
6100	/* free the irq already requested */
6101	free_irq(phba->msix_entries[0].vector, phba);
6102
6103msi_fail_out:
6104	/* Unconfigure MSI-X capability structure */
6105	pci_disable_msix(phba->pcidev);
6106	return rc;
6107}
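
/*
 * As the code above shows, SLI-3 MSI-X uses exactly two vectors
 * (LPFC_MSIX_VECTORS): vector 0 is wired to the slow-path handler and
 * vector 1 to the fast-path handler, and the MBOX_CONFIG_MSI mailbox
 * command then maps HBA attention conditions onto those messages.
 */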
6108
6109/**
6110 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6111 * @phba: pointer to lpfc hba data structure.
6112 *
6113 * This routine is invoked to release the MSI-X vectors and then disable the
6114 * MSI-X interrupt mode to device with SLI-3 interface spec.
6115 **/
6116static void
6117lpfc_sli_disable_msix(struct lpfc_hba *phba)
6118{
6119	int i;
6120
6121	/* Free up MSI-X multi-message vectors */
6122	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6123		free_irq(phba->msix_entries[i].vector, phba);
6124	/* Disable MSI-X */
6125	pci_disable_msix(phba->pcidev);
6126
6127	return;
6128}
6129
6130/**
6131 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6132 * @phba: pointer to lpfc hba data structure.
6133 *
6134 * This routine is invoked to enable the MSI interrupt mode to device with
6135 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6136 * enable the MSI vector. The device driver is responsible for calling
6137 * request_irq() to register the MSI vector with an interrupt handler,
6138 * which is done in this function.
6139 *
6140 * Return codes
6141 * 	0 - successful
6142 * 	other values - error
6143 */
6144static int
6145lpfc_sli_enable_msi(struct lpfc_hba *phba)
6146{
6147	int rc;
6148
6149	rc = pci_enable_msi(phba->pcidev);
6150	if (!rc)
6151		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6152				"0462 PCI enable MSI mode success.\n");
6153	else {
6154		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6155				"0471 PCI enable MSI mode failed (%d)\n", rc);
6156		return rc;
6157	}
6158
6159	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6160			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6161	if (rc) {
6162		pci_disable_msi(phba->pcidev);
6163		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6164				"0478 MSI request_irq failed (%d)\n", rc);
6165	}
6166	return rc;
6167}
6168
6169/**
6170 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6171 * @phba: pointer to lpfc hba data structure.
6172 *
6173 * This routine is invoked to disable the MSI interrupt mode to device with
6174 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6175 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6176 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6177 * its vector.
6178 */
6179static void
6180lpfc_sli_disable_msi(struct lpfc_hba *phba)
6181{
6182	free_irq(phba->pcidev->irq, phba);
6183	pci_disable_msi(phba->pcidev);
6184	return;
6185}
6186
6187/**
6188 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6189 * @phba: pointer to lpfc hba data structure.
6190 *
6191 * This routine is invoked to enable device interrupt and associate driver's
6192 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6193 * spec. Depending on the interrupt mode configured for the driver, the
6194 * driver will try to fall back from the configured interrupt mode to an
6195 * interrupt mode supported by the platform, kernel, and device, in the
6196 * order of:
6197 * MSI-X -> MSI -> IRQ.
6198 *
6199 * Return codes
6200 *   interrupt mode (2, 1 or 0) - successful
6201 *   LPFC_INTR_ERROR - error
6202 **/
6203static uint32_t
6204lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6205{
6206	uint32_t intr_mode = LPFC_INTR_ERROR;
6207	int retval;
6208
6209	if (cfg_mode == 2) {
6210		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6211		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6212		if (!retval) {
6213			/* Now, try to enable MSI-X interrupt mode */
6214			retval = lpfc_sli_enable_msix(phba);
6215			if (!retval) {
6216				/* Indicate initialization to MSI-X mode */
6217				phba->intr_type = MSIX;
6218				intr_mode = 2;
6219			}
6220		}
6221	}
6222
6223	/* Fallback to MSI if MSI-X initialization failed */
6224	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6225		retval = lpfc_sli_enable_msi(phba);
6226		if (!retval) {
6227			/* Indicate initialization to MSI mode */
6228			phba->intr_type = MSI;
6229			intr_mode = 1;
6230		}
6231	}
6232
6233	/* Fallback to INTx if both MSI-X/MSI initialization failed */
6234	if (phba->intr_type == NONE) {
6235		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6236				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6237		if (!retval) {
6238			/* Indicate initialization to INTx mode */
6239			phba->intr_type = INTx;
6240			intr_mode = 0;
6241		}
6242	}
6243	return intr_mode;
6244}
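
/*
 * Illustration of the fallback ladder above: cfg_mode == 2 attempts MSI-X
 * first, cfg_mode == 1 starts at MSI, and cfg_mode == 0 goes straight to
 * INTx; the returned intr_mode (2, 1 or 0) records the mode actually
 * enabled, while LPFC_INTR_ERROR means no interrupt mode could be set up.
 */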
6245
6246/**
6247 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6248 * @phba: pointer to lpfc hba data structure.
6249 *
6250 * This routine is invoked to disable device interrupt and disassociate the
6251 * driver's interrupt handler(s) from interrupt vector(s) to device with
6252 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6253 * release the interrupt vector(s) for the message signaled interrupt.
6254 **/
6255static void
6256lpfc_sli_disable_intr(struct lpfc_hba *phba)
6257{
6258	/* Disable the currently initialized interrupt mode */
6259	if (phba->intr_type == MSIX)
6260		lpfc_sli_disable_msix(phba);
6261	else if (phba->intr_type == MSI)
6262		lpfc_sli_disable_msi(phba);
6263	else if (phba->intr_type == INTx)
6264		free_irq(phba->pcidev->irq, phba);
6265
6266	/* Reset interrupt management states */
6267	phba->intr_type = NONE;
6268	phba->sli.slistat.sli_intr = 0;
6269
6270	return;
6271}
6272
6273/**
6274 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6275 * @phba: pointer to lpfc hba data structure.
6276 *
6277 * This routine is invoked to enable the MSI-X interrupt vectors to device
6278 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6279 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6280 * enables either all or nothing, depending on the current availability of
6281 * PCI vector resources. The device driver is responsible for calling the
6282 * individual request_irq() to register each MSI-X vector with an interrupt
6283 * handler, which is done in this function. Note that later, when the device
6284 * is unloading, the driver should always call free_irq() on all MSI-X
6285 * vectors it has done request_irq() on before calling pci_disable_msix().
6286 * Failure to do so results in a BUG_ON() and the device will be left with
6287 * MSI-X enabled, leaking its vectors.
6288 *
6289 * Return codes
6290 * 0 - successful
6291 * other values - error
6292 **/
6293static int
6294lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6295{
6296	int rc, index;
6297
6298	/* Set up MSI-X multi-message vectors */
6299	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6300		phba->sli4_hba.msix_entries[index].entry = index;
6301
6302	/* Configure MSI-X capability structure */
6303	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6304			     phba->sli4_hba.cfg_eqn);
6305	if (rc) {
6306		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6307				"0484 PCI enable MSI-X failed (%d)\n", rc);
6308		goto msi_fail_out;
6309	}
6310	/* Log MSI-X vector assignment */
6311	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6312		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6313				"0489 MSI-X entry[%d]: vector=x%x "
6314				"message=%d\n", index,
6315				phba->sli4_hba.msix_entries[index].vector,
6316				phba->sli4_hba.msix_entries[index].entry);
6317	/*
6318	 * Assign MSI-X vectors to interrupt handlers
6319	 */
6320
6321	/* The first vector must be associated with the slow-path handler for MQ */
6322	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6323			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6324			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6325	if (rc) {
6326		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6327				"0485 MSI-X slow-path request_irq failed "
6328				"(%d)\n", rc);
6329		goto msi_fail_out;
6330	}
6331
6332	/* The rest of the vector(s) are associated to fast-path handler(s) */
6333	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6334		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6335		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6336		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6337				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6338				 LPFC_FP_DRIVER_HANDLER_NAME,
6339				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6340		if (rc) {
6341			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6342					"0486 MSI-X fast-path (%d) "
6343					"request_irq failed (%d)\n", index, rc);
6344			goto cfg_fail_out;
6345		}
6346	}
6347
6348	return rc;
6349
6350cfg_fail_out:
6351	/* free the irq already requested */
6352	for (--index; index >= 1; index--)
6353		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6354			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6355
6356	/* free the irq already requested */
6357	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6358
6359msi_fail_out:
6360	/* Unconfigure MSI-X capability structure */
6361	pci_disable_msix(phba->pcidev);
6362	return rc;
6363}
6364
6365/**
6366 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6367 * @phba: pointer to lpfc hba data structure.
6368 *
6369 * This routine is invoked to release the MSI-X vectors and then disable the
6370 * MSI-X interrupt mode to device with SLI-4 interface spec.
6371 **/
6372static void
6373lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6374{
6375	int index;
6376
6377	/* Free up MSI-X multi-message vectors */
6378	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6379
6380	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6381		free_irq(phba->sli4_hba.msix_entries[index].vector,
6382			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6383	/* Disable MSI-X */
6384	pci_disable_msix(phba->pcidev);
6385
6386	return;
6387}
6388
6389/**
6390 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6391 * @phba: pointer to lpfc hba data structure.
6392 *
6393 * This routine is invoked to enable the MSI interrupt mode to device with
6394 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6395 * to enable the MSI vector. The device driver is responsible for calling
6396 * request_irq() to register the MSI vector with an interrupt handler,
6397 * which is done in this function.
6398 *
6399 * Return codes
6400 * 	0 - successful
6401 * 	other values - error
6402 **/
6403static int
6404lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6405{
6406	int rc, index;
6407
6408	rc = pci_enable_msi(phba->pcidev);
6409	if (!rc)
6410		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6411				"0487 PCI enable MSI mode success.\n");
6412	else {
6413		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6414				"0488 PCI enable MSI mode failed (%d)\n", rc);
6415		return rc;
6416	}
6417
6418	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6419			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6420	if (rc) {
6421		pci_disable_msi(phba->pcidev);
6422		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6423				"0490 MSI request_irq failed (%d)\n", rc);
6424	}
6425
6426	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6427		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6428		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6429	}
6430
6431	return rc;
6432}
6433
6434/**
6435 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6436 * @phba: pointer to lpfc hba data structure.
6437 *
6438 * This routine is invoked to disable the MSI interrupt mode to device with
6439 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
6440 * has done request_irq() on before calling pci_disable_msi(). Failure to
6441 * do so results in a BUG_ON() and the device will be left with MSI
6442 * enabled, leaking its vector.
6443 **/
6444static void
6445lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6446{
6447	free_irq(phba->pcidev->irq, phba);
6448	pci_disable_msi(phba->pcidev);
6449	return;
6450}
6451
6452/**
6453 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6454 * @phba: pointer to lpfc hba data structure.
6455 *
6456 * This routine is invoked to enable device interrupt and associate driver's
6457 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
6458 * interface spec. Depending on the interrupt mode configured for the
6459 * driver, the driver will try to fall back from the configured interrupt
6460 * mode to an interrupt mode supported by the platform, kernel, and device,
6461 * in the order of:
6462 * MSI-X -> MSI -> IRQ.
6463 *
6464 * Return codes
6465 * 	interrupt mode (2, 1 or 0) - successful
6466 * 	LPFC_INTR_ERROR - error
6467 **/
6468static uint32_t
6469lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6470{
6471	uint32_t intr_mode = LPFC_INTR_ERROR;
6472	int retval, index;
6473
6474	if (cfg_mode == 2) {
6475		/* Now, try to enable MSI-X interrupt mode */
6476		retval = lpfc_sli4_enable_msix(phba);
6477		if (!retval) {
6478			/* Indicate initialization to MSI-X mode */
6479			phba->intr_type = MSIX;
6480			intr_mode = 2;
6481		}
6482	}
6487
6488	/* Fallback to MSI if MSI-X initialization failed */
6489	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6490		retval = lpfc_sli4_enable_msi(phba);
6491		if (!retval) {
6492			/* Indicate initialization to MSI mode */
6493			phba->intr_type = MSI;
6494			intr_mode = 1;
6495		}
6496	}
6497
6498	/* Fallback to INTx if both MSI-X/MSI initialization failed */
6499	if (phba->intr_type == NONE) {
6500		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6501				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6502		if (!retval) {
6503			/* Indicate initialization to INTx mode */
6504			phba->intr_type = INTx;
6505			intr_mode = 0;
6506			for (index = 0; index < phba->cfg_fcp_eq_count;
6507			     index++) {
6508				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6509				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6510			}
6511		}
6512	}
6513	return intr_mode;
6514}
6515
6516/**
6517 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6518 * @phba: pointer to lpfc hba data structure.
6519 *
6520 * This routine is invoked to disable device interrupt and disassociate
6521 * the driver's interrupt handler(s) from interrupt vector(s) to device
6522 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6523 * will release the interrupt vector(s) for the message signaled interrupt.
6524 **/
6525static void
6526lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6527{
6528	/* Disable the currently initialized interrupt mode */
6529	if (phba->intr_type == MSIX)
6530		lpfc_sli4_disable_msix(phba);
6531	else if (phba->intr_type == MSI)
6532		lpfc_sli4_disable_msi(phba);
6533	else if (phba->intr_type == INTx)
6534		free_irq(phba->pcidev->irq, phba);
6535
6536	/* Reset interrupt management states */
6537	phba->intr_type = NONE;
6538	phba->sli.slistat.sli_intr = 0;
6539
6540	return;
6541}
6542
6543/**
6544 * lpfc_unset_hba - Unset SLI3 hba device initialization
6545 * @phba: pointer to lpfc hba data structure.
6546 *
6547 * This routine is invoked to unset the HBA device initialization steps to
6548 * a device with SLI-3 interface spec.
6549 **/
6550static void
6551lpfc_unset_hba(struct lpfc_hba *phba)
6552{
6553	struct lpfc_vport *vport = phba->pport;
6554	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6555
6556	spin_lock_irq(shost->host_lock);
6557	vport->load_flag |= FC_UNLOADING;
6558	spin_unlock_irq(shost->host_lock);
6559
6560	lpfc_stop_hba_timers(phba);
6561
6562	phba->pport->work_port_events = 0;
6563
6564	lpfc_sli_hba_down(phba);
6565
6566	lpfc_sli_brdrestart(phba);
6567
6568	lpfc_sli_disable_intr(phba);
6569
6570	return;
6571}
6572
6573/**
6574 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
6575 * @phba: pointer to lpfc hba data structure.
6576 *
6577 * This routine is invoked to unset the HBA device initialization steps to
6578 * a device with SLI-4 interface spec.
6579 **/
6580static void
6581lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6582{
6583	struct lpfc_vport *vport = phba->pport;
6584	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6585
6586	spin_lock_irq(shost->host_lock);
6587	vport->load_flag |= FC_UNLOADING;
6588	spin_unlock_irq(shost->host_lock);
6589
6590	phba->pport->work_port_events = 0;
6591
6592	lpfc_sli4_hba_down(phba);
6593
6594	lpfc_sli4_disable_intr(phba);
6595
6596	return;
6597}
6598
6599/**
6600 * lpfc_sli4_hba_unset - Unset the fcoe hba
6601 * @phba: Pointer to HBA context object.
6602 *
6603 * This function is called in the SLI4 code path to reset the HBA's FCoE
6604 * function. The caller is not required to hold any lock. This routine
6605 * issues PCI function reset mailbox command to reset the FCoE function.
6606 * At the end of the function, it calls lpfc_hba_down_post function to
6607 * free any pending commands.
6608 **/
6609static void
6610lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6611{
6612	int wait_cnt = 0;
6613	LPFC_MBOXQ_t *mboxq;
6614
6615	lpfc_stop_hba_timers(phba);
6616	phba->sli4_hba.intr_enable = 0;
6617
6618	/*
6619	 * Gracefully wait out the potential current outstanding asynchronous
6620	 * mailbox command.
6621	 */
6622
6623	/* First, block any pending async mailbox command from posted */
6624	spin_lock_irq(&phba->hbalock);
6625	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6626	spin_unlock_irq(&phba->hbalock);
6627	/* Now, try to wait it out if we can */
6628	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6629		msleep(10);
6630		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6631			break;
6632	}
6633	/* Forcefully release the outstanding mailbox command if timed out */
6634	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6635		spin_lock_irq(&phba->hbalock);
6636		mboxq = phba->sli.mbox_active;
6637		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6638		__lpfc_mbox_cmpl_put(phba, mboxq);
6639		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6640		phba->sli.mbox_active = NULL;
6641		spin_unlock_irq(&phba->hbalock);
6642	}
6643
6644	/* Tear down the queues in the HBA */
6645	lpfc_sli4_queue_unset(phba);
6646
6647	/* Disable PCI subsystem interrupt */
6648	lpfc_sli4_disable_intr(phba);
6649
6650	/* Stopping the kthread triggers work_done one more time */
6651	kthread_stop(phba->worker_thread);
6652
6653	/* Stop the SLI4 device port */
6654	phba->pport->work_port_events = 0;
6655}
6656
6657/**
6658 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6659 * @pdev: pointer to PCI device
6660 * @pid: pointer to PCI device identifier
6661 *
6662 * This routine is to be called to attach a device with SLI-3 interface spec
6663 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6664 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6665 * information of the device and driver to see if the driver states that it can
6666 * support this kind of device. If the match is successful, the driver core
6667 * invokes this routine. If this routine determines it can claim the HBA, it
6668 * does all the initialization that it needs to do to handle the HBA properly.
6669 *
6670 * Return code
6671 * 	0 - driver can claim the device
6672 * 	negative value - driver can not claim the device
6673 **/
6674static int __devinit
6675lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6676{
6677	struct lpfc_hba   *phba;
6678	struct lpfc_vport *vport = NULL;
6679	int error;
6680	uint32_t cfg_mode, intr_mode;
6681
6682	/* Allocate memory for HBA structure */
6683	phba = lpfc_hba_alloc(pdev);
6684	if (!phba)
6685		return -ENOMEM;
6686
6687	/* Perform generic PCI device enabling operation */
6688	error = lpfc_enable_pci_dev(phba);
6689	if (error) {
6690		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6691				"1401 Failed to enable pci device.\n");
6692		goto out_free_phba;
6693	}
6694
6695	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
6696	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
6697	if (error)
6698		goto out_disable_pci_dev;
6699
6700	/* Set up SLI-3 specific device PCI memory space */
6701	error = lpfc_sli_pci_mem_setup(phba);
6702	if (error) {
6703		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6704				"1402 Failed to set up pci memory space.\n");
6705		goto out_disable_pci_dev;
6706	}
6707
6708	/* Set up phase-1 common device driver resources */
6709	error = lpfc_setup_driver_resource_phase1(phba);
6710	if (error) {
6711		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6712				"1403 Failed to set up driver resource.\n");
6713		goto out_unset_pci_mem_s3;
6714	}
6715
6716	/* Set up SLI-3 specific device driver resources */
6717	error = lpfc_sli_driver_resource_setup(phba);
6718	if (error) {
6719		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6720				"1404 Failed to set up driver resource.\n");
6721		goto out_unset_pci_mem_s3;
6722	}
6723
6724	/* Initialize and populate the iocb list per host */
6725	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
6726	if (error) {
6727		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6728				"1405 Failed to initialize iocb list.\n");
6729		goto out_unset_driver_resource_s3;
6730	}
6731
6732	/* Set up common device driver resources */
6733	error = lpfc_setup_driver_resource_phase2(phba);
6734	if (error) {
6735		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6736				"1406 Failed to set up driver resource.\n");
6737		goto out_free_iocb_list;
6738	}
6739
6740	/* Create SCSI host to the physical port */
6741	error = lpfc_create_shost(phba);
6742	if (error) {
6743		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6744				"1407 Failed to create scsi host.\n");
6745		goto out_unset_driver_resource;
6746	}
6747
6748	/* Configure sysfs attributes */
6749	vport = phba->pport;
6750	error = lpfc_alloc_sysfs_attr(vport);
6751	if (error) {
6752		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6753				"1476 Failed to allocate sysfs attr\n");
6754		goto out_destroy_shost;
6755	}
6756
6757	/* Now, try to enable the interrupt and bring up the device */
6758	cfg_mode = phba->cfg_use_msi;
6759	while (true) {
6760		/* Put device to a known state before enabling interrupt */
6761		lpfc_stop_port(phba);
6762		/* Configure and enable interrupt */
6763		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
6764		if (intr_mode == LPFC_INTR_ERROR) {
6765			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6766					"0431 Failed to enable interrupt.\n");
6767			error = -ENODEV;
6768			goto out_free_sysfs_attr;
6769		}
6770		/* SLI-3 HBA setup */
6771		if (lpfc_sli_hba_setup(phba)) {
6772			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6773					"1477 Failed to set up hba\n");
6774			error = -ENODEV;
6775			goto out_remove_device;
6776		}
6777
6778		/* Wait 50ms for the interrupts of previous mailbox commands */
6779		msleep(50);
6780		/* Check active interrupts on message signaled interrupts */
6781		if (intr_mode == 0 ||
6782		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6783			/* Log the current active interrupt mode */
6784			phba->intr_mode = intr_mode;
6785			lpfc_log_intr_mode(phba, intr_mode);
6786			break;
6787		} else {
6788			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6789					"0447 Configure interrupt mode (%d) "
6790					"failed active interrupt test.\n",
6791					intr_mode);
6792			/* Disable the current interrupt mode */
6793			lpfc_sli_disable_intr(phba);
6794			/* Try next level of interrupt mode */
6795			cfg_mode = --intr_mode;
6796		}
6797	}
6798
6799	/* Perform post initialization setup */
6800	lpfc_post_init_setup(phba);
6801
6802	/* Check if there are static vports to be created. */
6803	lpfc_create_static_vport(phba);
6804
6805	return 0;
6806
6807out_remove_device:
6808	lpfc_unset_hba(phba);
6809out_free_sysfs_attr:
6810	lpfc_free_sysfs_attr(vport);
6811out_destroy_shost:
6812	lpfc_destroy_shost(phba);
6813out_unset_driver_resource:
6814	lpfc_unset_driver_resource_phase2(phba);
6815out_free_iocb_list:
6816	lpfc_free_iocb_list(phba);
6817out_unset_driver_resource_s3:
6818	lpfc_sli_driver_resource_unset(phba);
6819out_unset_pci_mem_s3:
6820	lpfc_sli_pci_mem_unset(phba);
6821out_disable_pci_dev:
6822	lpfc_disable_pci_dev(phba);
6823out_free_phba:
6824	lpfc_hba_free(phba);
6825	return error;
6826}
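
/*
 * Illustrative sketch (hypothetical helper, kept out of the build): the
 * probe loop above retries lpfc_sli_enable_intr() one interrupt level
 * lower each pass -- MSI-X (2), then MSI (1), then INTx (0) -- until the
 * active-interrupt test passes.  The calls and the test below mirror the
 * loop in lpfc_pci_probe_one_s3().
 */
#if 0
static uint32_t
example_sli_intr_fallback(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode;

	for (;;) {
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR)
			return LPFC_INTR_ERROR;
		/* Give pending mailbox-command interrupts time to arrive */
		msleep(50);
		/* INTx is trusted; MSI/MSI-X must show live interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS)
			return intr_mode;
		/* Tear down and retry one level lower */
		lpfc_sli_disable_intr(phba);
		cfg_mode = --intr_mode;
	}
}
#endif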
6827
6828/**
6829 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6830 * @pdev: pointer to PCI device
6831 *
6832 * This routine is to be called to detach a device with SLI-3 interface
6833 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6834 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6835 * device to be removed from the PCI subsystem properly.
6836 **/
6837static void __devexit
6838lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6839{
6840	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
6841	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6842	struct lpfc_vport **vports;
6843	struct lpfc_hba   *phba = vport->phba;
6844	int i;
6845	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6846
6847	spin_lock_irq(&phba->hbalock);
6848	vport->load_flag |= FC_UNLOADING;
6849	spin_unlock_irq(&phba->hbalock);
6850
6851	lpfc_free_sysfs_attr(vport);
6852
6853	/* Release all the vports against this physical port */
6854	vports = lpfc_create_vport_work_array(phba);
6855	if (vports != NULL)
6856		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6857			fc_vport_terminate(vports[i]->fc_vport);
6858	lpfc_destroy_vport_work_array(phba, vports);
6859
6860	/* Remove FC host and then SCSI host with the physical port */
6861	fc_remove_host(shost);
6862	scsi_remove_host(shost);
6863	lpfc_cleanup(vport);
6864
6865	/*
6866	 * Bring down the SLI Layer. This step disables all interrupts,
6867	 * clears the rings, discards all mailbox commands, and resets
6868	 * the HBA.
6869	 */
6870
6871	/* HBA interrupt will be disabled after this call */
6872	lpfc_sli_hba_down(phba);
6873	/* Stopping the kthread will trigger work_done one more time */
6874	kthread_stop(phba->worker_thread);
6875	/* Final cleanup of txcmplq and reset the HBA */
6876	lpfc_sli_brdrestart(phba);
6877
6878	lpfc_stop_hba_timers(phba);
6879	spin_lock_irq(&phba->hbalock);
6880	list_del_init(&vport->listentry);
6881	spin_unlock_irq(&phba->hbalock);
6882
6883	lpfc_debugfs_terminate(vport);
6884
6885	/* Disable interrupt */
6886	lpfc_sli_disable_intr(phba);
6887
6888	pci_set_drvdata(pdev, NULL);
6889	scsi_host_put(shost);
6890
6891	/*
6892	 * Call scsi_free before mem_free since scsi bufs are released to their
6893	 * corresponding pools here.
6894	 */
6895	lpfc_scsi_free(phba);
6896	lpfc_mem_free_all(phba);
6897
6898	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6899			  phba->hbqslimp.virt, phba->hbqslimp.phys);
6900
6901	/* Free resources associated with SLI2 interface */
6902	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6903			  phba->slim2p.virt, phba->slim2p.phys);
6904
6905	/* unmap adapter SLIM and Control Registers */
6906	iounmap(phba->ctrl_regs_memmap_p);
6907	iounmap(phba->slim_memmap_p);
6908
6909	lpfc_hba_free(phba);
6910
6911	pci_release_selected_regions(pdev, bars);
6912	pci_disable_device(pdev);
6913}
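
/*
 * Illustrative sketch (hypothetical helper, kept out of the build): the
 * unload preamble shared by both remove routines -- mark the port
 * FC_UNLOADING under hbalock, then terminate every vport hanging off
 * the physical port.  The loop starts at index 1, skipping the physical
 * port's own entry in the work array.
 */
#if 0
static void
example_unload_prep(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	struct lpfc_vport **vports;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);
}
#endif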
6914
6915/**
6916 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
6917 * @pdev: pointer to PCI device
6918 * @msg: power management message
6919 *
6920 * This routine is to be called from the kernel's PCI subsystem to support
6921 * system Power Management (PM) to a device with SLI-3 interface spec. When
6922 * PM invokes this method, it quiesces the device by stopping the driver's
6923 * worker thread for the device, turning off device's interrupt and DMA,
6924 * and bring the device offline. Note that as the driver implements the
6925 * minimum PM requirements to a power-aware driver's PM support for the
6926 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
6927 * to the suspend() method call will be treated as SUSPEND and the driver will
6928 * fully reinitialize its device during resume() method call, the driver will
6929 * set device to PCI_D3hot state in PCI config space instead of setting it
6930 * according to the @msg provided by the PM.
6931 *
6932 * Return code
6933 * 	0 - driver suspended the device
6934 * 	Error otherwise
6935 **/
6936static int
6937lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
6938{
6939	struct Scsi_Host *shost = pci_get_drvdata(pdev);
6940	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
6941
6942	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6943			"0473 PCI device Power Management suspend.\n");
6944
6945	/* Bring down the device */
6946	lpfc_offline_prep(phba);
6947	lpfc_offline(phba);
6948	kthread_stop(phba->worker_thread);
6949
6950	/* Disable interrupt from device */
6951	lpfc_sli_disable_intr(phba);
6952
6953	/* Save device state to PCI config space */
6954	pci_save_state(pdev);
6955	pci_set_power_state(pdev, PCI_D3hot);
6956
6957	return 0;
6958}
6959
6960/**
6961 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
6962 * @pdev: pointer to PCI device
6963 *
6964 * This routine is to be called from the kernel's PCI subsystem to support
6965 * system Power Management (PM) to a device with SLI-3 interface spec. When PM
6966 * invokes this method, it restores the device's PCI config space state and
6967 * fully reinitializes the device and brings it online. Note that as the
6968 * driver implements the minimum PM requirements to a power-aware driver's
6969 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
6970 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
6971 * driver will fully reinitialize its device during resume() method call,
6972 * the device will be set to PCI_D0 directly in PCI config space before
6973 * restoring the state.
6974 *
6975 * Return code
6976 * 	0 - driver suspended the device
6977 * 	0 - driver resumed the device
6978 **/
6979static int
6980lpfc_pci_resume_one_s3(struct pci_dev *pdev)
6981{
6982	struct Scsi_Host *shost = pci_get_drvdata(pdev);
6983	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
6984	uint32_t intr_mode;
6985	int error;
6986
6987	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6988			"0452 PCI device Power Management resume.\n");
6989
6990	/* Restore device state from PCI config space */
6991	pci_set_power_state(pdev, PCI_D0);
6992	pci_restore_state(pdev);
6993	if (pdev->is_busmaster)
6994		pci_set_master(pdev);
6995
6996	/* Startup the kernel thread for this host adapter. */
6997	phba->worker_thread = kthread_run(lpfc_do_work, phba,
6998					"lpfc_worker_%d", phba->brd_no);
6999	if (IS_ERR(phba->worker_thread)) {
7000		error = PTR_ERR(phba->worker_thread);
7001		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7002				"0434 PM resume failed to start worker "
7003				"thread: error=x%x.\n", error);
7004		return error;
7005	}
7006
7007	/* Configure and enable interrupt */
7008	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7009	if (intr_mode == LPFC_INTR_ERROR) {
7010		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7011				"0430 PM resume Failed to enable interrupt\n");
7012		return -EIO;
7013	} else
7014		phba->intr_mode = intr_mode;
7015
7016	/* Restart HBA and bring it online */
7017	lpfc_sli_brdrestart(phba);
7018	lpfc_online(phba);
7019
7020	/* Log the current active interrupt mode */
7021	lpfc_log_intr_mode(phba, phba->intr_mode);
7022
7023	return 0;
7024}
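
/*
 * Illustrative sketch (hypothetical helpers, kept out of the build): the
 * suspend/resume pair above follows the standard PCI PM sequence --
 * save config space and drop to D3hot on suspend; return to D0, restore
 * config space, and re-enable bus mastering on resume.
 */
#if 0
static void
example_pm_enter_d3hot(struct pci_dev *pdev)
{
	pci_save_state(pdev);			/* snapshot config space */
	pci_set_power_state(pdev, PCI_D3hot);	/* deepest powered state */
}

static void
example_pm_return_to_d0(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);	/* power up first ... */
	pci_restore_state(pdev);		/* ... then restore config */
	if (pdev->is_busmaster)
		pci_set_master(pdev);		/* restore bus mastering */
}
#endif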
7025
7026/**
7027 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7028 * @pdev: pointer to PCI device.
7029 * @state: the current PCI connection state.
7030 *
7031 * This routine is called from the PCI subsystem for I/O error handling to
7032 * device with SLI-3 interface spec. This function is called by the PCI
7033 * subsystem after a PCI bus error affecting this device has been detected.
7034 * When this function is invoked, it will need to stop all the I/Os and
7035 * interrupt(s) to the device. Once that is done, it will return
7036 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7037 * as desired.
7038 *
7039 * Return codes
7040 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7041 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7042 **/
7043static pci_ers_result_t
7044lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7045{
7046	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7047	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7048	struct lpfc_sli *psli = &phba->sli;
7049	struct lpfc_sli_ring  *pring;
7050
7051	if (state == pci_channel_io_perm_failure) {
7052		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7053				"0472 PCI channel I/O permanent failure\n");
7054		/* Block all SCSI devices' I/Os on the host */
7055		lpfc_scsi_dev_block(phba);
7056		/* Clean up all driver's outstanding SCSI I/Os */
7057		lpfc_sli_flush_fcp_rings(phba);
7058		return PCI_ERS_RESULT_DISCONNECT;
7059	}
7060
7061	pci_disable_device(pdev);
7062	/*
7063	 * There may be I/Os dropped by the firmware.
7064	 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI
7065	 * layer retry them after re-establishing link.
7066	 */
7067	pring = &psli->ring[psli->fcp_ring];
7068	lpfc_sli_abort_iocb_ring(phba, pring);
7069
7070	/* Disable interrupt */
7071	lpfc_sli_disable_intr(phba);
7072
7073	/* Request a slot reset. */
7074	return PCI_ERS_RESULT_NEED_RESET;
7075}
7076
7077/**
7078 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7079 * @pdev: pointer to PCI device.
7080 *
7081 * This routine is called from the PCI subsystem for error handling to
7082 * device with SLI-3 interface spec. This is called after PCI bus has been
7083 * reset to restart the PCI card from scratch, as if from a cold-boot.
7084 * During the PCI subsystem error recovery, after driver returns
7085 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7086 * recovery and then call this routine before calling the .resume method
7087 * to recover the device. This function will initialize the HBA device
7088 * and enable its interrupt, but it will just put the HBA into an offline
7089 * state without passing any I/O traffic.
7090 *
7091 * Return codes
7092 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7093 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7094 */
7095static pci_ers_result_t
7096lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7097{
7098	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7099	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7100	struct lpfc_sli *psli = &phba->sli;
7101	uint32_t intr_mode;
7102
7103	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7104	if (pci_enable_device_mem(pdev)) {
7105		printk(KERN_ERR "lpfc: Cannot re-enable "
7106			"PCI device after reset.\n");
7107		return PCI_ERS_RESULT_DISCONNECT;
7108	}
7109
7110	pci_restore_state(pdev);
7111	if (pdev->is_busmaster)
7112		pci_set_master(pdev);
7113
7114	spin_lock_irq(&phba->hbalock);
7115	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7116	spin_unlock_irq(&phba->hbalock);
7117
7118	/* Configure and enable interrupt */
7119	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7120	if (intr_mode == LPFC_INTR_ERROR) {
7121		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7122				"0427 Cannot re-enable interrupt after "
7123				"slot reset.\n");
7124		return PCI_ERS_RESULT_DISCONNECT;
7125	} else
7126		phba->intr_mode = intr_mode;
7127
7128	/* Take device offline; this will perform cleanup */
7129	lpfc_offline(phba);
7130	lpfc_sli_brdrestart(phba);
7131
7132	/* Log the current active interrupt mode */
7133	lpfc_log_intr_mode(phba, phba->intr_mode);
7134
7135	return PCI_ERS_RESULT_RECOVERED;
7136}
7137
7138/**
7139 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7140 * @pdev: pointer to PCI device
7141 *
7142 * This routine is called from the PCI subsystem for error handling to a device
7143 * with SLI-3 interface spec. It is called when kernel error recovery tells
7144 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7145 * error recovery. After this call, traffic can start to flow from this device
7146 * again.
7147 */
7148static void
7149lpfc_io_resume_s3(struct pci_dev *pdev)
7150{
7151	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7152	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7153
7154	lpfc_online(phba);
7155}
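
/*
 * Note on ordering: during PCI error recovery the kernel drives the
 * three SLI-3 handlers above in sequence -- .error_detected first
 * (returning PCI_ERS_RESULT_NEED_RESET), then .slot_reset once the
 * slot has been reset, then .resume after the driver reports
 * PCI_ERS_RESULT_RECOVERED.
 */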
7156
7157/**
7158 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7159 * @phba: pointer to lpfc hba data structure.
7160 *
7161 * returns the number of ELS/CT IOCBs to reserve
7162 **/
7163int
7164lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7165{
7166	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7167
7168	if (max_xri <= 100)
7169		return 4;
7170	else if (max_xri <= 256)
7171		return 8;
7172	else if (max_xri <= 512)
7173		return 16;
7174	else if (max_xri <= 1024)
7175		return 32;
7176	else
7177		return 48;
7178}
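
/*
 * Worked example of the thresholds above: max_xri = 80 reserves 4
 * ELS/CT IOCBs, 256 reserves 8, 300 reserves 16, 1024 reserves 32,
 * and anything larger reserves 48.
 */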
7179
7180/**
7181 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7182 * @pdev: pointer to PCI device
7183 * @pid: pointer to PCI device identifier
7184 *
7185 * This routine is called from the kernel's PCI subsystem to a device with
7186 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7187 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7188 * information of the device and driver to see if the driver states that it
7189 * can support this kind of device. If the match is successful, the driver
7190 * core invokes this routine. If this routine determines it can claim the HBA,
7191 * it does all the initialization that it needs to do to handle the HBA
7192 * properly.
7193 *
7194 * Return code
7195 * 	0 - driver can claim the device
7196 * 	negative value - driver can not claim the device
7197 **/
7198static int __devinit
7199lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7200{
7201	struct lpfc_hba   *phba;
7202	struct lpfc_vport *vport = NULL;
7203	int error;
7204	uint32_t cfg_mode, intr_mode;
7205	int mcnt;
7206
7207	/* Allocate memory for HBA structure */
7208	phba = lpfc_hba_alloc(pdev);
7209	if (!phba)
7210		return -ENOMEM;
7211
7212	/* Perform generic PCI device enabling operation */
7213	error = lpfc_enable_pci_dev(phba);
7214	if (error) {
7215		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7216				"1409 Failed to enable pci device.\n");
7217		goto out_free_phba;
7218	}
7219
7220	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
7221	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7222	if (error)
7223		goto out_disable_pci_dev;
7224
7225	/* Set up SLI-4 specific device PCI memory space */
7226	error = lpfc_sli4_pci_mem_setup(phba);
7227	if (error) {
7228		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7229				"1410 Failed to set up pci memory space.\n");
7230		goto out_disable_pci_dev;
7231	}
7232
7233	/* Set up phase-1 common device driver resources */
7234	error = lpfc_setup_driver_resource_phase1(phba);
7235	if (error) {
7236		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7237				"1411 Failed to set up driver resource.\n");
7238		goto out_unset_pci_mem_s4;
7239	}
7240
7241	/* Set up SLI-4 Specific device driver resources */
7242	error = lpfc_sli4_driver_resource_setup(phba);
7243	if (error) {
7244		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7245				"1412 Failed to set up driver resource.\n");
7246		goto out_unset_pci_mem_s4;
7247	}
7248
7249	/* Initialize and populate the iocb list per host */
7250	error = lpfc_init_iocb_list(phba,
7251			phba->sli4_hba.max_cfg_param.max_xri);
7252	if (error) {
7253		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7254				"1413 Failed to initialize iocb list.\n");
7255		goto out_unset_driver_resource_s4;
7256	}
7257
7258	/* Set up common device driver resources */
7259	error = lpfc_setup_driver_resource_phase2(phba);
7260	if (error) {
7261		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7262				"1414 Failed to set up driver resource.\n");
7263		goto out_free_iocb_list;
7264	}
7265
7266	/* Create SCSI host to the physical port */
7267	error = lpfc_create_shost(phba);
7268	if (error) {
7269		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7270				"1415 Failed to create scsi host.\n");
7271		goto out_unset_driver_resource;
7272	}
7273
7274	/* Configure sysfs attributes */
7275	vport = phba->pport;
7276	error = lpfc_alloc_sysfs_attr(vport);
7277	if (error) {
7278		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7279				"1416 Failed to allocate sysfs attr\n");
7280		goto out_destroy_shost;
7281	}
7282
7283	/* Now, try to enable interrupt and bring up the device */
7284	cfg_mode = phba->cfg_use_msi;
7285	while (true) {
7286		/* Put device to a known state before enabling interrupt */
7287		lpfc_stop_port(phba);
7288		/* Configure and enable interrupt */
7289		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7290		if (intr_mode == LPFC_INTR_ERROR) {
7291			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7292					"0426 Failed to enable interrupt.\n");
7293			error = -ENODEV;
7294			goto out_free_sysfs_attr;
7295		}
7296		/* Set up SLI-4 HBA */
7297		if (lpfc_sli4_hba_setup(phba)) {
7298			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7299					"1421 Failed to set up hba\n");
7300			error = -ENODEV;
7301			goto out_disable_intr;
7302		}
7303
7304		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
7305		if (intr_mode != 0)
7306			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7307							    LPFC_ACT_INTR_CNT);
7308
7309		/* Check active interrupts received only for MSI/MSI-X */
7310		if (intr_mode == 0 ||
7311		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7312			/* Log the current active interrupt mode */
7313			phba->intr_mode = intr_mode;
7314			lpfc_log_intr_mode(phba, intr_mode);
7315			break;
7316		}
7317		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7318				"0451 Configure interrupt mode (%d) "
7319				"failed active interrupt test.\n",
7320				intr_mode);
7321		/* Unset the previous SLI-4 HBA setup */
7322		lpfc_sli4_unset_hba(phba);
7323		/* Try next level of interrupt mode */
7324		cfg_mode = --intr_mode;
7325	}
7326
7327	/* Perform post initialization setup */
7328	lpfc_post_init_setup(phba);
7329
7330	return 0;
7331
7332out_disable_intr:
7333	lpfc_sli4_disable_intr(phba);
7334out_free_sysfs_attr:
7335	lpfc_free_sysfs_attr(vport);
7336out_destroy_shost:
7337	lpfc_destroy_shost(phba);
7338out_unset_driver_resource:
7339	lpfc_unset_driver_resource_phase2(phba);
7340out_free_iocb_list:
7341	lpfc_free_iocb_list(phba);
7342out_unset_driver_resource_s4:
7343	lpfc_sli4_driver_resource_unset(phba);
7344out_unset_pci_mem_s4:
7345	lpfc_sli4_pci_mem_unset(phba);
7346out_disable_pci_dev:
7347	lpfc_disable_pci_dev(phba);
7348out_free_phba:
7349	lpfc_hba_free(phba);
7350	return error;
7351}
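
/*
 * Illustrative sketch (hypothetical helper, kept out of the build): the
 * SLI-4 active-interrupt test used by the probe loop above.  For
 * MSI/MSI-X, a burst of NOP mailbox commands is issued and the interrupt
 * statistic must reach LPFC_ACT_INTR_CNT; INTx (mode 0) is accepted
 * without the NOP test.
 */
#if 0
static bool
example_sli4_intr_test(struct lpfc_hba *phba, uint32_t intr_mode)
{
	if (intr_mode == 0)
		return true;	/* INTx: no NOP-based verification */
	lpfc_sli4_send_nop_mbox_cmds(phba, LPFC_ACT_INTR_CNT);
	return phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT;
}
#endif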
7352
7353/**
7354 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7355 * @pdev: pointer to PCI device
7356 *
7357 * This routine is called from the kernel's PCI subsystem to a device with
7358 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7359 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7360 * device to be removed from the PCI subsystem properly.
7361 **/
7362static void __devexit
7363lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7364{
7365	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7366	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7367	struct lpfc_vport **vports;
7368	struct lpfc_hba *phba = vport->phba;
7369	int i;
7370
7371	/* Mark the device unloading flag */
7372	spin_lock_irq(&phba->hbalock);
7373	vport->load_flag |= FC_UNLOADING;
7374	spin_unlock_irq(&phba->hbalock);
7375
7376	/* Free the HBA sysfs attributes */
7377	lpfc_free_sysfs_attr(vport);
7378
7379	/* Release all the vports against this physical port */
7380	vports = lpfc_create_vport_work_array(phba);
7381	if (vports != NULL)
7382		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7383			fc_vport_terminate(vports[i]->fc_vport);
7384	lpfc_destroy_vport_work_array(phba, vports);
7385
7386	/* Remove FC host and then SCSI host with the physical port */
7387	fc_remove_host(shost);
7388	scsi_remove_host(shost);
7389
7390	/* Perform cleanup on the physical port */
7391	lpfc_cleanup(vport);
7392
7393	/*
7394	 * Bring down the SLI Layer. This step disables all interrupts,
7395	 * clears the rings, discards all mailbox commands, and resets
7396	 * the HBA FCoE function.
7397	 */
7398	lpfc_debugfs_terminate(vport);
7399	lpfc_sli4_hba_unset(phba);
7400
7401	spin_lock_irq(&phba->hbalock);
7402	list_del_init(&vport->listentry);
7403	spin_unlock_irq(&phba->hbalock);
7404
7405	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7406	 * buffers are released to their corresponding pools here.
7407	 */
7408	lpfc_scsi_free(phba);
7409	lpfc_sli4_driver_resource_unset(phba);
7410
7411	/* Unmap adapter Control and Doorbell registers */
7412	lpfc_sli4_pci_mem_unset(phba);
7413
7414	/* Release PCI resources and disable device's PCI function */
7415	scsi_host_put(shost);
7416	lpfc_disable_pci_dev(phba);
7417
7418	/* Finally, free the driver's device data structure */
7419	lpfc_hba_free(phba);
7420
7421	return;
7422}
7423
7424/**
7425 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7426 * @pdev: pointer to PCI device
7427 * @msg: power management message
7428 *
7429 * This routine is called from the kernel's PCI subsystem to support system
7430 * Power Management (PM) to a device with SLI-4 interface spec. When PM invokes
7431 * this method, it quiesces the device by stopping the driver's worker
7432 * thread for the device, turning off the device's interrupt and DMA, and
7433 * bringing the device offline. Note that the driver implements only the
7434 * minimum PM requirements of a power-aware driver: all possible PM messages
7435 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method call are
7436 * treated as SUSPEND, and the driver fully reinitializes its device during
7437 * the resume() method call. Consequently, the driver sets the device to the
7438 * PCI_D3hot state in PCI config space instead of setting it according to
7439 * the @msg provided by the PM.
7440 *
7441 * Return code
7442 * 	0 - driver suspended the device
7443 * 	Error otherwise
7444 **/
7445static int
7446lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
7447{
7448	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7449	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7450
7451	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7452			"0298 PCI device Power Management suspend.\n");
7453
7454	/* Bring down the device */
7455	lpfc_offline_prep(phba);
7456	lpfc_offline(phba);
7457	kthread_stop(phba->worker_thread);
7458
7459	/* Disable interrupt from device */
7460	lpfc_sli4_disable_intr(phba);
7461
7462	/* Save device state to PCI config space */
7463	pci_save_state(pdev);
7464	pci_set_power_state(pdev, PCI_D3hot);
7465
7466	return 0;
7467}
7468
7469/**
7470 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7471 * @pdev: pointer to PCI device
7472 *
7473 * This routine is called from the kernel's PCI subsystem to support system
7474 * Power Management (PM) to a device with SLI-4 interface spec. When PM invokes
7475 * this method, it restores the device's PCI config space state and fully
7476 * reinitializes the device and brings it online. Note that the driver
7477 * implements only the minimum PM requirements of a power-aware driver: all
7478 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
7479 * method call are treated as SUSPEND, and the driver fully reinitializes
7480 * its device during the resume() method call. Consequently, the device is
7481 * set to PCI_D0 directly in PCI config space before restoring the saved
7482 * state.
7483 *
7484 * Return code
7485 * 	0 - driver resumed the device
7486 * 	Error otherwise
7487 **/
7488static int
7489lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7490{
7491	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7492	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7493	uint32_t intr_mode;
7494	int error;
7495
7496	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7497			"0292 PCI device Power Management resume.\n");
7498
7499	/* Restore device state from PCI config space */
7500	pci_set_power_state(pdev, PCI_D0);
7501	pci_restore_state(pdev);
7502	if (pdev->is_busmaster)
7503		pci_set_master(pdev);
7504
7505	/* Startup the kernel thread for this host adapter. */
7506	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7507					"lpfc_worker_%d", phba->brd_no);
7508	if (IS_ERR(phba->worker_thread)) {
7509		error = PTR_ERR(phba->worker_thread);
7510		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7511				"0293 PM resume failed to start worker "
7512				"thread: error=x%x.\n", error);
7513		return error;
7514	}
7515
7516	/* Configure and enable interrupt */
7517	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7518	if (intr_mode == LPFC_INTR_ERROR) {
7519		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7520				"0294 PM resume Failed to enable interrupt\n");
7521		return -EIO;
7522	} else
7523		phba->intr_mode = intr_mode;
7524
7525	/* Restart HBA and bring it online */
7526	lpfc_sli_brdrestart(phba);
7527	lpfc_online(phba);
7528
7529	/* Log the current active interrupt mode */
7530	lpfc_log_intr_mode(phba, phba->intr_mode);
7531
7532	return 0;
7533}
7534
7535/**
7536 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7537 * @pdev: pointer to PCI device.
7538 * @state: the current PCI connection state.
7539 *
7540 * This routine is called from the PCI subsystem for error handling to a device
7541 * with SLI-4 interface spec. This function is called by the PCI subsystem
7542 * after a PCI bus error affecting this device has been detected. When this
7543 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7544 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7545 * for the PCI subsystem to perform proper recovery as desired.
7546 *
7547 * Return codes
7548 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7549 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7550 **/
7551static pci_ers_result_t
7552lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7553{
7554	return PCI_ERS_RESULT_NEED_RESET;
7555}
7556
7557/**
7558 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7559 * @pdev: pointer to PCI device.
7560 *
7561 * This routine is called from the PCI subsystem for error handling to a device
7562 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7563 * restart the PCI card from scratch, as if from a cold-boot. During the
7564 * PCI subsystem error recovery, after the driver returns
7565 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7566 * recovery and then call this routine before calling the .resume method to
7567 * recover the device. This function will initialize the HBA device and
7568 * enable its interrupt, but it will just put the HBA into an offline state
7569 * without passing any I/O traffic.
7570 *
7571 * Return codes
7572 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7573 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7574 */
7575static pci_ers_result_t
7576lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7577{
7578	return PCI_ERS_RESULT_RECOVERED;
7579}
7580
7581/**
7582 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7583 * @pdev: pointer to PCI device
7584 *
7585 * This routine is called from the PCI subsystem for error handling to a device
7586 * with SLI-4 interface spec. It is called when kernel error recovery tells
7587 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7588 * error recovery. After this call, traffic can start to flow from this device
7589 * again.
7590 **/
7591static void
7592lpfc_io_resume_s4(struct pci_dev *pdev)
7593{
7594	return;
7595}
7596
7597/**
7598 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7599 * @pdev: pointer to PCI device
7600 * @pid: pointer to PCI device identifier
7601 *
7602 * This routine is to be registered to the kernel's PCI subsystem. When an
7603 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7604 * at PCI device-specific information of the device and driver to see if the
7605 * driver states that it can support this kind of device. If the match is
7606 * successful, the driver core invokes this routine. This routine dispatches
7607 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7608 * do all the initialization that it needs to do to handle the HBA device
7609 * properly.
7610 *
7611 * Return code
7612 * 	0 - driver can claim the device
7613 * 	negative value - driver can not claim the device
7614 **/
7615static int __devinit
7616lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7617{
7618	int rc;
7619	uint16_t dev_id;
7620
7621	if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7622		return -ENODEV;
7623
7624	switch (dev_id) {
7625	case PCI_DEVICE_ID_TIGERSHARK:
7626	case PCI_DEVICE_ID_TIGERSHARK_S:
7627		rc = lpfc_pci_probe_one_s4(pdev, pid);
7628		break;
7629	default:
7630		rc = lpfc_pci_probe_one_s3(pdev, pid);
7631		break;
7632	}
7633	return rc;
7634}
7635
7636/**
7637 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7638 * @pdev: pointer to PCI device
7639 *
7640 * This routine is to be registered to the kernel's PCI subsystem. When an
7641 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7642 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7643 * remove routine, which will perform all the necessary cleanup for the
7644 * device to be removed from the PCI subsystem properly.
7645 **/
7646static void __devexit
7647lpfc_pci_remove_one(struct pci_dev *pdev)
7648{
7649	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7650	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7651
7652	switch (phba->pci_dev_grp) {
7653	case LPFC_PCI_DEV_LP:
7654		lpfc_pci_remove_one_s3(pdev);
7655		break;
7656	case LPFC_PCI_DEV_OC:
7657		lpfc_pci_remove_one_s4(pdev);
7658		break;
7659	default:
7660		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7661				"1424 Invalid PCI device group: 0x%x\n",
7662				phba->pci_dev_grp);
7663		break;
7664	}
7665	return;
7666}
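
/*
 * Note: this routine and the suspend, resume, and error handlers below
 * all dispatch on phba->pci_dev_grp the same way -- LPFC_PCI_DEV_LP
 * selects the SLI-3 (_s3) variant and LPFC_PCI_DEV_OC the SLI-4 (_s4)
 * variant -- so only the probe path needs to inspect the PCI device ID.
 */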
7667
7668/**
7669 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7670 * @pdev: pointer to PCI device
7671 * @msg: power management message
7672 *
7673 * This routine is to be registered to the kernel's PCI subsystem to support
7674 * system Power Management (PM). When PM invokes this method, it dispatches
7675 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7676 * suspend the device.
7677 *
7678 * Return code
7679 * 	0 - driver suspended the device
7680 * 	Error otherwise
7681 **/
7682static int
7683lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7684{
7685	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7686	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7687	int rc = -ENODEV;
7688
7689	switch (phba->pci_dev_grp) {
7690	case LPFC_PCI_DEV_LP:
7691		rc = lpfc_pci_suspend_one_s3(pdev, msg);
7692		break;
7693	case LPFC_PCI_DEV_OC:
7694		rc = lpfc_pci_suspend_one_s4(pdev, msg);
7695		break;
7696	default:
7697		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7698				"1425 Invalid PCI device group: 0x%x\n",
7699				phba->pci_dev_grp);
7700		break;
7701	}
7702	return rc;
7703}
7704
7705/**
7706 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7707 * @pdev: pointer to PCI device
7708 *
7709 * This routine is to be registered to the kernel's PCI subsystem to support
7710 * system Power Management (PM). When PM invokes this method, it dispatches
7711 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7712 * resume the device.
7713 *
7714 * Return code
7715 * 	0 - driver resumed the device
7716 * 	Error otherwise
7717 **/
7718static int
7719lpfc_pci_resume_one(struct pci_dev *pdev)
7720{
7721	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7722	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7723	int rc = -ENODEV;
7724
7725	switch (phba->pci_dev_grp) {
7726	case LPFC_PCI_DEV_LP:
7727		rc = lpfc_pci_resume_one_s3(pdev);
7728		break;
7729	case LPFC_PCI_DEV_OC:
7730		rc = lpfc_pci_resume_one_s4(pdev);
7731		break;
7732	default:
7733		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7734				"1426 Invalid PCI device group: 0x%x\n",
7735				phba->pci_dev_grp);
7736		break;
7737	}
7738	return rc;
7739}
7740
7741/**
7742 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7743 * @pdev: pointer to PCI device.
7744 * @state: the current PCI connection state.
7745 *
7746 * This routine is registered to the PCI subsystem for error handling. This
7747 * function is called by the PCI subsystem after a PCI bus error affecting
7748 * this device has been detected. When this routine is invoked, it dispatches
7749 * the action to the proper SLI-3 or SLI-4 device error detected handling
7750 * routine, which will perform the proper error detected operation.
7751 *
7752 * Return codes
7753 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7754 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7755 **/
7756static pci_ers_result_t
7757lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7758{
7759	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7760	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7761	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7762
7763	switch (phba->pci_dev_grp) {
7764	case LPFC_PCI_DEV_LP:
7765		rc = lpfc_io_error_detected_s3(pdev, state);
7766		break;
7767	case LPFC_PCI_DEV_OC:
7768		rc = lpfc_io_error_detected_s4(pdev, state);
7769		break;
7770	default:
7771		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7772				"1427 Invalid PCI device group: 0x%x\n",
7773				phba->pci_dev_grp);
7774		break;
7775	}
7776	return rc;
7777}
7778
7779/**
7780 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7781 * @pdev: pointer to PCI device.
7782 *
7783 * This routine is registered to the PCI subsystem for error handling. This
7784 * function is called after PCI bus has been reset to restart the PCI card
7785 * from scratch, as if from a cold-boot. When this routine is invoked, it
7786 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7787 * routine, which will perform the proper device reset.
7788 *
7789 * Return codes
7790 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7791 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7792 **/
7793static pci_ers_result_t
7794lpfc_io_slot_reset(struct pci_dev *pdev)
7795{
7796	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7797	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7798	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7799
7800	switch (phba->pci_dev_grp) {
7801	case LPFC_PCI_DEV_LP:
7802		rc = lpfc_io_slot_reset_s3(pdev);
7803		break;
7804	case LPFC_PCI_DEV_OC:
7805		rc = lpfc_io_slot_reset_s4(pdev);
7806		break;
7807	default:
7808		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7809				"1428 Invalid PCI device group: 0x%x\n",
7810				phba->pci_dev_grp);
7811		break;
7812	}
7813	return rc;
7814}
7815
7816/**
7817 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7818 * @pdev: pointer to PCI device
7819 *
7820 * This routine is registered to the PCI subsystem for error handling. It
7821 * is called when kernel error recovery tells the lpfc driver that it is
7822 * OK to resume normal PCI operation after PCI bus error recovery. When
7823 * this routine is invoked, it dispatches the action to the proper SLI-3
7824 * or SLI-4 device io_resume routine, which will resume the device operation.
7825 **/
7826static void
7827lpfc_io_resume(struct pci_dev *pdev)
7828{
7829	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7830	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7831
7832	switch (phba->pci_dev_grp) {
7833	case LPFC_PCI_DEV_LP:
7834		lpfc_io_resume_s3(pdev);
7835		break;
7836	case LPFC_PCI_DEV_OC:
7837		lpfc_io_resume_s4(pdev);
7838		break;
7839	default:
7840		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7841				"1429 Invalid PCI device group: 0x%x\n",
7842				phba->pci_dev_grp);
7843		break;
7844	}
7845	return;
7846}
7847
7848static struct pci_device_id lpfc_id_table[] = {
7849	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
7850		PCI_ANY_ID, PCI_ANY_ID, },
7851	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
7852		PCI_ANY_ID, PCI_ANY_ID, },
7853	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
7854		PCI_ANY_ID, PCI_ANY_ID, },
7855	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
7856		PCI_ANY_ID, PCI_ANY_ID, },
7857	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
7858		PCI_ANY_ID, PCI_ANY_ID, },
7859	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
7860		PCI_ANY_ID, PCI_ANY_ID, },
7861	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
7862		PCI_ANY_ID, PCI_ANY_ID, },
7863	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
7864		PCI_ANY_ID, PCI_ANY_ID, },
7865	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
7866		PCI_ANY_ID, PCI_ANY_ID, },
7867	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
7868		PCI_ANY_ID, PCI_ANY_ID, },
7869	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
7870		PCI_ANY_ID, PCI_ANY_ID, },
7871	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
7872		PCI_ANY_ID, PCI_ANY_ID, },
7873	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
7874		PCI_ANY_ID, PCI_ANY_ID, },
7875	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
7876		PCI_ANY_ID, PCI_ANY_ID, },
7877	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
7878		PCI_ANY_ID, PCI_ANY_ID, },
7879	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
7880		PCI_ANY_ID, PCI_ANY_ID, },
7881	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
7882		PCI_ANY_ID, PCI_ANY_ID, },
7883	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
7884		PCI_ANY_ID, PCI_ANY_ID, },
7885	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
7886		PCI_ANY_ID, PCI_ANY_ID, },
7887	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
7888		PCI_ANY_ID, PCI_ANY_ID, },
7889	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
7890		PCI_ANY_ID, PCI_ANY_ID, },
7891	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
7892		PCI_ANY_ID, PCI_ANY_ID, },
7893	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
7894		PCI_ANY_ID, PCI_ANY_ID, },
7895	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
7896		PCI_ANY_ID, PCI_ANY_ID, },
7897	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
7898		PCI_ANY_ID, PCI_ANY_ID, },
7899	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
7900		PCI_ANY_ID, PCI_ANY_ID, },
7901	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
7902		PCI_ANY_ID, PCI_ANY_ID, },
7903	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
7904		PCI_ANY_ID, PCI_ANY_ID, },
7905	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
7906		PCI_ANY_ID, PCI_ANY_ID, },
7907	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
7908		PCI_ANY_ID, PCI_ANY_ID, },
7909	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
7910		PCI_ANY_ID, PCI_ANY_ID, },
7911	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
7912		PCI_ANY_ID, PCI_ANY_ID, },
7913	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
7914		PCI_ANY_ID, PCI_ANY_ID, },
7915	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
7916		PCI_ANY_ID, PCI_ANY_ID, },
7917	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
7918		PCI_ANY_ID, PCI_ANY_ID, },
7919	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
7920		PCI_ANY_ID, PCI_ANY_ID, },
7921	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
7922		PCI_ANY_ID, PCI_ANY_ID, },
7923	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7924		PCI_ANY_ID, PCI_ANY_ID, },
7925	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7926		PCI_ANY_ID, PCI_ANY_ID, },
7927	{ 0 }
7928};
7929
7930MODULE_DEVICE_TABLE(pci, lpfc_id_table);
7931
7932static struct pci_error_handlers lpfc_err_handler = {
7933	.error_detected = lpfc_io_error_detected,
7934	.slot_reset = lpfc_io_slot_reset,
7935	.resume = lpfc_io_resume,
7936};
7937
7938static struct pci_driver lpfc_driver = {
7939	.name		= LPFC_DRIVER_NAME,
7940	.id_table	= lpfc_id_table,
7941	.probe		= lpfc_pci_probe_one,
7942	.remove		= __devexit_p(lpfc_pci_remove_one),
7943	.suspend        = lpfc_pci_suspend_one,
7944	.resume		= lpfc_pci_resume_one,
7945	.err_handler    = &lpfc_err_handler,
7946};
7947
7948/**
7949 * lpfc_init - lpfc module initialization routine
7950 *
7951 * This routine is to be invoked when the lpfc module is loaded into the
7952 * kernel. The special kernel macro module_init() is used to indicate the
7953 * role of this routine to the kernel as lpfc module entry point.
7954 *
7955 * Return codes
7956 *   0 - successful
7957 *   -ENOMEM - FC attach transport failed
7958 *   all others - failed
7959 */
7960static int __init
7961lpfc_init(void)
7962{
7963	int error = 0;
7964
7965	printk(LPFC_MODULE_DESC "\n");
7966	printk(LPFC_COPYRIGHT "\n");
7967
7968	if (lpfc_enable_npiv) {
7969		lpfc_transport_functions.vport_create = lpfc_vport_create;
7970		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
7971	}
7972	lpfc_transport_template =
7973				fc_attach_transport(&lpfc_transport_functions);
7974	if (lpfc_transport_template == NULL)
7975		return -ENOMEM;
7976	if (lpfc_enable_npiv) {
7977		lpfc_vport_transport_template =
7978			fc_attach_transport(&lpfc_vport_transport_functions);
7979		if (lpfc_vport_transport_template == NULL) {
7980			fc_release_transport(lpfc_transport_template);
7981			return -ENOMEM;
7982		}
7983	}
7984	error = pci_register_driver(&lpfc_driver);
7985	if (error) {
7986		fc_release_transport(lpfc_transport_template);
7987		if (lpfc_enable_npiv)
7988			fc_release_transport(lpfc_vport_transport_template);
7989	}
7990
7991	return error;
7992}
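
/*
 * Note on ordering: lpfc_init() attaches the FC transport template(s)
 * before registering the PCI driver, and releases them again if
 * pci_register_driver() fails; lpfc_exit() below unwinds in the
 * reverse order.
 */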
7993
7994/**
7995 * lpfc_exit - lpfc module removal routine
7996 *
7997 * This routine is invoked when the lpfc module is removed from the kernel.
7998 * The special kernel macro module_exit() is used to indicate the role of
7999 * this routine to the kernel as lpfc module exit point.
8000 */
8001static void __exit
8002lpfc_exit(void)
8003{
8004	pci_unregister_driver(&lpfc_driver);
8005	fc_release_transport(lpfc_transport_template);
8006	if (lpfc_enable_npiv)
8007		fc_release_transport(lpfc_vport_transport_template);
8008	if (_dump_buf_data) {
8009		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
8010				"at 0x%p\n",
8011				(1L << _dump_buf_data_order), _dump_buf_data);
8012		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8013	}
8014
8015	if (_dump_buf_dif) {
8016		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
8017				"at 0x%p\n",
8018				(1L << _dump_buf_dif_order), _dump_buf_dif);
8019		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8020	}
8021}
8022
8023module_init(lpfc_init);
8024module_exit(lpfc_exit);
8025MODULE_LICENSE("GPL");
8026MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8027MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8028MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
8029