lpfc_init.c revision fc2b989be9190f3311a5ae41289828e24897a20e
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

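			/* Byte-swap the license key into big-endian form
			 * (cpu_to_be32 is a no-op on big-endian hosts) and
			 * use init_key so the one-time conversion is not
			 * repeated on subsequent calls.
			 */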
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

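	/* The VPD region may span multiple mailbox responses, so it is
	 * fetched in chunks: each DUMP_MEMORY command reports in word_cnt
	 * how much data it returned at the current offset, and the loop
	 * advances offset until the adapter reports zero or the
	 * DMP_VPD_SIZE staging buffer is full.
	 */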
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* Dump mem may return zero when finished, or we may have hit
		 * a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it will set the internal async event
 * support flag to 1; otherwise, it will set the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the Option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the config port completed correctly the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

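		/* Each WWNN byte expands to two serial-number characters:
		 * high nibble first, with values 0-9 mapped to '0'-'9'
		 * (0x30 + nibble) and 10-15 mapped to 'a'-'f'
		 * (0x61 + nibble - 10).
		 */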
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology,
		phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expires
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer is set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

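	/* Free ELS buffers parked on the deferred-free elsbuf list once the
	 * count has been stable for a full heartbeat interval (elsbuf_cnt
	 * unchanged since the previous pass means no new buffers arrived).
	 */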
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If the heart-beat timeout was called with
			 * hb_outstanding set, we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered the error attention. That could
	 * cause I/Os to be dropped by the firmware. Error out the I/Os on
	 * the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

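/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the management app
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor event with subcategory
 * LPFC_EVENT_PORTINTERR on the physical port's Scsi_Host so that user-space
 * management applications are notified of the board error.
 **/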
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered the error attention with
		 * HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the I/Os on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
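	/* Walk the VPD resource list (PCI VPD format): tags 0x82 (identifier
	 * string) and 0x91 (read/write fields) are skipped over, tag 0x90
	 * (read-only fields) is scanned for the SN and V1-V4 keywords, and
	 * tag 0x78 (end tag) terminates the walk.
	 */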
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') &&
				    (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] =
							vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] =
							vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}
1824
1825	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* OneConnect HBAs require special processing; they are all initiators
	 * and the port number is appended to the end of the description.
	 */
1830	if (descp && descp[0] == '\0') {
1831		if (oneConnect)
1832			snprintf(descp, 255,
1833				"Emulex OneConnect %s, %s Initiator, Port %s",
1834				m.name, m.function,
1835				phba->Port);
1836		else
1837			snprintf(descp, 255,
1838				"Emulex %s %d%s %s %s",
1839				m.name, max_speed, (GE) ? "GE" : "Gb",
1840				m.bus, m.function);
1841	}
1842}
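
/*
 * Illustration of the strings produced above (hedged example with a
 * hypothetical configuration): for PCI_DEVICE_ID_SAT ("LPe12000") with
 * LMT_10Gb clear and LMT_8Gb set, @mdp becomes "LPe12000" and @descp becomes
 * "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter".  A OneConnect device
 * such as PCI_DEVICE_ID_TIGERSHARK instead yields
 * "Emulex OneConnect OCe10100, FCoE Initiator, Port <n>", where the port
 * designator is taken from phba->Port.
 */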
1843
1844/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
1848 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1849 *
1850 * This routine posts a given number of IOCBs with the associated DMA buffer
1851 * descriptors specified by the cnt argument to the given IOCB ring.
1852 *
1853 * Return codes
1854 *   The number of IOCBs NOT able to be posted to the IOCB ring.
1855 **/
1856int
1857lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1858{
1859	IOCB_t *icmd;
1860	struct lpfc_iocbq *iocb;
1861	struct lpfc_dmabuf *mp1, *mp2;
1862
1863	cnt += pring->missbufcnt;
1864
1865	/* While there are buffers to post */
1866	while (cnt > 0) {
		/* Allocate buffer for command iocb */
1868		iocb = lpfc_sli_get_iocbq(phba);
1869		if (iocb == NULL) {
1870			pring->missbufcnt = cnt;
1871			return cnt;
1872		}
1873		icmd = &iocb->iocb;
1874
1875		/* 2 buffers can be posted per command */
1876		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1880		if (!mp1 || !mp1->virt) {
1881			kfree(mp1);
1882			lpfc_sli_release_iocbq(phba, iocb);
1883			pring->missbufcnt = cnt;
1884			return cnt;
1885		}
1886
1887		INIT_LIST_HEAD(&mp1->list);
1888		/* Allocate buffer to post */
1889		if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1891			if (mp2)
1892				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1893							    &mp2->phys);
1894			if (!mp2 || !mp2->virt) {
1895				kfree(mp2);
1896				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1897				kfree(mp1);
1898				lpfc_sli_release_iocbq(phba, iocb);
1899				pring->missbufcnt = cnt;
1900				return cnt;
1901			}
1902
1903			INIT_LIST_HEAD(&mp2->list);
1904		} else {
1905			mp2 = NULL;
1906		}
1907
1908		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1909		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1910		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1911		icmd->ulpBdeCount = 1;
1912		cnt--;
1913		if (mp2) {
1914			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1915			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1916			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1917			cnt--;
1918			icmd->ulpBdeCount = 2;
1919		}
1920
1921		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1922		icmd->ulpLe = 1;
1923
1924		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1925		    IOCB_ERROR) {
1926			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1927			kfree(mp1);
1928			cnt++;
1929			if (mp2) {
1930				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1931				kfree(mp2);
1932				cnt++;
1933			}
1934			lpfc_sli_release_iocbq(phba, iocb);
1935			pring->missbufcnt = cnt;
1936			return cnt;
1937		}
1938		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1939		if (mp2)
1940			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1941	}
1942	pring->missbufcnt = 0;
1943	return 0;
1944}
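
/*
 * Usage sketch (illustration only, not driver code): a caller hands in a
 * batch size and relies on pring->missbufcnt to carry any shortfall into
 * the next invocation, so a transient allocation failure is retried
 * rather than lost:
 *
 *	int shortfall = lpfc_post_buffer(phba, pring, 64);
 *	(a non-zero shortfall is also recorded in pring->missbufcnt and
 *	 added back onto the count on the next call)
 */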
1945
1946/**
1947 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1948 * @phba: pointer to lpfc hba data structure.
1949 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * number of buffers posted, specified by LPFC_BUF_RING0, is currently
 * set to 64 IOCBs.
1953 *
1954 * Return codes
1955 *   0 - success (currently always success)
1956 **/
1957static int
1958lpfc_post_rcv_buf(struct lpfc_hba *phba)
1959{
1960	struct lpfc_sli *psli = &phba->sli;
1961
1962	/* Ring 0, ELS / CT buffers */
1963	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1964	/* Ring 2 - FCP no buffers needed */
1965
1966	return 0;
1967}
1968
1969#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
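
/*
 * S(N,V) is the 32-bit circular left shift (rotate) used by the SHA-1
 * block transform below.  A hedged equivalent, for illustration only
 * (rol32_example is a hypothetical name, not a driver symbol):
 *
 *	static inline uint32_t rol32_example(uint32_t v, int n)
 *	{
 *		return (v << n) | (v >> (32 - n));
 *	}
 *
 * e.g. S(1, 0x80000000) yields 0x00000001.
 */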
1970
1971/**
1972 * lpfc_sha_init - Set up initial array of hash table entries
1973 * @HashResultPointer: pointer to an array as hash table.
1974 *
1975 * This routine sets up the initial values to the array of hash table entries
1976 * for the LC HBAs.
1977 **/
1978static void
1979lpfc_sha_init(uint32_t * HashResultPointer)
1980{
1981	HashResultPointer[0] = 0x67452301;
1982	HashResultPointer[1] = 0xEFCDAB89;
1983	HashResultPointer[2] = 0x98BADCFE;
1984	HashResultPointer[3] = 0x10325476;
1985	HashResultPointer[4] = 0xC3D2E1F0;
1986}
1987
1988/**
1989 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1990 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by @HashResultPointer
 * with the values from the working hash table pointed to by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * @HashResultPointer as the result hash table.
1997 **/
1998static void
1999lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2000{
2001	int t;
2002	uint32_t TEMP;
2003	uint32_t A, B, C, D, E;
2004	t = 16;
2005	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2011	} while (++t <= 79);
2012	t = 0;
2013	A = HashResultPointer[0];
2014	B = HashResultPointer[1];
2015	C = HashResultPointer[2];
2016	D = HashResultPointer[3];
2017	E = HashResultPointer[4];
2018
2019	do {
2020		if (t < 20) {
2021			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2022		} else if (t < 40) {
2023			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2024		} else if (t < 60) {
2025			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2026		} else {
2027			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2028		}
2029		TEMP += S(5, A) + E + HashWorkingPointer[t];
2030		E = D;
2031		D = C;
2032		C = S(30, B);
2033		B = A;
2034		A = TEMP;
2035	} while (++t <= 79);
2036
2037	HashResultPointer[0] += A;
2038	HashResultPointer[1] += B;
2039	HashResultPointer[2] += C;
2040	HashResultPointer[3] += D;
2041	HashResultPointer[4] += E;
2042
2043}
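
/*
 * Taken together, lpfc_sha_init() and lpfc_sha_iterate() perform one
 * SHA-1 (FIPS 180-1) block transform over an 80-word message schedule.
 * A hedged sketch of the calling pattern ("digest" and "schedule" are
 * hypothetical names, for illustration only):
 *
 *	uint32_t digest[5], schedule[80];
 *	(fill schedule[0..15] with the 512-bit message block)
 *	lpfc_sha_init(digest);
 *	lpfc_sha_iterate(digest, schedule);
 *	(digest[] now holds the five 32-bit result words)
 */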
2044
2045/**
2046 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2047 * @RandomChallenge: pointer to the entry of host challenge random number array.
2048 * @HashWorking: pointer to the entry of the working hash array.
2049 *
2050 * This routine calculates the working hash array referred by @HashWorking
2051 * from the challenge random numbers associated with the host, referred by
2052 * @RandomChallenge. The result is put into the entry of the working hash
2053 * array and returned by reference through @HashWorking.
2054 **/
2055static void
2056lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2057{
2058	*HashWorking = (*RandomChallenge ^ *HashWorking);
2059}
2060
2061/**
2062 * lpfc_hba_init - Perform special handling for LC HBA initialization
2063 * @phba: pointer to lpfc hba data structure.
2064 * @hbainit: pointer to an array of unsigned 32-bit integers.
2065 *
2066 * This routine performs the special handling for LC HBA initialization.
2067 **/
2068void
2069lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2070{
2071	int t;
2072	uint32_t *HashWorking;
2073	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2074
2075	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2076	if (!HashWorking)
2077		return;
2078
2079	HashWorking[0] = HashWorking[78] = *pwwnn++;
2080	HashWorking[1] = HashWorking[79] = *pwwnn;
2081
2082	for (t = 0; t < 7; t++)
2083		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2084
2085	lpfc_sha_init(hbainit);
2086	lpfc_sha_iterate(hbainit, HashWorking);
2087	kfree(HashWorking);
2088}
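
/*
 * Worked example of the seeding above (values are hypothetical): with a
 * WWNN whose two words are 0x20000000 and 0xC9000000, the code sets
 * HashWorking[0] = HashWorking[78] = 0x20000000 and
 * HashWorking[1] = HashWorking[79] = 0xC9000000, XORs HashWorking[0..6]
 * with phba->RandomData[0..6] through lpfc_challenge_key(), and then
 * runs one SHA-1 transform to produce the five-word @hbainit blob.
 */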
2089
2090/**
2091 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2092 * @vport: pointer to a virtual N_Port data structure.
2093 *
2094 * This routine performs the necessary cleanups before deleting the @vport.
2095 * It invokes the discovery state machine to perform necessary state
2096 * transitions and to release the ndlps associated with the @vport. Note,
2097 * the physical port is treated as @vport 0.
2098 **/
2099void
2100lpfc_cleanup(struct lpfc_vport *vport)
2101{
2102	struct lpfc_hba   *phba = vport->phba;
2103	struct lpfc_nodelist *ndlp, *next_ndlp;
2104	int i = 0;
2105
2106	if (phba->link_state > LPFC_LINK_DOWN)
2107		lpfc_port_link_failure(vport);
2108
2109	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2110		if (!NLP_CHK_NODE_ACT(ndlp)) {
2111			ndlp = lpfc_enable_node(vport, ndlp,
2112						NLP_STE_UNUSED_NODE);
2113			if (!ndlp)
2114				continue;
2115			spin_lock_irq(&phba->ndlp_lock);
2116			NLP_SET_FREE_REQ(ndlp);
2117			spin_unlock_irq(&phba->ndlp_lock);
2118			/* Trigger the release of the ndlp memory */
2119			lpfc_nlp_put(ndlp);
2120			continue;
2121		}
2122		spin_lock_irq(&phba->ndlp_lock);
2123		if (NLP_CHK_FREE_REQ(ndlp)) {
2124			/* The ndlp should not be in memory free mode already */
2125			spin_unlock_irq(&phba->ndlp_lock);
2126			continue;
2127		} else
2128			/* Indicate request for freeing ndlp memory */
2129			NLP_SET_FREE_REQ(ndlp);
2130		spin_unlock_irq(&phba->ndlp_lock);
2131
2132		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2133		    ndlp->nlp_DID == Fabric_DID) {
2134			/* Just free up ndlp with Fabric_DID for vports */
2135			lpfc_nlp_put(ndlp);
2136			continue;
2137		}
2138
2139		if (ndlp->nlp_type & NLP_FABRIC)
2140			lpfc_disc_state_machine(vport, ndlp, NULL,
2141					NLP_EVT_DEVICE_RECOVERY);
2142
2143		lpfc_disc_state_machine(vport, ndlp, NULL,
2144					     NLP_EVT_DEVICE_RM);
2145
2146	}
2147
	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
2152	while (!list_empty(&vport->fc_nodes)) {
2153		if (i++ > 3000) {
2154			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2155				"0233 Nodelist not empty\n");
2156			list_for_each_entry_safe(ndlp, next_ndlp,
2157						&vport->fc_nodes, nlp_listp) {
2158				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2159						LOG_NODE,
2160						"0282 did:x%x ndlp:x%p "
2161						"usgmap:x%x refcnt:%d\n",
2162						ndlp->nlp_DID, (void *)ndlp,
2163						ndlp->nlp_usg_map,
2164						atomic_read(
2165							&ndlp->kref.refcount));
2166			}
2167			break;
2168		}
2169
2170		/* Wait for any activity on ndlps to settle */
2171		msleep(10);
2172	}
2173}
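
/*
 * Note on the wait loop above: it polls every 10 ms and gives up after
 * just over 3000 iterations, i.e. roughly 30 seconds, at which point it
 * logs the ndlps that still hold references instead of blocking the
 * teardown indefinitely.
 */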
2174
2175/**
2176 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2177 * @vport: pointer to a virtual N_Port data structure.
2178 *
2179 * This routine stops all the timers associated with a @vport. This function
2180 * is invoked before disabling or deleting a @vport. Note that the physical
2181 * port is treated as @vport 0.
2182 **/
2183void
2184lpfc_stop_vport_timers(struct lpfc_vport *vport)
2185{
2186	del_timer_sync(&vport->els_tmofunc);
2187	del_timer_sync(&vport->fc_fdmitmo);
2188	lpfc_can_disctmo(vport);
2189	return;
2190}
2191
2192/**
2193 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2194 * @phba: pointer to lpfc hba data structure.
2195 *
2196 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the hbalock.
2198 **/
2199void
2200__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2201{
2202	/* Clear pending FCF rediscovery wait and failover in progress flags */
2203	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2204				FCF_DEAD_FOVER  |
2205				FCF_CVL_FOVER);
2206	/* Now, try to stop the timer */
2207	del_timer(&phba->fcf.redisc_wait);
2208}
2209
2210/**
2211 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2212 * @phba: pointer to lpfc hba data structure.
2213 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks, with the hbalock held, whether the FCF rediscovery wait timer
 * is pending before proceeding to disable the timer and clear the wait
 * timer pending flag.
2218 **/
2219void
2220lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2221{
2222	spin_lock_irq(&phba->hbalock);
2223	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2224		/* FCF rediscovery timer already fired or stopped */
2225		spin_unlock_irq(&phba->hbalock);
2226		return;
2227	}
2228	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2229	spin_unlock_irq(&phba->hbalock);
2230}
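
/*
 * The two routines above follow the usual locked/unlocked pairing: the
 * double-underscore variant assumes the caller already holds
 * phba->hbalock, while the plain variant acquires it.  A hedged sketch
 * of a caller that already holds the lock (illustration only):
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
 *		__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 *	spin_unlock_irq(&phba->hbalock);
 */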
2231
2232/**
2233 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2234 * @phba: pointer to lpfc hba data structure.
2235 *
2236 * This routine stops all the timers associated with a HBA. This function is
2237 * invoked before either putting a HBA offline or unloading the driver.
2238 **/
2239void
2240lpfc_stop_hba_timers(struct lpfc_hba *phba)
2241{
2242	lpfc_stop_vport_timers(phba->pport);
2243	del_timer_sync(&phba->sli.mbox_tmo);
2244	del_timer_sync(&phba->fabric_block_timer);
2245	del_timer_sync(&phba->eratt_poll);
2246	del_timer_sync(&phba->hb_tmofunc);
2247	phba->hb_outstanding = 0;
2248
2249	switch (phba->pci_dev_grp) {
2250	case LPFC_PCI_DEV_LP:
2251		/* Stop any LightPulse device specific driver timers */
2252		del_timer_sync(&phba->fcp_poll_timer);
2253		break;
2254	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
2256		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2257		break;
2258	default:
2259		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2260				"0297 Invalid device group (x%x)\n",
2261				phba->pci_dev_grp);
2262		break;
2263	}
2264	return;
2265}
2266
2267/**
2268 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2269 * @phba: pointer to lpfc hba data structure.
2270 *
2271 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
2275 * driver prepares the HBA interface for online or offline.
2276 **/
2277static void
2278lpfc_block_mgmt_io(struct lpfc_hba * phba)
2279{
2280	unsigned long iflag;
2281
2282	spin_lock_irqsave(&phba->hbalock, iflag);
2283	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2284	spin_unlock_irqrestore(&phba->hbalock, iflag);
2285}
2286
2287/**
2288 * lpfc_online - Initialize and bring a HBA online
2289 * @phba: pointer to lpfc hba data structure.
2290 *
2291 * This routine initializes the HBA and brings a HBA online. During this
2292 * process, the management interface is blocked to prevent user space access
2293 * to the HBA interfering with the driver initialization.
2294 *
2295 * Return codes
2296 *   0 - successful
2297 *   1 - failed
2298 **/
2299int
2300lpfc_online(struct lpfc_hba *phba)
2301{
2302	struct lpfc_vport *vport;
2303	struct lpfc_vport **vports;
2304	int i;
2305
2306	if (!phba)
2307		return 0;
2308	vport = phba->pport;
2309
2310	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2311		return 0;
2312
2313	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2314			"0458 Bring Adapter online\n");
2315
2316	lpfc_block_mgmt_io(phba);
2317
2318	if (!lpfc_sli_queue_setup(phba)) {
2319		lpfc_unblock_mgmt_io(phba);
2320		return 1;
2321	}
2322
2323	if (phba->sli_rev == LPFC_SLI_REV4) {
2324		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2325			lpfc_unblock_mgmt_io(phba);
2326			return 1;
2327		}
2328	} else {
2329		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2330			lpfc_unblock_mgmt_io(phba);
2331			return 1;
2332		}
2333	}
2334
2335	vports = lpfc_create_vport_work_array(phba);
2336	if (vports != NULL)
2337		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2338			struct Scsi_Host *shost;
2339			shost = lpfc_shost_from_vport(vports[i]);
2340			spin_lock_irq(shost->host_lock);
2341			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2342			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2343				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2344			if (phba->sli_rev == LPFC_SLI_REV4)
2345				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2346			spin_unlock_irq(shost->host_lock);
2347		}
	lpfc_destroy_vport_work_array(phba, vports);
2349
2350	lpfc_unblock_mgmt_io(phba);
2351	return 0;
2352}
2353
2354/**
2355 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2356 * @phba: pointer to lpfc hba data structure.
2357 *
2358 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space
 * access to the HBA, whether from the sysfs interface or the libdfc
 * interface, will be allowed. The HBA is set to block the management interface
2362 * when the driver prepares the HBA interface for online or offline and then
2363 * set to unblock the management interface afterwards.
2364 **/
2365void
2366lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2367{
2368	unsigned long iflag;
2369
2370	spin_lock_irqsave(&phba->hbalock, iflag);
2371	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2372	spin_unlock_irqrestore(&phba->hbalock, iflag);
2373}
2374
2375/**
2376 * lpfc_offline_prep - Prepare a HBA to be brought offline
2377 * @phba: pointer to lpfc hba data structure.
2378 *
2379 * This routine is invoked to prepare a HBA to be brought offline. It performs
2380 * unregistration login to all the nodes on all vports and flushes the mailbox
2381 * queue to make it ready to be brought offline.
2382 **/
2383void
2384lpfc_offline_prep(struct lpfc_hba * phba)
2385{
2386	struct lpfc_vport *vport = phba->pport;
2387	struct lpfc_nodelist  *ndlp, *next_ndlp;
2388	struct lpfc_vport **vports;
2389	struct Scsi_Host *shost;
2390	int i;
2391
2392	if (vport->fc_flag & FC_OFFLINE_MODE)
2393		return;
2394
2395	lpfc_block_mgmt_io(phba);
2396
2397	lpfc_linkdown(phba);
2398
2399	/* Issue an unreg_login to all nodes on all vports */
2400	vports = lpfc_create_vport_work_array(phba);
2401	if (vports != NULL) {
2402		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2403			if (vports[i]->load_flag & FC_UNLOADING)
2404				continue;
2405			shost = lpfc_shost_from_vport(vports[i]);
2406			spin_lock_irq(shost->host_lock);
2407			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2408			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2409			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2410			spin_unlock_irq(shost->host_lock);
2413			list_for_each_entry_safe(ndlp, next_ndlp,
2414						 &vports[i]->fc_nodes,
2415						 nlp_listp) {
2416				if (!NLP_CHK_NODE_ACT(ndlp))
2417					continue;
2418				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2419					continue;
2420				if (ndlp->nlp_type & NLP_FABRIC) {
2421					lpfc_disc_state_machine(vports[i], ndlp,
2422						NULL, NLP_EVT_DEVICE_RECOVERY);
2423					lpfc_disc_state_machine(vports[i], ndlp,
2424						NULL, NLP_EVT_DEVICE_RM);
2425				}
2426				spin_lock_irq(shost->host_lock);
2427				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2428				spin_unlock_irq(shost->host_lock);
2429				lpfc_unreg_rpi(vports[i], ndlp);
2430			}
2431		}
2432	}
2433	lpfc_destroy_vport_work_array(phba, vports);
2434
2435	lpfc_sli_mbox_sys_shutdown(phba);
2436}
2437
2438/**
2439 * lpfc_offline - Bring a HBA offline
2440 * @phba: pointer to lpfc hba data structure.
2441 *
2442 * This routine actually brings a HBA offline. It stops all the timers
2443 * associated with the HBA, brings down the SLI layer, and eventually
2444 * marks the HBA as in offline state for the upper layer protocol.
2445 **/
2446void
2447lpfc_offline(struct lpfc_hba *phba)
2448{
2449	struct Scsi_Host  *shost;
2450	struct lpfc_vport **vports;
2451	int i;
2452
2453	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2454		return;
2455
2456	/* stop port and all timers associated with this hba */
2457	lpfc_stop_port(phba);
2458	vports = lpfc_create_vport_work_array(phba);
2459	if (vports != NULL)
2460		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2461			lpfc_stop_vport_timers(vports[i]);
2462	lpfc_destroy_vport_work_array(phba, vports);
2463	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2464			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline now. */
2467	lpfc_sli_hba_down(phba);
2468	spin_lock_irq(&phba->hbalock);
2469	phba->work_ha = 0;
2470	spin_unlock_irq(&phba->hbalock);
2471	vports = lpfc_create_vport_work_array(phba);
2472	if (vports != NULL)
2473		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2474			shost = lpfc_shost_from_vport(vports[i]);
2475			spin_lock_irq(shost->host_lock);
2476			vports[i]->work_port_events = 0;
2477			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2478			spin_unlock_irq(shost->host_lock);
2479		}
2480	lpfc_destroy_vport_work_array(phba, vports);
2481}
2482
2483/**
2484 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2485 * @phba: pointer to lpfc hba data structure.
2486 *
2487 * This routine is to free all the SCSI buffers and IOCBs from the driver
2488 * list back to kernel. It is called from lpfc_pci_remove_one to free
2489 * the internal resources before the device is removed from the system.
2490 *
2491 * Return codes
2492 *   0 - successful (for now, it always returns 0)
2493 **/
2494static int
2495lpfc_scsi_free(struct lpfc_hba *phba)
2496{
2497	struct lpfc_scsi_buf *sb, *sb_next;
2498	struct lpfc_iocbq *io, *io_next;
2499
2500	spin_lock_irq(&phba->hbalock);
2501	/* Release all the lpfc_scsi_bufs maintained by this host. */
2502	spin_lock(&phba->scsi_buf_list_lock);
2503	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2504		list_del(&sb->list);
2505		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2506			      sb->dma_handle);
2507		kfree(sb);
2508		phba->total_scsi_bufs--;
2509	}
2510	spin_unlock(&phba->scsi_buf_list_lock);
2511
2512	/* Release all the lpfc_iocbq entries maintained by this host. */
2513	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2514		list_del(&io->list);
2515		kfree(io);
2516		phba->total_iocbq_bufs--;
2517	}
2518	spin_unlock_irq(&phba->hbalock);
2519	return 0;
2520}
2521
2522/**
2523 * lpfc_create_port - Create an FC port
2524 * @phba: pointer to lpfc hba data structure.
2525 * @instance: a unique integer ID to this FC port.
2526 * @dev: pointer to the device data structure.
2527 *
 * This routine creates an FC port for the upper layer protocol. The FC port
2529 * can be created on top of either a physical port or a virtual port provided
2530 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2531 * and associates the FC port created before adding the shost into the SCSI
2532 * layer.
2533 *
2534 * Return codes
2535 *   @vport - pointer to the virtual N_Port data structure.
2536 *   NULL - port create failed.
2537 **/
2538struct lpfc_vport *
2539lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2540{
2541	struct lpfc_vport *vport;
2542	struct Scsi_Host  *shost;
2543	int error = 0;
2544
2545	if (dev != &phba->pcidev->dev)
2546		shost = scsi_host_alloc(&lpfc_vport_template,
2547					sizeof(struct lpfc_vport));
2548	else
2549		shost = scsi_host_alloc(&lpfc_template,
2550					sizeof(struct lpfc_vport));
2551	if (!shost)
2552		goto out;
2553
2554	vport = (struct lpfc_vport *) shost->hostdata;
2555	vport->phba = phba;
2556	vport->load_flag |= FC_LOADING;
2557	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2558	vport->fc_rscn_flush = 0;
2559
2560	lpfc_get_vport_cfgparam(vport);
2561	shost->unique_id = instance;
2562	shost->max_id = LPFC_MAX_TARGET;
2563	shost->max_lun = vport->cfg_max_luns;
2564	shost->this_id = -1;
2565	shost->max_cmd_len = 16;
2566	if (phba->sli_rev == LPFC_SLI_REV4) {
2567		shost->dma_boundary =
2568			phba->sli4_hba.pc_sli4_params.sge_supp_len;
2569		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2570	}
2571
2572	/*
2573	 * Set initial can_queue value since 0 is no longer supported and
2574	 * scsi_add_host will fail. This will be adjusted later based on the
2575	 * max xri value determined in hba setup.
2576	 */
2577	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2578	if (dev != &phba->pcidev->dev) {
2579		shost->transportt = lpfc_vport_transport_template;
2580		vport->port_type = LPFC_NPIV_PORT;
2581	} else {
2582		shost->transportt = lpfc_transport_template;
2583		vport->port_type = LPFC_PHYSICAL_PORT;
2584	}
2585
2586	/* Initialize all internally managed lists. */
2587	INIT_LIST_HEAD(&vport->fc_nodes);
2588	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2589	spin_lock_init(&vport->work_port_lock);
2590
2591	init_timer(&vport->fc_disctmo);
2592	vport->fc_disctmo.function = lpfc_disc_timeout;
2593	vport->fc_disctmo.data = (unsigned long)vport;
2594
2595	init_timer(&vport->fc_fdmitmo);
2596	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2597	vport->fc_fdmitmo.data = (unsigned long)vport;
2598
2599	init_timer(&vport->els_tmofunc);
2600	vport->els_tmofunc.function = lpfc_els_timeout;
2601	vport->els_tmofunc.data = (unsigned long)vport;
2602	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2603		phba->menlo_flag |= HBA_MENLO_SUPPORT;
2604		/* check for menlo minimum sg count */
2605		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2606			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2607			shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2608		}
2609	}
2610
2611	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2612	if (error)
2613		goto out_put_shost;
2614
2615	spin_lock_irq(&phba->hbalock);
2616	list_add_tail(&vport->listentry, &phba->port_list);
2617	spin_unlock_irq(&phba->hbalock);
2618	return vport;
2619
2620out_put_shost:
2621	scsi_host_put(shost);
2622out:
2623	return NULL;
2624}
2625
2626/**
2627 * destroy_port -  destroy an FC port
2628 * @vport: pointer to an lpfc virtual N_Port data structure.
2629 *
 * This routine destroys an FC port from the upper layer protocol. All the
2631 * resources associated with the port are released.
2632 **/
2633void
2634destroy_port(struct lpfc_vport *vport)
2635{
2636	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2637	struct lpfc_hba  *phba = vport->phba;
2638
2639	lpfc_debugfs_terminate(vport);
2640	fc_remove_host(shost);
2641	scsi_remove_host(shost);
2642
2643	spin_lock_irq(&phba->hbalock);
2644	list_del_init(&vport->listentry);
2645	spin_unlock_irq(&phba->hbalock);
2646
2647	lpfc_cleanup(vport);
2648	return;
2649}
2650
2651/**
2652 * lpfc_get_instance - Get a unique integer ID
2653 *
2654 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2655 * uses the kernel idr facility to perform the task.
2656 *
2657 * Return codes:
2658 *   instance - a unique integer ID allocated as the new instance.
2659 *   -1 - lpfc get instance failed.
2660 **/
2661int
2662lpfc_get_instance(void)
2663{
2664	int instance = 0;
2665
2666	/* Assign an unused number */
2667	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2668		return -1;
2669	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2670		return -1;
2671	return instance;
2672}
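
/*
 * idr_pre_get()/idr_get_new() is the two-step idr allocation API of this
 * kernel generation: the first call preallocates layer memory so the
 * second can assign an id without allocating.  Usage sketch (illustration
 * only; the error label is hypothetical):
 *
 *	int instance = lpfc_get_instance();
 *	if (instance == -1)
 *		goto out_error;
 *
 * The id is presumably handed back with idr_remove(&lpfc_hba_index, ...)
 * on the teardown path.
 */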
2673
2674/**
2675 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2676 * @shost: pointer to SCSI host data structure.
2677 * @time: elapsed time of the scan in jiffies.
2678 *
2679 * This routine is called by the SCSI layer with a SCSI host to determine
2680 * whether the scan host is finished.
2681 *
2682 * Note: there is no scan_start function as adapter initialization will have
2683 * asynchronously kicked off the link initialization.
2684 *
2685 * Return codes
2686 *   0 - SCSI host scan is not over yet.
2687 *   1 - SCSI host scan is over.
2688 **/
2689int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2690{
2691	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2692	struct lpfc_hba   *phba = vport->phba;
2693	int stat = 0;
2694
2695	spin_lock_irq(shost->host_lock);
2696
2697	if (vport->load_flag & FC_UNLOADING) {
2698		stat = 1;
2699		goto finished;
2700	}
2701	if (time >= 30 * HZ) {
2702		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2703				"0461 Scanning longer than 30 "
2704				"seconds.  Continuing initialization\n");
2705		stat = 1;
2706		goto finished;
2707	}
2708	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2709		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2710				"0465 Link down longer than 15 "
2711				"seconds.  Continuing initialization\n");
2712		stat = 1;
2713		goto finished;
2714	}
2715
2716	if (vport->port_state != LPFC_VPORT_READY)
2717		goto finished;
2718	if (vport->num_disc_nodes || vport->fc_prli_sent)
2719		goto finished;
2720	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2721		goto finished;
2722	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2723		goto finished;
2724
2725	stat = 1;
2726
2727finished:
2728	spin_unlock_irq(shost->host_lock);
2729	return stat;
2730}
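
/*
 * The timeouts above stage the scan cut-off: report done after 30
 * seconds unconditionally, after 15 seconds if the link never left the
 * down state, and, while no mapped targets have appeared, keep reporting
 * "not finished" until at least 2 seconds have elapsed.
 */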
2731
2732/**
2733 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2734 * @shost: pointer to SCSI host data structure.
2735 *
 * This routine initializes the SCSI host attributes on a given FC port. The
2737 * SCSI host can be either on top of a physical port or a virtual port.
2738 **/
2739void lpfc_host_attrib_init(struct Scsi_Host *shost)
2740{
2741	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2742	struct lpfc_hba   *phba = vport->phba;
2743	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2745	 */
2746
2747	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2748	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2749	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2750
2751	memset(fc_host_supported_fc4s(shost), 0,
2752	       sizeof(fc_host_supported_fc4s(shost)));
2753	fc_host_supported_fc4s(shost)[2] = 1;
2754	fc_host_supported_fc4s(shost)[7] = 1;
2755
2756	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2757				 sizeof fc_host_symbolic_name(shost));
2758
2759	fc_host_supported_speeds(shost) = 0;
2760	if (phba->lmt & LMT_10Gb)
2761		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2762	if (phba->lmt & LMT_8Gb)
2763		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2764	if (phba->lmt & LMT_4Gb)
2765		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2766	if (phba->lmt & LMT_2Gb)
2767		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2768	if (phba->lmt & LMT_1Gb)
2769		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2770
2771	fc_host_maxframe_size(shost) =
2772		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2773		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2774
2775	/* This value is also unchanging */
2776	memset(fc_host_active_fc4s(shost), 0,
2777	       sizeof(fc_host_active_fc4s(shost)));
2778	fc_host_active_fc4s(shost)[2] = 1;
2779	fc_host_active_fc4s(shost)[7] = 1;
2780
2781	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2782	spin_lock_irq(shost->host_lock);
2783	vport->load_flag &= ~FC_LOADING;
2784	spin_unlock_irq(shost->host_lock);
2785}
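
/*
 * Note on the fc4s bitmaps set above (editor's reading, offered as an
 * interpretation): the FC transport keeps a 32-byte bitmap of FC-4 TYPE
 * codes following the FC-GS convention; writing 1 into byte 2 flags
 * TYPE 0x08 (SCSI-FCP) and writing 1 into byte 7 flags TYPE 0x20
 * (CT, Fibre Channel Services).
 */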
2786
2787/**
2788 * lpfc_stop_port_s3 - Stop SLI3 device port
2789 * @phba: pointer to lpfc hba data structure.
2790 *
2791 * This routine is invoked to stop an SLI3 device port, it stops the device
2792 * from generating interrupts and stops the device driver's timers for the
2793 * device.
2794 **/
2795static void
2796lpfc_stop_port_s3(struct lpfc_hba *phba)
2797{
2798	/* Clear all interrupt enable conditions */
2799	writel(0, phba->HCregaddr);
2800	readl(phba->HCregaddr); /* flush */
2801	/* Clear all pending interrupts */
2802	writel(0xffffffff, phba->HAregaddr);
2803	readl(phba->HAregaddr); /* flush */
2804
2805	/* Reset some HBA SLI setup states */
2806	lpfc_stop_hba_timers(phba);
2807	phba->pport->work_port_events = 0;
2808}
2809
2810/**
2811 * lpfc_stop_port_s4 - Stop SLI4 device port
2812 * @phba: pointer to lpfc hba data structure.
2813 *
2814 * This routine is invoked to stop an SLI4 device port, it stops the device
2815 * from generating interrupts and stops the device driver's timers for the
2816 * device.
2817 **/
2818static void
2819lpfc_stop_port_s4(struct lpfc_hba *phba)
2820{
2821	/* Reset some HBA SLI4 setup states */
2822	lpfc_stop_hba_timers(phba);
2823	phba->pport->work_port_events = 0;
2824	phba->sli4_hba.intr_enable = 0;
2825}
2826
2827/**
2828 * lpfc_stop_port - Wrapper function for stopping hba port
2829 * @phba: Pointer to HBA context object.
2830 *
2831 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2832 * the API jump table function pointer from the lpfc_hba struct.
2833 **/
2834void
2835lpfc_stop_port(struct lpfc_hba *phba)
2836{
2837	phba->lpfc_stop_port(phba);
2838}
2839
2840/**
 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2842 * @phba: pointer to lpfc hba data structure.
2843 *
2844 * This routine is invoked to remove the driver default fcf record from
2845 * the port.  This routine currently acts on FCF Index 0.
2846 *
2847 **/
2848void
2849lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2850{
2851	int rc = 0;
2852	LPFC_MBOXQ_t *mboxq;
2853	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2854	uint32_t mbox_tmo, req_len;
2855	uint32_t shdr_status, shdr_add_status;
2856
2857	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2858	if (!mboxq) {
2859		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2861		return;
2862	}
2863
2864	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2865		  sizeof(struct lpfc_sli4_cfg_mhdr);
2866	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2867			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2868			      req_len, LPFC_SLI4_MBX_EMBED);
2869	/*
	 * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
2871	 * supports multiple FCF indices.
2872	 */
2873	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2874	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2875	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2876	       phba->fcf.current_rec.fcf_indx);
2877
2878	if (!phba->sli4_hba.intr_enable)
2879		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2880	else {
2881		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2882		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2883	}
2884	/* The IOCTL status is embedded in the mailbox subheader. */
2885	shdr_status = bf_get(lpfc_mbox_hdr_status,
2886			     &del_fcf_record->header.cfg_shdr.response);
2887	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2888				 &del_fcf_record->header.cfg_shdr.response);
2889	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2890		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2891				"2516 DEL FCF of default FCF Index failed "
2892				"mbx status x%x, status x%x add_status x%x\n",
2893				rc, shdr_status, shdr_add_status);
2894	}
2895	if (rc != MBX_TIMEOUT)
2896		mempool_free(mboxq, phba->mbox_mem_pool);
2897}
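
/*
 * Note the MBX_TIMEOUT check above: on timeout the mailbox is
 * deliberately not returned to the pool, since the firmware may still
 * complete it later; ownership then rests with the completion path.
 * The same convention appears throughout the SLI4 mailbox code.
 */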
2898
2899/**
2900 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2901 * @phba: Pointer to hba for which this call is being executed.
2902 *
2903 * This routine starts the timer waiting for the FCF rediscovery to complete.
2904 **/
2905void
2906lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2907{
2908	unsigned long fcf_redisc_wait_tmo =
2909		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2910	/* Start fcf rediscovery wait period timer */
2911	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2912	spin_lock_irq(&phba->hbalock);
2913	/* Allow action to new fcf asynchronous event */
2914	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2915	/* Mark the FCF rediscovery pending state */
2916	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2917	spin_unlock_irq(&phba->hbalock);
2918}
2919
2920/**
2921 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: unsigned long holding the lpfc_hba data structure pointer.
2923 *
 * This routine is invoked when the wait for FCF table rediscovery has
 * timed out. If new FCF record(s) have been discovered during the wait
 * period, a new FCF event shall be added to the FCoE async event list,
 * and the worker thread shall be woken up to process it from the worker
 * thread context.
2929 **/
2930void
2931lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2932{
2933	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2934
2935	/* Don't send FCF rediscovery event if timer cancelled */
2936	spin_lock_irq(&phba->hbalock);
2937	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2938		spin_unlock_irq(&phba->hbalock);
2939		return;
2940	}
2941	/* Clear FCF rediscovery timer pending flag */
2942	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2943	/* FCF rediscovery event to worker thread */
2944	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2945	spin_unlock_irq(&phba->hbalock);
2946	/* wake up worker thread */
2947	lpfc_worker_wake_up(phba);
2948}
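
/*
 * lpfc_fcf_redisc_wait_start_timer() and this timeout handler form a
 * cancellable one-shot: FCF_REDISC_PEND is set when the timer is armed
 * and cleared either here (converted into FCF_REDISC_EVT for the worker
 * thread) or by __lpfc_sli4_stop_fcf_redisc_wait_timer() on cancel, so
 * a timer that fires after cancellation finds the flag clear and exits.
 */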
2949
2950/**
2951 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2952 * @phba: pointer to lpfc hba data structure.
2953 *
2954 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2955 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2956 * was successful and the firmware supports FCoE. Any other return indicates
 * an error. It is assumed that this function will be called before interrupts
2958 * are enabled.
2959 **/
2960static int
2961lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2962{
2963	int rc = 0;
2964	LPFC_MBOXQ_t *mboxq;
2965	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2966	uint32_t length;
2967	uint32_t shdr_status, shdr_add_status;
2968
2969	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2970	if (!mboxq) {
2971		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2972				"2621 Failed to allocate mbox for "
2973				"query firmware config cmd\n");
2974		return -ENOMEM;
2975	}
2976	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2977	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2978		  sizeof(struct lpfc_sli4_cfg_mhdr));
2979	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2980			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2981			 length, LPFC_SLI4_MBX_EMBED);
2982	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2983	/* The IOCTL status is embedded in the mailbox subheader. */
2984	shdr_status = bf_get(lpfc_mbox_hdr_status,
2985			     &query_fw_cfg->header.cfg_shdr.response);
2986	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2987				 &query_fw_cfg->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2622 Query Firmware Config failed "
				"mbx status x%x, status x%x add_status x%x\n",
				rc, shdr_status, shdr_add_status);
		/* Do not free the mailbox on timeout: the completion may
		 * still arrive and ownership then rests with that path.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2623 FCoE Function not supported by firmware. "
				"Function mode = %08x\n",
				query_fw_cfg->function_mode);
		/* rc == MBX_SUCCESS on this path, so the mailbox is ours */
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return 0;
3005}
3006
3007/**
3008 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3009 * @phba: pointer to lpfc hba data structure.
3010 * @acqe_link: pointer to the async link completion queue entry.
3011 *
3012 * This routine is to parse the SLI4 link-attention link fault code and
3013 * translate it into the base driver's read link attention mailbox command
3014 * status.
3015 *
3016 * Return: Link-attention status in terms of base driver's coding.
3017 **/
3018static uint16_t
3019lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3020			   struct lpfc_acqe_link *acqe_link)
3021{
3022	uint16_t latt_fault;
3023
3024	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3025	case LPFC_ASYNC_LINK_FAULT_NONE:
3026	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3027	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3028		latt_fault = 0;
3029		break;
3030	default:
3031		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3032				"0398 Invalid link fault code: x%x\n",
3033				bf_get(lpfc_acqe_link_fault, acqe_link));
3034		latt_fault = MBXERR_ERROR;
3035		break;
3036	}
3037	return latt_fault;
3038}
3039
3040/**
3041 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3042 * @phba: pointer to lpfc hba data structure.
3043 * @acqe_link: pointer to the async link completion queue entry.
3044 *
3045 * This routine is to parse the SLI4 link attention type and translate it
3046 * into the base driver's link attention type coding.
3047 *
3048 * Return: Link attention type in terms of base driver's coding.
3049 **/
3050static uint8_t
3051lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3052			  struct lpfc_acqe_link *acqe_link)
3053{
3054	uint8_t att_type;
3055
3056	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3057	case LPFC_ASYNC_LINK_STATUS_DOWN:
3058	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3059		att_type = AT_LINK_DOWN;
3060		break;
3061	case LPFC_ASYNC_LINK_STATUS_UP:
3062		/* Ignore physical link up events - wait for logical link up */
3063		att_type = AT_RESERVED;
3064		break;
3065	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3066		att_type = AT_LINK_UP;
3067		break;
3068	default:
3069		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3070				"0399 Invalid link attention type: x%x\n",
3071				bf_get(lpfc_acqe_link_status, acqe_link));
3072		att_type = AT_RESERVED;
3073		break;
3074	}
3075	return att_type;
3076}
3077
3078/**
3079 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3080 * @phba: pointer to lpfc hba data structure.
3081 * @acqe_link: pointer to the async link completion queue entry.
3082 *
3083 * This routine is to parse the SLI4 link-attention link speed and translate
3084 * it into the base driver's link-attention link speed coding.
3085 *
3086 * Return: Link-attention link speed in terms of base driver's coding.
3087 **/
3088static uint8_t
3089lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3090				struct lpfc_acqe_link *acqe_link)
3091{
3092	uint8_t link_speed;
3093
3094	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3095	case LPFC_ASYNC_LINK_SPEED_ZERO:
3096		link_speed = LA_UNKNW_LINK;
3097		break;
3098	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3099		link_speed = LA_UNKNW_LINK;
3100		break;
3101	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3102		link_speed = LA_UNKNW_LINK;
3103		break;
3104	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3105		link_speed = LA_1GHZ_LINK;
3106		break;
3107	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3108		link_speed = LA_10GHZ_LINK;
3109		break;
3110	default:
3111		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3112				"0483 Invalid link-attention link speed: x%x\n",
3113				bf_get(lpfc_acqe_link_speed, acqe_link));
3114		link_speed = LA_UNKNW_LINK;
3115		break;
3116	}
3117	return link_speed;
3118}
3119
3120/**
3121 * lpfc_sli4_async_link_evt - Process the asynchronous link event
3122 * @phba: pointer to lpfc hba data structure.
3123 * @acqe_link: pointer to the async link completion queue entry.
3124 *
3125 * This routine is to handle the SLI4 asynchronous link event.
3126 **/
3127static void
3128lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3129			 struct lpfc_acqe_link *acqe_link)
3130{
3131	struct lpfc_dmabuf *mp;
3132	LPFC_MBOXQ_t *pmb;
3133	MAILBOX_t *mb;
3134	READ_LA_VAR *la;
3135	uint8_t att_type;
3136
3137	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3138	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3139		return;
3140	phba->fcoe_eventtag = acqe_link->event_tag;
3141	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3142	if (!pmb) {
3143		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3144				"0395 The mboxq allocation failed\n");
3145		return;
3146	}
3147	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3148	if (!mp) {
3149		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3150				"0396 The lpfc_dmabuf allocation failed\n");
3151		goto out_free_pmb;
3152	}
3153	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3154	if (!mp->virt) {
3155		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3156				"0397 The mbuf allocation failed\n");
3157		goto out_free_dmabuf;
3158	}
3159
3160	/* Cleanup any outstanding ELS commands */
3161	lpfc_els_flush_all_cmd(phba);
3162
	/* Block ELS IOCBs until we are done processing the link event */
3164	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3165
3166	/* Update link event statistics */
3167	phba->sli.slistat.link_event++;
3168
3169	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3170	lpfc_read_la(phba, pmb, mp);
3171	pmb->vport = phba->pport;
3172
3173	/* Parse and translate status field */
3174	mb = &pmb->u.mb;
3175	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3176
3177	/* Parse and translate link attention fields */
3178	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3179	la->eventTag = acqe_link->event_tag;
3180	la->attType = att_type;
3181	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3182
	/* Fake the following irrelevant fields */
3184	la->topology = TOPOLOGY_PT_PT;
3185	la->granted_AL_PA = 0;
3186	la->il = 0;
3187	la->pb = 0;
3188	la->fa = 0;
3189	la->mm = 0;
3190
3191	/* Keep the link status for extra SLI4 state machine reference */
3192	phba->sli4_hba.link_state.speed =
3193				bf_get(lpfc_acqe_link_speed, acqe_link);
3194	phba->sli4_hba.link_state.duplex =
3195				bf_get(lpfc_acqe_link_duplex, acqe_link);
3196	phba->sli4_hba.link_state.status =
3197				bf_get(lpfc_acqe_link_status, acqe_link);
3198	phba->sli4_hba.link_state.physical =
3199				bf_get(lpfc_acqe_link_physical, acqe_link);
3200	phba->sli4_hba.link_state.fault =
3201				bf_get(lpfc_acqe_link_fault, acqe_link);
3202	phba->sli4_hba.link_state.logical_speed =
3203				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3204
3205	/* Invoke the lpfc_handle_latt mailbox command callback function */
3206	lpfc_mbx_cmpl_read_la(phba, pmb);
3207
3208	return;
3209
3210out_free_dmabuf:
3211	kfree(mp);
3212out_free_pmb:
3213	mempool_free(pmb, phba->mbox_mem_pool);
3214}
3215
3216/**
3217 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3218 * @vport: pointer to vport data structure.
3219 *
3220 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3221 * response to a CVL event.
3222 *
3223 * Return the pointer to the ndlp with the vport if successful, otherwise
3224 * return NULL.
3225 **/
3226static struct lpfc_nodelist *
3227lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3228{
3229	struct lpfc_nodelist *ndlp;
3230	struct Scsi_Host *shost;
3231	struct lpfc_hba *phba;
3232
3233	if (!vport)
3234		return NULL;
3235	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3236	if (!ndlp)
3237		return NULL;
3238	phba = vport->phba;
3239	if (!phba)
3240		return NULL;
3241	if (phba->pport->port_state <= LPFC_FLOGI)
3242		return NULL;
3243	/* If virtual link is not yet instantiated ignore CVL */
3244	if (vport->port_state <= LPFC_FDISC)
3245		return NULL;
3246	shost = lpfc_shost_from_vport(vport);
3247	if (!shost)
3248		return NULL;
3249	lpfc_linkdown_port(vport);
3250	lpfc_cleanup_pending_mbox(vport);
3251	spin_lock_irq(shost->host_lock);
3252	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3253	spin_unlock_irq(shost->host_lock);
3254
3255	return ndlp;
3256}
3257
3258/**
3259 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
3261 *
3262 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3263 * response to a FCF dead event.
3264 **/
3265static void
3266lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3267{
3268	struct lpfc_vport **vports;
3269	int i;
3270
3271	vports = lpfc_create_vport_work_array(phba);
3272	if (vports)
3273		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3274			lpfc_sli4_perform_vport_cvl(vports[i]);
3275	lpfc_destroy_vport_work_array(phba, vports);
3276}
3277
3278/**
3279 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3280 * @phba: pointer to lpfc hba data structure.
 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
3282 *
3283 * This routine is to handle the SLI4 asynchronous fcoe event.
3284 **/
3285static void
3286lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3287			 struct lpfc_acqe_fcoe *acqe_fcoe)
3288{
3289	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3290	int rc;
3291	struct lpfc_vport *vport;
3292	struct lpfc_nodelist *ndlp;
3293	struct Scsi_Host  *shost;
3294	int active_vlink_present;
3295	struct lpfc_vport **vports;
3296	int i;
3297
3298	phba->fc_eventTag = acqe_fcoe->event_tag;
3299	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3300	switch (event_type) {
3301	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3302	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3303		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3304			"2546 New FCF found index 0x%x tag 0x%x\n",
3305			acqe_fcoe->index,
3306			acqe_fcoe->event_tag);
3307		spin_lock_irq(&phba->hbalock);
3308		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3309		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3310			/*
3311			 * If the current FCF is in discovered state or
3312			 * FCF discovery is in progress, do nothing.
3313			 */
3314			spin_unlock_irq(&phba->hbalock);
3315			break;
3316		}
3317		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3318			/*
3319			 * If fast FCF failover rescan event is pending,
3320			 * do nothing.
3321			 */
3322			spin_unlock_irq(&phba->hbalock);
3323			break;
3324		}
3325		spin_unlock_irq(&phba->hbalock);
3326
3327		/* Read the FCF table and re-discover SAN. */
3328		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
3329		if (rc)
3330			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3331					"2547 Read FCF record failed 0x%x\n",
3332					rc);
3333		break;
3334
3335	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3336		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3337			"2548 FCF Table full count 0x%x tag 0x%x\n",
3338			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3339			acqe_fcoe->event_tag);
3340		break;
3341
3342	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3343		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3344			"2549 FCF disconnected from network index 0x%x"
3345			" tag 0x%x\n", acqe_fcoe->index,
3346			acqe_fcoe->event_tag);
		/* If the event is not for the currently used FCF, do nothing */
3348		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3349			break;
		/* Request the port to rediscover the entire FCF table for
		 * fast recovery from the case where the current FCF record
		 * is no longer valid, unless the last CVL event has already
		 * triggered the failover process.
		 */
3355		spin_lock_irq(&phba->hbalock);
3356		if (phba->fcf.fcf_flag & FCF_CVL_FOVER) {
3357			spin_unlock_irq(&phba->hbalock);
3358			break;
3359		}
3360		/* Mark the fast failover process in progress */
3361		phba->fcf.fcf_flag |= FCF_DEAD_FOVER;
3362		spin_unlock_irq(&phba->hbalock);
3363		rc = lpfc_sli4_redisc_fcf_table(phba);
3364		if (rc) {
3365			spin_lock_irq(&phba->hbalock);
3366			phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER;
3367			spin_unlock_irq(&phba->hbalock);
			/*
			 * As a last resort, fail over by treating this
			 * as a link down to the FCF registration.
			 */
3372			lpfc_sli4_fcf_dead_failthrough(phba);
3373		} else
			/* Handling fast FCF failover on a DEAD FCF event
			 * is considered equivalent to receiving CVL on all
			 * vports.
			 */
3378			lpfc_sli4_perform_all_vport_cvl(phba);
3379		break;
3380	case LPFC_FCOE_EVENT_TYPE_CVL:
3381		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3382			"2718 Clear Virtual Link Received for VPI 0x%x"
3383			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3384		vport = lpfc_find_vport_by_vpid(phba,
3385				acqe_fcoe->index - phba->vpi_base);
3386		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3387		if (!ndlp)
3388			break;
3389		active_vlink_present = 0;
3390
3391		vports = lpfc_create_vport_work_array(phba);
3392		if (vports) {
3393			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3394					i++) {
3395				if ((!(vports[i]->fc_flag &
3396					FC_VPORT_CVL_RCVD)) &&
3397					(vports[i]->port_state > LPFC_FDISC)) {
3398					active_vlink_present = 1;
3399					break;
3400				}
3401			}
3402			lpfc_destroy_vport_work_array(phba, vports);
3403		}
3404
3405		if (active_vlink_present) {
3406			/*
3407			 * If there are other active VLinks present,
3408			 * re-instantiate the Vlink using FDISC.
3409			 */
3410			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3411			shost = lpfc_shost_from_vport(vport);
3412			spin_lock_irq(shost->host_lock);
3413			ndlp->nlp_flag |= NLP_DELAY_TMO;
3414			spin_unlock_irq(shost->host_lock);
3415			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3416			vport->port_state = LPFC_FDISC;
3417		} else {
			/*
			 * Otherwise, request the port to rediscover
			 * the entire FCF table for fast recovery
			 * from the possible case that the current FCF
			 * is no longer valid, unless the FCF_DEAD event
			 * has already triggered the failover process.
			 */
3425			spin_lock_irq(&phba->hbalock);
3426			if (phba->fcf.fcf_flag & FCF_DEAD_FOVER) {
3427				spin_unlock_irq(&phba->hbalock);
3428				break;
3429			}
3430			/* Mark the fast failover process in progress */
3431			phba->fcf.fcf_flag |= FCF_CVL_FOVER;
3432			spin_unlock_irq(&phba->hbalock);
3433			rc = lpfc_sli4_redisc_fcf_table(phba);
3434			if (rc) {
3435				spin_lock_irq(&phba->hbalock);
3436				phba->fcf.fcf_flag &= ~FCF_CVL_FOVER;
3437				spin_unlock_irq(&phba->hbalock);
3438				/*
3439				 * As a last resort, retry on the
3440				 * currently registered FCF entry.
3441				 */
3442				lpfc_retry_pport_discovery(phba);
3443			}
3444		}
3445		break;
3446	default:
3447		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3448			"0288 Unknown FCoE event type 0x%x event tag "
3449			"0x%x\n", event_type, acqe_fcoe->event_tag);
3450		break;
3451	}
3452}
3453
3454/**
3455 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3456 * @phba: pointer to lpfc hba data structure.
3457 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3458 *
3459 * This routine is to handle the SLI4 asynchronous dcbx event.
3460 **/
3461static void
3462lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3463			 struct lpfc_acqe_dcbx *acqe_dcbx)
3464{
3465	phba->fc_eventTag = acqe_dcbx->event_tag;
3466	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3467			"0290 The SLI4 DCBX asynchronous event is not "
3468			"handled yet\n");
3469}
3470
3471/**
3472 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3473 * @phba: pointer to lpfc hba data structure.
3474 *
3475 * This routine is invoked by the worker thread to process all the pending
3476 * SLI4 asynchronous events.
3477 **/
3478void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3479{
3480	struct lpfc_cq_event *cq_event;
3481
3482	/* First, declare the async event has been handled */
3483	spin_lock_irq(&phba->hbalock);
3484	phba->hba_flag &= ~ASYNC_EVENT;
3485	spin_unlock_irq(&phba->hbalock);
3486	/* Now, handle all the async events */
3487	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3488		/* Get the first event from the head of the event queue */
3489		spin_lock_irq(&phba->hbalock);
3490		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3491				 cq_event, struct lpfc_cq_event, list);
3492		spin_unlock_irq(&phba->hbalock);
3493		/* Process the asynchronous event */
3494		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3495		case LPFC_TRAILER_CODE_LINK:
3496			lpfc_sli4_async_link_evt(phba,
3497						 &cq_event->cqe.acqe_link);
3498			break;
3499		case LPFC_TRAILER_CODE_FCOE:
3500			lpfc_sli4_async_fcoe_evt(phba,
3501						 &cq_event->cqe.acqe_fcoe);
3502			break;
3503		case LPFC_TRAILER_CODE_DCBX:
3504			lpfc_sli4_async_dcbx_evt(phba,
3505						 &cq_event->cqe.acqe_dcbx);
3506			break;
3507		default:
3508			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3509					"1804 Invalid asynchronous event code: "
3510					"x%x\n", bf_get(lpfc_trailer_code,
3511					&cq_event->cqe.mcqe_cmpl));
3512			break;
3513		}
3514		/* Free the completion event processed to the free pool */
3515		lpfc_sli4_cq_event_release(phba, cq_event);
3516	}
3517}
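
/*
 * Illustrative sketch (not part of the driver): the producing side of the
 * queue drained above is the slow-path interrupt handling, which queues a
 * cq_event, flags the work and wakes the worker thread, roughly:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
 *	phba->hba_flag |= ASYNC_EVENT;
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	lpfc_worker_wake_up(phba);
 *
 * Details are simplified here; see the SLI interrupt code for the real path.
 */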
3518
3519/**
3520 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3521 * @phba: pointer to lpfc hba data structure.
3522 *
3523 * This routine is invoked by the worker thread to process the pending
3524 * FCF table rediscovery completion event.
3525 **/
3526void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3527{
3528	int rc;
3529
3530	spin_lock_irq(&phba->hbalock);
3531	/* Clear FCF rediscovery timeout event */
3532	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3533	/* Clear driver fast failover FCF record flag */
3534	phba->fcf.failover_rec.flag = 0;
3535	/* Set state for FCF fast failover */
3536	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3537	spin_unlock_irq(&phba->hbalock);
3538
3539	/* Scan FCF table from the first entry to re-discover SAN */
3540	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
3541	if (rc)
3542		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3543				"2747 Post FCF rediscovery read FCF record "
3544				"failed 0x%x\n", rc);
3545}
3546
3547/**
3548 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3549 * @phba: pointer to lpfc hba data structure.
3550 * @dev_grp: The HBA PCI-Device group number.
3551 *
3552 * This routine is invoked to set up the per HBA PCI-Device group function
3553 * API jump table entries.
3554 *
3555 * Return: 0 on success, otherwise -ENODEV
3556 **/
3557int
3558lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3559{
3560	int rc;
3561
3562	/* Set up lpfc PCI-device group */
3563	phba->pci_dev_grp = dev_grp;
3564
3565	/* The LPFC_PCI_DEV_OC uses SLI4 */
3566	if (dev_grp == LPFC_PCI_DEV_OC)
3567		phba->sli_rev = LPFC_SLI_REV4;
3568
3569	/* Set up device INIT API function jump table */
3570	rc = lpfc_init_api_table_setup(phba, dev_grp);
3571	if (rc)
3572		return -ENODEV;
3573	/* Set up SCSI API function jump table */
3574	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3575	if (rc)
3576		return -ENODEV;
3577	/* Set up SLI API function jump table */
3578	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3579	if (rc)
3580		return -ENODEV;
3581	/* Set up MBOX API function jump table */
3582	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3583	if (rc)
3584		return -ENODEV;
3585
3586	return 0;
3587}
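
/*
 * Illustrative usage (sketch): probe-time code selects the PCI device
 * group before any per-group API is dispatched; the error label below is
 * hypothetical.
 *
 *	rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
 *	if (rc)
 *		goto out_free_phba;
 */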
3588
3589/**
3590 * lpfc_log_intr_mode - Log the active interrupt mode
3591 * @phba: pointer to lpfc hba data structure.
3592 * @intr_mode: active interrupt mode adopted.
3593 *
3594 * This routine is invoked to log the active interrupt mode currently used
3595 * by the device.
3596 **/
3597static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3598{
3599	switch (intr_mode) {
3600	case 0:
3601		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3602				"0470 Enabled INTx interrupt mode.\n");
3603		break;
3604	case 1:
3605		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3606				"0481 Enabled MSI interrupt mode.\n");
3607		break;
3608	case 2:
3609		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3610				"0480 Enabled MSI-X interrupt mode.\n");
3611		break;
3612	default:
3613		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3614				"0482 Illegal interrupt mode.\n");
3615		break;
3616	}
3617	return;
3618}
3619
3620/**
3621 * lpfc_enable_pci_dev - Enable a generic PCI device.
3622 * @phba: pointer to lpfc hba data structure.
3623 *
3624 * This routine is invoked to enable the PCI device that is common to all
3625 * PCI devices.
3626 *
3627 * Return codes
3628 * 	0 - successful
3629 * 	other values - error
3630 **/
3631static int
3632lpfc_enable_pci_dev(struct lpfc_hba *phba)
3633{
3634	struct pci_dev *pdev;
3635	int bars;
3636
3637	/* Obtain PCI device reference */
3638	if (!phba->pcidev)
3639		goto out_error;
3640	else
3641		pdev = phba->pcidev;
3642	/* Select PCI BARs */
3643	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3644	/* Enable PCI device */
3645	if (pci_enable_device_mem(pdev))
3646		goto out_error;
3647	/* Request PCI resource for the device */
3648	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3649		goto out_disable_device;
3650	/* Set up device as PCI master and save state for EEH */
3651	pci_set_master(pdev);
3652	pci_try_set_mwi(pdev);
3653	pci_save_state(pdev);
3654
3655	return 0;
3656
3657out_disable_device:
3658	pci_disable_device(pdev);
3659out_error:
3660	return -ENODEV;
3661}
3662
3663/**
3664 * lpfc_disable_pci_dev - Disable a generic PCI device.
3665 * @phba: pointer to lpfc hba data structure.
3666 *
3667 * This routine is invoked to disable the PCI device that is common to all
3668 * PCI devices.
3669 **/
3670static void
3671lpfc_disable_pci_dev(struct lpfc_hba *phba)
3672{
3673	struct pci_dev *pdev;
3674	int bars;
3675
3676	/* Obtain PCI device reference */
3677	if (!phba->pcidev)
3678		return;
3679	else
3680		pdev = phba->pcidev;
3681	/* Select PCI BARs */
3682	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3683	/* Release PCI resource and disable PCI device */
3684	pci_release_selected_regions(pdev, bars);
3685	pci_disable_device(pdev);
3686	/* Null out PCI private reference to driver */
3687	pci_set_drvdata(pdev, NULL);
3688
3689	return;
3690}
3691
3692/**
3693 * lpfc_reset_hba - Reset a hba
3694 * @phba: pointer to lpfc hba data structure.
3695 *
3696 * This routine is invoked to reset a hba device. It brings the HBA
3697 * offline, performs a board restart, and then brings the board back
3698 * online. lpfc_offline calls lpfc_sli_hba_down, which cleans up
3699 * outstanding mailbox commands.
3700 **/
3701void
3702lpfc_reset_hba(struct lpfc_hba *phba)
3703{
3704	/* If resets are disabled then set error state and return. */
3705	if (!phba->cfg_enable_hba_reset) {
3706		phba->link_state = LPFC_HBA_ERROR;
3707		return;
3708	}
3709	lpfc_offline_prep(phba);
3710	lpfc_offline(phba);
3711	lpfc_sli_brdrestart(phba);
3712	lpfc_online(phba);
3713	lpfc_unblock_mgmt_io(phba);
3714}
3715
3716/**
3717 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3718 * @phba: pointer to lpfc hba data structure.
3719 *
3720 * This routine is invoked to set up the driver internal resources specific to
3721 * support the SLI-3 HBA device it is attached to.
3722 *
3723 * Return codes
3724 * 	0 - successful
3725 * 	other values - error
3726 **/
3727static int
3728lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3729{
3730	struct lpfc_sli *psli;
3731
3732	/*
3733	 * Initialize timers used by driver
3734	 */
3735
3736	/* Heartbeat timer */
3737	init_timer(&phba->hb_tmofunc);
3738	phba->hb_tmofunc.function = lpfc_hb_timeout;
3739	phba->hb_tmofunc.data = (unsigned long)phba;
3740
3741	psli = &phba->sli;
3742	/* MBOX heartbeat timer */
3743	init_timer(&psli->mbox_tmo);
3744	psli->mbox_tmo.function = lpfc_mbox_timeout;
3745	psli->mbox_tmo.data = (unsigned long) phba;
3746	/* FCP polling mode timer */
3747	init_timer(&phba->fcp_poll_timer);
3748	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3749	phba->fcp_poll_timer.data = (unsigned long) phba;
3750	/* Fabric block timer */
3751	init_timer(&phba->fabric_block_timer);
3752	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3753	phba->fabric_block_timer.data = (unsigned long) phba;
3754	/* EA polling mode timer */
3755	init_timer(&phba->eratt_poll);
3756	phba->eratt_poll.function = lpfc_poll_eratt;
3757	phba->eratt_poll.data = (unsigned long) phba;
3758
3759	/* Host attention work mask setup */
3760	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3761	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3762
3763	/* Get all the module params for configuring this host */
3764	lpfc_get_cfgparam(phba);
3765	/*
3766	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3767	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3768	 * Two segments are added since the IOCB needs a command and response bde.
3769	 */
3770	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3771		sizeof(struct fcp_rsp) +
3772			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3773
3774	if (phba->cfg_enable_bg) {
3775		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3776		phba->cfg_sg_dma_buf_size +=
3777			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3778	}
3779
3780	/* Also reinitialize the host templates with new values. */
3781	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3782	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3783
3784	phba->max_vpi = LPFC_MAX_VPI;
3785	/* This will be set to the correct value after config_port mbox */
3786	phba->max_vports = 0;
3787
3788	/*
3789	 * Initialize the SLI Layer to run with lpfc HBAs.
3790	 */
3791	lpfc_sli_setup(phba);
3792	lpfc_sli_queue_setup(phba);
3793
3794	/* Allocate device driver memory */
3795	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3796		return -ENOMEM;
3797
3798	return 0;
3799}
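
/*
 * Worked example for the sg_dma_buf_size calculation above, using
 * illustrative sizes only (the real values come from the structure
 * definitions): with a 32-byte fcp_cmnd, a 32-byte fcp_rsp, a 12-byte
 * ulp_bde64 and a cfg_sg_seg_cnt of 64, each pool buffer would be
 *
 *	32 + 32 + (64 + 2) * 12 = 856 bytes
 */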
3800
3801/**
3802 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3803 * @phba: pointer to lpfc hba data structure.
3804 *
3805 * This routine is invoked to unset the driver internal resources set up
3806 * specifically for supporting the SLI-3 HBA device it is attached to.
3807 **/
3808static void
3809lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3810{
3811	/* Free device driver memory allocated */
3812	lpfc_mem_free_all(phba);
3813
3814	return;
3815}
3816
3817/**
3818 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3819 * @phba: pointer to lpfc hba data structure.
3820 *
3821 * This routine is invoked to set up the driver internal resources specific to
3822 * support the SLI-4 HBA device it is attached to.
3823 *
3824 * Return codes
3825 * 	0 - successful
3826 * 	other values - error
3827 **/
3828static int
3829lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3830{
3831	struct lpfc_sli *psli;
3832	LPFC_MBOXQ_t *mboxq;
3833	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3834	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3835	struct lpfc_mqe *mqe;
3836
3837	/* Before proceeding, wait for POST done and device ready */
3838	rc = lpfc_sli4_post_status_check(phba);
3839	if (rc)
3840		return -ENODEV;
3841
3842	/*
3843	 * Initialize timers used by driver
3844	 */
3845
3846	/* Heartbeat timer */
3847	init_timer(&phba->hb_tmofunc);
3848	phba->hb_tmofunc.function = lpfc_hb_timeout;
3849	phba->hb_tmofunc.data = (unsigned long)phba;
3850
3851	psli = &phba->sli;
3852	/* MBOX heartbeat timer */
3853	init_timer(&psli->mbox_tmo);
3854	psli->mbox_tmo.function = lpfc_mbox_timeout;
3855	psli->mbox_tmo.data = (unsigned long) phba;
3856	/* Fabric block timer */
3857	init_timer(&phba->fabric_block_timer);
3858	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3859	phba->fabric_block_timer.data = (unsigned long) phba;
3860	/* EA polling mode timer */
3861	init_timer(&phba->eratt_poll);
3862	phba->eratt_poll.function = lpfc_poll_eratt;
3863	phba->eratt_poll.data = (unsigned long) phba;
3864	/* FCF rediscover timer */
3865	init_timer(&phba->fcf.redisc_wait);
3866	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3867	phba->fcf.redisc_wait.data = (unsigned long)phba;
3868
3869	/*
3870	 * We need to do a READ_CONFIG mailbox command here before
3871	 * calling lpfc_get_cfgparam. For VFs this will report the
3872	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3873	 * All of the resources allocated for this Port are tied to
3874	 * these values.
3875	 */
3876	/* Get all the module params for configuring this host */
3877	lpfc_get_cfgparam(phba);
3878	phba->max_vpi = LPFC_MAX_VPI;
3879	/* This will be set to correct value after the read_config mbox */
3880	/* This will be set to the correct value after the read_config mbox */
3881
3882	/* Program the default value of vlan_id and fc_map */
3883	phba->valid_vlan = 0;
3884	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3885	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3886	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3887
3888	/*
3889	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3890	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3891	 * Two segments are added since the IOCB needs a command and response bde.
3892	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3893	 * sgl sizes that are a power of 2 are used.
3894	 */
3895	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3896		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3897	/* Feature Level 1 hardware is limited to 2 pages */
3898	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3899	     LPFC_SLI_INTF_FEATURELEVEL1_1))
3900		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3901	else
3902		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3903	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3904	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3905	     dma_buf_size = dma_buf_size << 1)
3906		;
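	/* For example, a buf_size of 1500 would leave the loop above with
	 * dma_buf_size rounded up to 2048, assuming a minimum buffer size
	 * of 1024 (illustrative values only).
	 */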
3907	if (dma_buf_size == max_buf_size)
3908		phba->cfg_sg_seg_cnt = (dma_buf_size -
3909			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3910			(2 * sizeof(struct sli4_sge))) /
3911				sizeof(struct sli4_sge);
3912	phba->cfg_sg_dma_buf_size = dma_buf_size;
3913
3914	/* Initialize buffer queue management fields */
3915	hbq_count = lpfc_sli_hbq_count();
3916	for (i = 0; i < hbq_count; ++i)
3917		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3918	INIT_LIST_HEAD(&phba->rb_pend_list);
3919	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3920	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3921
3922	/*
3923	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3924	 */
3925	/* Initialize the Abort scsi buffer list used by driver */
3926	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3927	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3928	/* This abort list is used by the worker thread */
3929	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3930
3931	/*
3932	 * Initialize driver internal slow-path work queues
3933	 */
3934
3935	/* Driver internal slow-path CQ Event pool */
3936	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3937	/* Response IOCB work queue list */
3938	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3939	/* Asynchronous event CQ Event work queue list */
3940	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3941	/* Fast-path XRI aborted CQ Event work queue list */
3942	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3943	/* Slow-path XRI aborted CQ Event work queue list */
3944	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3945	/* Receive queue CQ Event work queue list */
3946	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3947
3948	/* Initialize the driver internal SLI layer lists. */
3949	lpfc_sli_setup(phba);
3950	lpfc_sli_queue_setup(phba);
3951
3952	/* Allocate device driver memory */
3953	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3954	if (rc)
3955		return -ENOMEM;
3956
3957	/* Create the bootstrap mailbox command */
3958	rc = lpfc_create_bootstrap_mbox(phba);
3959	if (unlikely(rc))
3960		goto out_free_mem;
3961
3962	/* Set up the host's endian order with the device. */
3963	rc = lpfc_setup_endian_order(phba);
3964	if (unlikely(rc))
3965		goto out_free_bsmbx;
3966
3967	rc = lpfc_sli4_fw_cfg_check(phba);
3968	if (unlikely(rc))
3969		goto out_free_bsmbx;
3970
3971	/* Set up the hba's configuration parameters. */
3972	rc = lpfc_sli4_read_config(phba);
3973	if (unlikely(rc))
3974		goto out_free_bsmbx;
3975
3976	/* Perform a function reset */
3977	rc = lpfc_pci_function_reset(phba);
3978	if (unlikely(rc))
3979		goto out_free_bsmbx;
3980
3981	/* Create all the SLI4 queues */
3982	rc = lpfc_sli4_queue_create(phba);
3983	if (rc)
3984		goto out_free_bsmbx;
3985
3986	/* Create driver internal CQE event pool */
3987	rc = lpfc_sli4_cq_event_pool_create(phba);
3988	if (rc)
3989		goto out_destroy_queue;
3990
3991	/* Initialize and populate the iocb list per host */
3992	rc = lpfc_init_sgl_list(phba);
3993	if (rc) {
3994		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3995				"1400 Failed to initialize sgl list.\n");
3996		goto out_destroy_cq_event_pool;
3997	}
3998	rc = lpfc_init_active_sgl_array(phba);
3999	if (rc) {
4000		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4001				"1430 Failed to initialize sgl list.\n");
4002		goto out_free_sgl_list;
4003	}
4004
4005	rc = lpfc_sli4_init_rpi_hdrs(phba);
4006	if (rc) {
4007		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4008				"1432 Failed to initialize rpi headers.\n");
4009		goto out_free_active_sgl;
4010	}
4011
4012	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4013				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4014	if (!phba->sli4_hba.fcp_eq_hdl) {
4015		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4016				"2572 Failed allocate memory for fast-path "
4017				"per-EQ handle array\n");
4018		goto out_remove_rpi_hdrs;
4019	}
4020
4021	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4022				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4023	if (!phba->sli4_hba.msix_entries) {
4024		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4025				"2573 Failed allocate memory for msi-x "
4026				"interrupt vector entries\n");
4027		goto out_free_fcp_eq_hdl;
4028	}
4029
4030	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4031						       GFP_KERNEL);
4032	if (!mboxq) {
4033		rc = -ENOMEM;
4034		goto out_free_fcp_eq_hdl;
4035	}
4036
4037	/* Get the Supported Pages. It is always available. */
4038	lpfc_supported_pages(mboxq);
4039	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4040	if (unlikely(rc)) {
4041		rc = -EIO;
4042		mempool_free(mboxq, phba->mbox_mem_pool);
4043		goto out_free_fcp_eq_hdl;
4044	}
4045
4046	mqe = &mboxq->u.mqe;
4047	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4048	       LPFC_MAX_SUPPORTED_PAGES);
4049	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4050		switch (pn_page[i]) {
4051		case LPFC_SLI4_PARAMETERS:
4052			phba->sli4_hba.pc_sli4_params.supported = 1;
4053			break;
4054		default:
4055			break;
4056		}
4057	}
4058
4059	/* Read the port's SLI4 Parameters capabilities if supported. */
4060	if (phba->sli4_hba.pc_sli4_params.supported)
4061		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4062	mempool_free(mboxq, phba->mbox_mem_pool);
4063	if (rc) {
4064		rc = -EIO;
4065		goto out_free_fcp_eq_hdl;
4066	}
4067	return rc;
4068
4069out_free_fcp_eq_hdl:
4070	kfree(phba->sli4_hba.fcp_eq_hdl);
4071out_remove_rpi_hdrs:
4072	lpfc_sli4_remove_rpi_hdrs(phba);
4073out_free_active_sgl:
4074	lpfc_free_active_sgl(phba);
4075out_free_sgl_list:
4076	lpfc_free_sgl_list(phba);
4077out_destroy_cq_event_pool:
4078	lpfc_sli4_cq_event_pool_destroy(phba);
4079out_destroy_queue:
4080	lpfc_sli4_queue_destroy(phba);
4081out_free_bsmbx:
4082	lpfc_destroy_bootstrap_mbox(phba);
4083out_free_mem:
4084	lpfc_mem_free(phba);
4085	return rc;
4086}
4087
4088/**
4089 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4090 * @phba: pointer to lpfc hba data structure.
4091 *
4092 * This routine is invoked to unset the driver internal resources set up
4093 * specifically for supporting the SLI-4 HBA device it is attached to.
4094 **/
4095static void
4096lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4097{
4098	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4099
4100	/* unregister default FCFI from the HBA */
4101	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4102
4103	/* Free the default FCF table */
4104	lpfc_sli_remove_dflt_fcf(phba);
4105
4106	/* Free memory allocated for msi-x interrupt vector entries */
4107	kfree(phba->sli4_hba.msix_entries);
4108
4109	/* Free memory allocated for fast-path work queue handles */
4110	kfree(phba->sli4_hba.fcp_eq_hdl);
4111
4112	/* Free the allocated rpi headers. */
4113	lpfc_sli4_remove_rpi_hdrs(phba);
4114	lpfc_sli4_remove_rpis(phba);
4115
4116	/* Free the ELS sgl list */
4117	lpfc_free_active_sgl(phba);
4118	lpfc_free_sgl_list(phba);
4119
4120	/* Free the SCSI sgl management array */
4121	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4122
4123	/* Free the SLI4 queues */
4124	lpfc_sli4_queue_destroy(phba);
4125
4126	/* Free the completion queue EQ event pool */
4127	lpfc_sli4_cq_event_release_all(phba);
4128	lpfc_sli4_cq_event_pool_destroy(phba);
4129
4130	/* Reset SLI4 HBA FCoE function */
4131	lpfc_pci_function_reset(phba);
4132
4133	/* Free the bsmbx region. */
4134	lpfc_destroy_bootstrap_mbox(phba);
4135
4136	/* Free the SLI Layer memory with SLI4 HBAs */
4137	lpfc_mem_free_all(phba);
4138
4139	/* Free the current connect table */
4140	list_for_each_entry_safe(conn_entry, next_conn_entry,
4141		&phba->fcf_conn_rec_list, list) {
4142		list_del_init(&conn_entry->list);
4143		kfree(conn_entry);
4144	}
4145
4146	return;
4147}
4148
4149/**
4150 * lpfc_init_api_table_setup - Set up init api function jump table
4151 * @phba: The hba struct for which this call is being executed.
4152 * @dev_grp: The HBA PCI-Device group number.
4153 *
4154 * This routine sets up the device INIT interface API function jump table
4155 * in @phba struct.
4156 *
4157 * Returns: 0 - success, -ENODEV - failure.
4158 **/
4159int
4160lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4161{
4162	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4163	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4164	switch (dev_grp) {
4165	case LPFC_PCI_DEV_LP:
4166		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4167		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4168		phba->lpfc_stop_port = lpfc_stop_port_s3;
4169		break;
4170	case LPFC_PCI_DEV_OC:
4171		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4172		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4173		phba->lpfc_stop_port = lpfc_stop_port_s4;
4174		break;
4175	default:
4176		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4177				"1431 Invalid HBA PCI-device group: 0x%x\n",
4178				dev_grp);
4179		return -ENODEV;
4180		break;
4181	}
4182	return 0;
4183}
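
/*
 * With the jump table in place, callers dispatch through the per-group
 * function pointers instead of branching on the SLI revision, e.g.
 * (sketch):
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_hba_down_post(phba);
 */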
4184
4185/**
4186 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4187 * @phba: pointer to lpfc hba data structure.
4188 *
4189 * This routine is invoked to set up the driver internal resources before the
4190 * device specific resource setup to support the HBA device it is attached to.
4191 *
4192 * Return codes
4193 *	0 - successful
4194 *	other values - error
4195 **/
4196static int
4197lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4198{
4199	/*
4200	 * Driver resources common to all SLI revisions
4201	 */
4202	atomic_set(&phba->fast_event_count, 0);
4203	spin_lock_init(&phba->hbalock);
4204
4205	/* Initialize ndlp management spinlock */
4206	spin_lock_init(&phba->ndlp_lock);
4207
4208	INIT_LIST_HEAD(&phba->port_list);
4209	INIT_LIST_HEAD(&phba->work_list);
4210	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4211
4212	/* Initialize the wait queue head for the kernel thread */
4213	init_waitqueue_head(&phba->work_waitq);
4214
4215	/* Initialize the scsi buffer list used by driver for scsi IO */
4216	spin_lock_init(&phba->scsi_buf_list_lock);
4217	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4218
4219	/* Initialize the fabric iocb list */
4220	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4221
4222	/* Initialize list to save ELS buffers */
4223	INIT_LIST_HEAD(&phba->elsbuf);
4224
4225	/* Initialize FCF connection rec list */
4226	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4227
4228	return 0;
4229}
4230
4231/**
4232 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4233 * @phba: pointer to lpfc hba data structure.
4234 *
4235 * This routine is invoked to set up the driver internal resources after the
4236 * device specific resource setup to support the HBA device it is attached to.
4237 *
4238 * Return codes
4239 * 	0 - successful
4240 * 	other values - error
4241 **/
4242static int
4243lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4244{
4245	int error;
4246
4247	/* Startup the kernel thread for this host adapter. */
4248	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4249					  "lpfc_worker_%d", phba->brd_no);
4250	if (IS_ERR(phba->worker_thread)) {
4251		error = PTR_ERR(phba->worker_thread);
4252		return error;
4253	}
4254
4255	return 0;
4256}
4257
4258/**
4259 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4260 * @phba: pointer to lpfc hba data structure.
4261 *
4262 * This routine is invoked to unset the driver internal resources set up after
4263 * the device specific resource setup for supporting the HBA device it
4264 * is attached to.
4265 **/
4266static void
4267lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4268{
4269	/* Stop kernel worker thread */
4270	kthread_stop(phba->worker_thread);
4271}
4272
4273/**
4274 * lpfc_free_iocb_list - Free iocb list.
4275 * @phba: pointer to lpfc hba data structure.
4276 *
4277 * This routine is invoked to free the driver's IOCB list and memory.
4278 **/
4279static void
4280lpfc_free_iocb_list(struct lpfc_hba *phba)
4281{
4282	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4283
4284	spin_lock_irq(&phba->hbalock);
4285	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4286				 &phba->lpfc_iocb_list, list) {
4287		list_del(&iocbq_entry->list);
4288		kfree(iocbq_entry);
4289		phba->total_iocbq_bufs--;
4290	}
4291	spin_unlock_irq(&phba->hbalock);
4292
4293	return;
4294}
4295
4296/**
4297 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4298 * @phba: pointer to lpfc hba data structure.
4299 *
4300 * This routine is invoked to allocate and initialize the driver's IOCB
4301 * list and set up the IOCB tag array accordingly.
4302 *
4303 * Return codes
4304 *	0 - successful
4305 *	other values - error
4306 **/
4307static int
4308lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4309{
4310	struct lpfc_iocbq *iocbq_entry = NULL;
4311	uint16_t iotag;
4312	int i;
4313
4314	/* Initialize and populate the iocb list per host.  */
4315	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4316	for (i = 0; i < iocb_count; i++) {
4317		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4318		if (iocbq_entry == NULL) {
4319			printk(KERN_ERR "%s: only allocated %d iocbs of "
4320				"expected %d count. Unloading driver.\n",
4321				__func__, i, iocb_count);
4322			goto out_free_iocbq;
4323		}
4324
4325		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4326		if (iotag == 0) {
4327			kfree(iocbq_entry);
4328			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4329				"Unloading driver.\n", __func__);
4330			goto out_free_iocbq;
4331		}
4332		iocbq_entry->sli4_xritag = NO_XRI;
4333
4334		spin_lock_irq(&phba->hbalock);
4335		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4336		phba->total_iocbq_bufs++;
4337		spin_unlock_irq(&phba->hbalock);
4338	}
4339
4340	return 0;
4341
4342out_free_iocbq:
4343	lpfc_free_iocb_list(phba);
4344
4345	return -ENOMEM;
4346}
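
/*
 * Illustrative call (sketch): probe-time code sizes the list with a
 * driver-wide default such as LPFC_IOCB_LIST_CNT; the error label below
 * is hypothetical.
 *
 *	rc = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
 *	if (rc)
 *		goto out_unset_driver_resource;
 */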
4347
4348/**
4349 * lpfc_free_sgl_list - Free sgl list.
4350 * @phba: pointer to lpfc hba data structure.
4351 *
4352 * This routine is invoked to free the driver's sgl list and memory.
4353 **/
4354static void
4355lpfc_free_sgl_list(struct lpfc_hba *phba)
4356{
4357	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4358	LIST_HEAD(sglq_list);
4359	int rc = 0;
4360
4361	spin_lock_irq(&phba->hbalock);
4362	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4363	spin_unlock_irq(&phba->hbalock);
4364
4365	list_for_each_entry_safe(sglq_entry, sglq_next,
4366				 &sglq_list, list) {
4367		list_del(&sglq_entry->list);
4368		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4369		kfree(sglq_entry);
4370		phba->sli4_hba.total_sglq_bufs--;
4371	}
4372	rc = lpfc_sli4_remove_all_sgl_pages(phba);
4373	if (rc) {
4374		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4375			"2005 Unable to deregister pages from HBA: %x\n", rc);
4376	}
4377	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4378}
4379
4380/**
4381 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4382 * @phba: pointer to lpfc hba data structure.
4383 *
4384 * This routine is invoked to allocate the driver's active sgl memory.
4385 * This array will hold the sglq_entry's for active IOs.
4386 **/
4387static int
4388lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4389{
4390	int size;
4391	size = sizeof(struct lpfc_sglq *);
4392	size *= phba->sli4_hba.max_cfg_param.max_xri;
4393
4394	phba->sli4_hba.lpfc_sglq_active_list =
4395		kzalloc(size, GFP_KERNEL);
4396	if (!phba->sli4_hba.lpfc_sglq_active_list)
4397		return -ENOMEM;
4398	return 0;
4399}
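
/*
 * The array is sized by max_xri so that an active sglq can be recorded
 * and looked up by its XRI in O(1); a sketch of the intended use (bounds
 * checks and locking omitted):
 *
 *	phba->sli4_hba.lpfc_sglq_active_list[xritag] = sglq;
 *	...
 *	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
 */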
4400
4401/**
4402 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4403 * @phba: pointer to lpfc hba data structure.
4404 *
4405 * This routine is invoked to walk through the array of active sglq entries
4406 * and free all of the resources.
4407 * This is just a placeholder for now.
4408 **/
4409static void
4410lpfc_free_active_sgl(struct lpfc_hba *phba)
4411{
4412	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4413}
4414
4415/**
4416 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4417 * @phba: pointer to lpfc hba data structure.
4418 *
4419 * This routine is invoked to allocate and initialize the driver's sgl
4420 * list and set up the sgl xritag array accordingly.
4421 *
4422 * Return codes
4423 *	0 - successful
4424 *	other values - error
4425 **/
4426static int
4427lpfc_init_sgl_list(struct lpfc_hba *phba)
4428{
4429	struct lpfc_sglq *sglq_entry = NULL;
4430	int i;
4431	int els_xri_cnt;
4432
4433	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4434	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4435				"2400 lpfc_init_sgl_list els %d.\n",
4436				els_xri_cnt);
4437	/* Initialize and populate the sglq list per host/VF. */
4438	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4439	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4440
4441	/* Sanity check on XRI management */
4442	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4443		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4444				"2562 No room left for SCSI XRI allocation: "
4445				"max_xri=%d, els_xri=%d\n",
4446				phba->sli4_hba.max_cfg_param.max_xri,
4447				els_xri_cnt);
4448		return -ENOMEM;
4449	}
4450
4451	/* Allocate memory for the ELS XRI management array */
4452	phba->sli4_hba.lpfc_els_sgl_array =
4453			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4454			GFP_KERNEL);
4455
4456	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4457		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4458				"2401 Failed to allocate memory for ELS "
4459				"XRI management array of size %d.\n",
4460				els_xri_cnt);
4461		return -ENOMEM;
4462	}
4463
4464	/* The XRIs remaining after the ELS XRIs are reserved for SCSI */
4465	phba->sli4_hba.scsi_xri_max =
4466			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4467	phba->sli4_hba.scsi_xri_cnt = 0;
4468
4469	phba->sli4_hba.lpfc_scsi_psb_array =
4470			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4471			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4472
4473	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4474		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4475				"2563 Failed to allocate memory for SCSI "
4476				"XRI management array of size %d.\n",
4477				phba->sli4_hba.scsi_xri_max);
4478		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4479		return -ENOMEM;
4480	}
4481
4482	for (i = 0; i < els_xri_cnt; i++) {
4483		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4484		if (sglq_entry == NULL) {
4485			printk(KERN_ERR "%s: only allocated %d sgls of "
4486				"expected %d count. Unloading driver.\n",
4487				__func__, i, els_xri_cnt);
4488			goto out_free_mem;
4489		}
4490
4491		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4492		if (sglq_entry->sli4_xritag == NO_XRI) {
4493			kfree(sglq_entry);
4494			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4495				"Unloading driver.\n", __func__);
4496			goto out_free_mem;
4497		}
4498		sglq_entry->buff_type = GEN_BUFF_TYPE;
4499		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4500		if (sglq_entry->virt == NULL) {
4501			kfree(sglq_entry);
4502			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4503				"Unloading driver.\n", __func__);
4504			goto out_free_mem;
4505		}
4506		sglq_entry->sgl = sglq_entry->virt;
4507		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4508
4509		/* The list order is used by later block SGL registration */
4510		spin_lock_irq(&phba->hbalock);
4511		sglq_entry->state = SGL_FREED;
4512		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4513		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4514		phba->sli4_hba.total_sglq_bufs++;
4515		spin_unlock_irq(&phba->hbalock);
4516	}
4517	return 0;
4518
4519out_free_mem:
4520	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4521	lpfc_free_sgl_list(phba);
4522	return -ENOMEM;
4523}
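
/*
 * Worked example for the XRI split above (illustrative numbers): with a
 * max_xri of 1024 and an els_xri_cnt of 256, scsi_xri_max becomes
 * 1024 - 256 = 768, so 768 XRIs remain for SCSI buffers.
 */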
4524
4525/**
4526 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4527 * @phba: pointer to lpfc hba data structure.
4528 *
4529 * This routine is invoked to post rpi header templates to the
4530 * HBA consistent with the SLI-4 interface spec.  This routine
4531 * posts a PAGE_SIZE memory region to the port to hold up to
4532 * PAGE_SIZE modulo 64 rpi context headers.
4533 * No locks are held here because this is an initialization routine
4534 * called only from probe or lpfc_online when interrupts are not
4535 * enabled and the driver is reinitializing the device.
4536 *
4537 * Return codes
4538 * 	0 - successful
4539 * 	ENOMEM - No available memory
4540 *      EIO - The mailbox failed to complete successfully.
4541 **/
4542int
4543lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4544{
4545	int rc = 0;
4546	int longs;
4547	uint16_t rpi_count;
4548	struct lpfc_rpi_hdr *rpi_hdr;
4549
4550	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4551
4552	/*
4553	 * Provision an rpi bitmask range for discovery. The total count
4554	 * is the difference between max and base + 1.
4555	 */
4556	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4557		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4558
4559	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4560	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4561					   GFP_KERNEL);
4562	if (!phba->sli4_hba.rpi_bmask)
4563		return -ENOMEM;
4564
4565	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4566	if (!rpi_hdr) {
4567		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4568				"0391 Error during rpi post operation\n");
4569		lpfc_sli4_remove_rpis(phba);
4570		rc = -ENODEV;
4571	}
4572
4573	return rc;
4574}
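
/*
 * Worked example for the bitmask sizing above (illustrative numbers):
 * with an rpi_count of 4000 on a 64-bit kernel, longs = (4000 + 63) / 64
 * = 63, so 63 * 8 = 504 bytes are allocated for the rpi bitmask.
 */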
4575
4576/**
4577 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4578 * @phba: pointer to lpfc hba data structure.
4579 *
4580 * This routine is invoked to allocate a single 4KB memory region to
4581 * support rpis and store it in the phba.  This single region
4582 * provides support for up to 64 rpis.  The region is used globally
4583 * by the device.
4584 *
4585 * Returns:
4586 *   A valid rpi hdr on success.
4587 *   A NULL pointer on any failure.
4588 **/
4589struct lpfc_rpi_hdr *
4590lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4591{
4592	uint16_t rpi_limit, curr_rpi_range;
4593	struct lpfc_dmabuf *dmabuf;
4594	struct lpfc_rpi_hdr *rpi_hdr;
4595
4596	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4597		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4598
4599	spin_lock_irq(&phba->hbalock);
4600	curr_rpi_range = phba->sli4_hba.next_rpi;
4601	spin_unlock_irq(&phba->hbalock);
4602
4603	/*
4604	 * The port has a limited number of rpis. The increment here
4605	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4606	 * and to allow the full max_rpi range per port.
4607	 */
4608	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4609		return NULL;
4610
4611	/*
4612	 * First allocate the protocol header region for the port.  The
4613	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4614	 */
4615	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4616	if (!dmabuf)
4617		return NULL;
4618
4619	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4620					  LPFC_HDR_TEMPLATE_SIZE,
4621					  &dmabuf->phys,
4622					  GFP_KERNEL);
4623	if (!dmabuf->virt) {
4624		rpi_hdr = NULL;
4625		goto err_free_dmabuf;
4626	}
4627
4628	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4629	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4630		rpi_hdr = NULL;
4631		goto err_free_coherent;
4632	}
4633
4634	/* Save the rpi header data for cleanup later. */
4635	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4636	if (!rpi_hdr)
4637		goto err_free_coherent;
4638
4639	rpi_hdr->dmabuf = dmabuf;
4640	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4641	rpi_hdr->page_count = 1;
4642	spin_lock_irq(&phba->hbalock);
4643	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4644	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4645
4646	/*
4647	 * The next_rpi stores the next modulo-64 rpi value to post
4648	 * in any subsequent rpi memory region postings.
4649	 */
4650	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4651	spin_unlock_irq(&phba->hbalock);
4652	return rpi_hdr;
4653
4654 err_free_coherent:
4655	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4656			  dmabuf->virt, dmabuf->phys);
4657 err_free_dmabuf:
4658	kfree(dmabuf);
4659	return NULL;
4660}
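
/*
 * Worked example (illustrative, assuming LPFC_RPI_HDR_COUNT is 64 and an
 * rpi_base of 0): successive calls hand out start_rpi values of 0, 64,
 * 128, ... and advance next_rpi by 64 each time, until another 64-rpi
 * window would exceed rpi_limit and NULL is returned instead.
 */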
4661
4662/**
4663 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4664 * @phba: pointer to lpfc hba data structure.
4665 *
4666 * This routine is invoked to remove all memory resources allocated
4667 * to support rpis. This routine presumes the caller has released all
4668 * rpis consumed by fabric or port logins and is prepared to have
4669 * the header pages removed.
4670 **/
4671void
4672lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4673{
4674	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4675
4676	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4677				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4678		list_del(&rpi_hdr->list);
4679		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4680				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4681		kfree(rpi_hdr->dmabuf);
4682		kfree(rpi_hdr);
4683	}
4684
4685	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4686	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4687}
4688
4689/**
4690 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4691 * @pdev: pointer to pci device data structure.
4692 *
4693 * This routine is invoked to allocate the driver hba data structure for an
4694 * HBA device. If the allocation is successful, the phba reference to the
4695 * PCI device data structure is set.
4696 *
4697 * Return codes
4698 *      pointer to @phba - successful
4699 *      NULL - error
4700 **/
4701static struct lpfc_hba *
4702lpfc_hba_alloc(struct pci_dev *pdev)
4703{
4704	struct lpfc_hba *phba;
4705
4706	/* Allocate memory for HBA structure */
4707	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4708	if (!phba) {
4709		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4710		return NULL;
4711	}
4712
4713	/* Set reference to PCI device in HBA structure */
4714	phba->pcidev = pdev;
4715
4716	/* Assign an unused board number */
4717	phba->brd_no = lpfc_get_instance();
4718	if (phba->brd_no < 0) {
4719		kfree(phba);
4720		return NULL;
4721	}
4722
4723	spin_lock_init(&phba->ct_ev_lock);
4724	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4725
4726	return phba;
4727}
4728
4729/**
4730 * lpfc_hba_free - Free driver hba data structure with a device.
4731 * @phba: pointer to lpfc hba data structure.
4732 *
4733 * This routine is invoked to free the driver hba data structure associated
4734 * with an HBA device.
4735 **/
4736static void
4737lpfc_hba_free(struct lpfc_hba *phba)
4738{
4739	/* Release the driver assigned board number */
4740	idr_remove(&lpfc_hba_index, phba->brd_no);
4741
4742	kfree(phba);
4743	return;
4744}
4745
4746/**
4747 * lpfc_create_shost - Create hba physical port with associated scsi host.
4748 * @phba: pointer to lpfc hba data structure.
4749 *
4750 * This routine is invoked to create HBA physical port and associate a SCSI
4751 * host with it.
4752 *
4753 * Return codes
4754 *      0 - successful
4755 *      other values - error
4756 **/
4757static int
4758lpfc_create_shost(struct lpfc_hba *phba)
4759{
4760	struct lpfc_vport *vport;
4761	struct Scsi_Host  *shost;
4762
4763	/* Initialize HBA FC structure */
4764	phba->fc_edtov = FF_DEF_EDTOV;
4765	phba->fc_ratov = FF_DEF_RATOV;
4766	phba->fc_altov = FF_DEF_ALTOV;
4767	phba->fc_arbtov = FF_DEF_ARBTOV;
4768
4769	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4770	if (!vport)
4771		return -ENODEV;
4772
4773	shost = lpfc_shost_from_vport(vport);
4774	phba->pport = vport;
4775	lpfc_debugfs_initialize(vport);
4776	/* Put reference to SCSI host to driver's device private data */
4777	pci_set_drvdata(phba->pcidev, shost);
4778
4779	return 0;
4780}
4781
4782/**
4783 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4784 * @phba: pointer to lpfc hba data structure.
4785 *
4786 * This routine is invoked to destroy HBA physical port and the associated
4787 * SCSI host.
4788 **/
4789static void
4790lpfc_destroy_shost(struct lpfc_hba *phba)
4791{
4792	struct lpfc_vport *vport = phba->pport;
4793
4794	/* Destroy physical port that associated with the SCSI host */
4795	destroy_port(vport);
4796
4797	return;
4798}
4799
4800/**
4801 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4802 * @phba: pointer to lpfc hba data structure.
4803 * @shost: the shost to be used to detect Block guard settings.
4804 *
4805 * This routine sets up the local Block guard protocol settings for @shost.
4806 * This routine also allocates memory for debugging bg buffers.
4807 **/
4808static void
4809lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4810{
4811	int pagecnt = 10;
4812	if (lpfc_prot_mask && lpfc_prot_guard) {
4813		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4814				"1478 Registering BlockGuard with the "
4815				"SCSI layer\n");
4816		scsi_host_set_prot(shost, lpfc_prot_mask);
4817		scsi_host_set_guard(shost, lpfc_prot_guard);
4818	}
4819	if (!_dump_buf_data) {
4820		spin_lock_init(&_dump_buf_lock);
4821		while (pagecnt) {
4822			_dump_buf_data =
4823				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4824			if (_dump_buf_data) {
4825				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4826					"9043 BLKGRD: allocated %d pages for "
4827				       "_dump_buf_data at 0x%p\n",
4828				       (1 << pagecnt), _dump_buf_data);
4829				_dump_buf_data_order = pagecnt;
4830				memset(_dump_buf_data, 0,
4831				       ((1 << PAGE_SHIFT) << pagecnt));
4832				break;
4833			} else
4834				--pagecnt;
4835		}
4836		if (!_dump_buf_data_order)
4837			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4838				"9044 BLKGRD: ERROR unable to allocate "
4839			       "memory for hexdump\n");
4840	} else
4841		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4842			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4843		       "\n", _dump_buf_data);
4844	if (!_dump_buf_dif) {
4845		while (pagecnt) {
4846			_dump_buf_dif =
4847				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4848			if (_dump_buf_dif) {
4849				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4850					"9046 BLKGRD: allocated %d pages for "
4851				       "_dump_buf_dif at 0x%p\n",
4852				       (1 << pagecnt), _dump_buf_dif);
4853				_dump_buf_dif_order = pagecnt;
4854				memset(_dump_buf_dif, 0,
4855				       ((1 << PAGE_SHIFT) << pagecnt));
4856				break;
4857			} else
4858				--pagecnt;
4859		}
4860		if (!_dump_buf_dif_order)
4861			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4862			"9047 BLKGRD: ERROR unable to allocate "
4863			       "memory for hexdump\n");
4864	} else
4865		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4866			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4867		       _dump_buf_dif);
4868}
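
/*
 * Note on the allocation loops above: pagecnt is passed to
 * __get_free_pages() as an allocation order, so the first attempt asks
 * for 2^10 = 1024 contiguous pages (4 MB with 4 KB pages) and each retry
 * halves the request, down to order 1 (two pages) before giving up.
 */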
4869
4870/**
4871 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4872 * @phba: pointer to lpfc hba data structure.
4873 *
4874 * This routine is invoked to perform all the necessary post initialization
4875 * setup for the device.
4876 **/
4877static void
4878lpfc_post_init_setup(struct lpfc_hba *phba)
4879{
4880	struct Scsi_Host  *shost;
4881	struct lpfc_adapter_event_header adapter_event;
4882
4883	/* Get the default values for Model Name and Description */
4884	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4885
4886	/*
4887	 * hba setup may have changed the hba_queue_depth so we need to
4888	 * adjust the value of can_queue.
4889	 */
4890	shost = pci_get_drvdata(phba->pcidev);
4891	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4892	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4893		lpfc_setup_bg(phba, shost);
4894
4895	lpfc_host_attrib_init(shost);
4896
4897	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4898		spin_lock_irq(shost->host_lock);
4899		lpfc_poll_start_timer(phba);
4900		spin_unlock_irq(shost->host_lock);
4901	}
4902
4903	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4904			"0428 Perform SCSI scan\n");
4905	/* Send board arrival event to upper layer */
4906	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4907	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4908	fc_host_post_vendor_event(shost, fc_get_event_number(),
4909				  sizeof(adapter_event),
4910				  (char *) &adapter_event,
4911				  LPFC_NL_VENDOR_ID);
4912	return;
4913}
4914
4915/**
4916 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4917 * @phba: pointer to lpfc hba data structure.
4918 *
4919 * This routine is invoked to set up the PCI device memory space for device
4920 * with SLI-3 interface spec.
4921 *
4922 * Return codes
4923 * 	0 - successful
4924 * 	other values - error
4925 **/
4926static int
4927lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4928{
4929	struct pci_dev *pdev;
4930	unsigned long bar0map_len, bar2map_len;
4931	int i, hbq_count;
4932	void *ptr;
4933	int error = -ENODEV;
4934
4935	/* Obtain PCI device reference */
4936	if (!phba->pcidev)
4937		return error;
4938	else
4939		pdev = phba->pcidev;
4940
4941	/* Set the device DMA mask size */
4942	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
4943	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
4944		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
4945		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
4946			return error;
4947		}
4948	}
4949
4950	/* Get the bus address of Bar0 and Bar2 and the number of bytes
4951	 * required by each mapping.
4952	 */
4953	phba->pci_bar0_map = pci_resource_start(pdev, 0);
4954	bar0map_len = pci_resource_len(pdev, 0);
4955
4956	phba->pci_bar2_map = pci_resource_start(pdev, 2);
4957	bar2map_len = pci_resource_len(pdev, 2);
4958
4959	/* Map HBA SLIM to a kernel virtual address. */
4960	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4961	if (!phba->slim_memmap_p) {
4962		dev_printk(KERN_ERR, &pdev->dev,
4963			   "ioremap failed for SLIM memory.\n");
4964		goto out;
4965	}
4966
4967	/* Map HBA Control Registers to a kernel virtual address. */
4968	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4969	if (!phba->ctrl_regs_memmap_p) {
4970		dev_printk(KERN_ERR, &pdev->dev,
4971			   "ioremap failed for HBA control registers.\n");
4972		goto out_iounmap_slim;
4973	}
4974
4975	/* Allocate memory for SLI-2 structures */
4976	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4977					       SLI2_SLIM_SIZE,
4978					       &phba->slim2p.phys,
4979					       GFP_KERNEL);
4980	if (!phba->slim2p.virt)
4981		goto out_iounmap;
4982
4983	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4984	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4985	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4986	phba->IOCBs = (phba->slim2p.virt +
4987		       offsetof(struct lpfc_sli2_slim, IOCBs));
4988
4989	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4990						 lpfc_sli_hbq_size(),
4991						 &phba->hbqslimp.phys,
4992						 GFP_KERNEL);
4993	if (!phba->hbqslimp.virt)
4994		goto out_free_slim;
4995
4996	hbq_count = lpfc_sli_hbq_count();
4997	ptr = phba->hbqslimp.virt;
4998	for (i = 0; i < hbq_count; ++i) {
4999		phba->hbqs[i].hbq_virt = ptr;
5000		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5001		ptr += (lpfc_hbq_defs[i]->entry_count *
5002			sizeof(struct lpfc_hbq_entry));
5003	}
5004	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5005	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5006
5007	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5008
5009	INIT_LIST_HEAD(&phba->rb_pend_list);
5010
5011	phba->MBslimaddr = phba->slim_memmap_p;
5012	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5013	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5014	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5015	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5016
5017	return 0;
5018
5019out_free_slim:
5020	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5021			  phba->slim2p.virt, phba->slim2p.phys);
5022out_iounmap:
5023	iounmap(phba->ctrl_regs_memmap_p);
5024out_iounmap_slim:
5025	iounmap(phba->slim_memmap_p);
5026out:
5027	return error;
5028}
5029
5030/**
5031 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5032 * @phba: pointer to lpfc hba data structure.
5033 *
5034 * This routine is invoked to unset the PCI device memory space for device
5035 * with SLI-3 interface spec.
5036 **/
5037static void
5038lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5039{
5040	struct pci_dev *pdev;
5041
5042	/* Obtain PCI device reference */
5043	if (!phba->pcidev)
5044		return;
5045	else
5046		pdev = phba->pcidev;
5047
5048	/* Free coherent DMA memory allocated */
5049	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5050			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5051	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5052			  phba->slim2p.virt, phba->slim2p.phys);
5053
5054	/* I/O memory unmap */
5055	iounmap(phba->ctrl_regs_memmap_p);
5056	iounmap(phba->slim_memmap_p);
5057
5058	return;
5059}
5060
5061/**
5062 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5063 * @phba: pointer to lpfc hba data structure.
5064 *
5065 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5066 * done and check status.
5067 *
5068 * Return 0 if successful, otherwise -ENODEV.
5069 **/
5070int
5071lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5072{
5073	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5074	int i, port_error = -ENODEV;
5075
5076	if (!phba->sli4_hba.STAregaddr)
5077		return -ENODEV;
5078
5079	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5080	for (i = 0; i < 3000; i++) {
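		/* 3000 polls x 10 ms sleep = the 30 second budget above */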
5081		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5082		/* On a fatal POST error, break out */
5083		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5084			port_error = -ENODEV;
5085			break;
5086		}
5087		if (LPFC_POST_STAGE_ARMFW_READY ==
5088		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5089			port_error = 0;
5090			break;
5091		}
5092		msleep(10);
5093	}
5094
5095	if (port_error)
5096		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5097			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5098			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5099			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5100			bf_get(lpfc_hst_state_perr, &sta_reg),
5101			bf_get(lpfc_hst_state_sfi, &sta_reg),
5102			bf_get(lpfc_hst_state_nip, &sta_reg),
5103			bf_get(lpfc_hst_state_ipc, &sta_reg),
5104			bf_get(lpfc_hst_state_xrom, &sta_reg),
5105			bf_get(lpfc_hst_state_dl, &sta_reg),
5106			bf_get(lpfc_hst_state_port_status, &sta_reg));
5107
5108	/* Log device information */
5109	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5110	if (bf_get(lpfc_sli_intf_valid,
5111		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5112		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5113				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5114				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5115				bf_get(lpfc_sli_intf_sli_family,
5116				       &phba->sli4_hba.sli_intf),
5117				bf_get(lpfc_sli_intf_slirev,
5118				       &phba->sli4_hba.sli_intf),
5119				bf_get(lpfc_sli_intf_featurelevel1,
5120				       &phba->sli4_hba.sli_intf),
5121				bf_get(lpfc_sli_intf_featurelevel2,
5122				       &phba->sli4_hba.sli_intf));
5123	}
5124	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5125	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5126	/* With unrecoverable error, log the error message and return error */
5127	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5128	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5129	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5130	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5131		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5132				"1422 HBA Unrecoverable error: "
5133				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5134				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5135				uerrlo_reg.word0, uerrhi_reg.word0,
5136				phba->sli4_hba.ue_mask_lo,
5137				phba->sli4_hba.ue_mask_hi);
5138		return -ENODEV;
5139	}
5140
5141	return port_error;
5142}
5143
5144/**
5145 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5146 * @phba: pointer to lpfc hba data structure.
5147 *
5148 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5149 * memory map.
5150 **/
5151static void
5152lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5153{
5154	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5155					LPFC_UERR_STATUS_LO;
5156	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5157					LPFC_UERR_STATUS_HI;
5158	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5159					LPFC_UE_MASK_LO;
5160	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5161					LPFC_UE_MASK_HI;
5162	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5163					LPFC_SLI_INTF;
5164}
5165
5166/**
5167 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5168 * @phba: pointer to lpfc hba data structure.
5169 *
5170 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5171 * memory map.
5172 **/
5173static void
5174lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5175{
5177	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5178				    LPFC_HST_STATE;
5179	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5180				    LPFC_HST_ISR0;
5181	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5182				    LPFC_HST_IMR0;
5183	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5184				     LPFC_HST_ISCR0;
5185	return;
5186}
5187
5188/**
5189 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5190 * @phba: pointer to lpfc hba data structure.
5191 * @vf: virtual function number
5192 *
5193 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5194 * based on the given virtual function number, @vf.
5195 *
5196 * Return 0 if successful, otherwise -ENODEV.
5197 **/
5198static int
5199lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5200{
5201	if (vf > LPFC_VIR_FUNC_MAX)
5202		return -ENODEV;
5203
5204	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5205				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5206	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5207				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5208	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5209				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5210	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5211				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5212	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5213				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5214	return 0;
5215}
5216
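/*
 * Illustration (hypothetical vf, not driver code): BAR2 is carved into
 * one doorbell page per virtual function, so for vf = 2 the RQ doorbell
 * would resolve to
 *
 *	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL
 *
 * All five doorbells of a given VF live in the same page, separated only
 * by the per-register offsets used above.
 */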
5217/**
5218 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5219 * @phba: pointer to lpfc hba data structure.
5220 *
5221 * This routine is invoked to create the bootstrap mailbox
5222 * region consistent with the SLI-4 interface spec.  This
5223 * routine allocates all memory necessary to communicate
5224 * mailbox commands to the port and sets up all alignment
5225 * needs.  No locks are expected to be held when calling
5226 * this routine.
5227 *
5228 * Return codes
5229 * 	0 - successful
5230 * 	ENOMEM - could not allocate memory.
5231 **/
5232static int
5233lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5234{
5235	uint32_t bmbx_size;
5236	struct lpfc_dmabuf *dmabuf;
5237	struct dma_address *dma_address;
5238	uint32_t pa_addr;
5239	uint64_t phys_addr;
5240
5241	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5242	if (!dmabuf)
5243		return -ENOMEM;
5244
5245	/*
5246	 * The bootstrap mailbox region is comprised of 2 parts
5247	 * plus an alignment restriction of 16 bytes.
5248	 */
5249	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5250	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5251					  bmbx_size,
5252					  &dmabuf->phys,
5253					  GFP_KERNEL);
5254	if (!dmabuf->virt) {
5255		kfree(dmabuf);
5256		return -ENOMEM;
5257	}
5258	memset(dmabuf->virt, 0, bmbx_size);
5259
5260	/*
5261	 * Initialize the bootstrap mailbox pointers now so that the register
5262	 * operations are simple later.  The mailbox dma address is required
5263	 * to be 16-byte aligned.  Also align the virtual memory as each
5264	 * mailbox is copied into the bmbx mailbox region before issuing the
5265	 * command to the port.
5266	 */
5267	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5268	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5269
5270	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5271					      LPFC_ALIGN_16_BYTE);
5272	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5273					      LPFC_ALIGN_16_BYTE);
5274
5275	/*
5276	 * Set the high and low physical addresses now.  The SLI4 alignment
5277	 * requirement is 16 bytes and the mailbox is posted to the port
5278	 * as two 30-bit addresses.  A marker bit in each posted word records
5279	 * whether it carries the high or the low half of the address.
5280	 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
5281	 * cleanly on 32-bit machines.
5282	 */
5283	dma_address = &phba->sli4_hba.bmbx.dma_address;
5284	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5285	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5286	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5287					   LPFC_BMBX_BIT1_ADDR_HI);
5288
5289	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5290	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5291					   LPFC_BMBX_BIT1_ADDR_LO);
5292	return 0;
5293}
5294
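/*
 * Illustration (hypothetical address, not driver code) of the 30-bit
 * split performed above, for a 16-byte aligned aphys = 0x1234567890:
 *
 *	(aphys >> 34) & 0x3fffffff = 0x00000004	(high 30 bits)
 *	(aphys >>  4) & 0x3fffffff = 0x23456789	(low 30 bits)
 *
 * Each half is then shifted left by 2 and tagged with the BIT1_ADDR_HI
 * or BIT1_ADDR_LO marker before being posted to the port.
 */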
5295/**
5296 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5297 * @phba: pointer to lpfc hba data structure.
5298 *
5299 * This routine is invoked to teardown the bootstrap mailbox
5300 * region and release all host resources. This routine requires
5301 * the caller to ensure all mailbox commands have been recovered, that no
5302 * additional mailbox commands are sent, and that interrupts are disabled
5303 * before calling this routine.
5304 *
5305 **/
5306static void
5307lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5308{
5309	dma_free_coherent(&phba->pcidev->dev,
5310			  phba->sli4_hba.bmbx.bmbx_size,
5311			  phba->sli4_hba.bmbx.dmabuf->virt,
5312			  phba->sli4_hba.bmbx.dmabuf->phys);
5313
5314	kfree(phba->sli4_hba.bmbx.dmabuf);
5315	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5316}
5317
5318/**
5319 * lpfc_sli4_read_config - Get the config parameters.
5320 * @phba: pointer to lpfc hba data structure.
5321 *
5322 * This routine is invoked to read the configuration parameters from the HBA.
5323 * The configuration parameters are used to set the base and maximum values
5324 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5325 * allocation for the port.
5326 *
5327 * Return codes
5328 * 	0 - successful
5329 * 	ENOMEM - No availble memory
5330 * 	ENOMEM - No available memory
5331 **/
5332static int
5333lpfc_sli4_read_config(struct lpfc_hba *phba)
5334{
5335	LPFC_MBOXQ_t *pmb;
5336	struct lpfc_mbx_read_config *rd_config;
5337	uint32_t rc = 0;
5338
5339	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5340	if (!pmb) {
5341		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5342				"2011 Unable to allocate memory for issuing "
5343				"SLI_CONFIG_SPECIAL mailbox command\n");
5344		return -ENOMEM;
5345	}
5346
5347	lpfc_read_config(phba, pmb);
5348
5349	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5350	if (rc != MBX_SUCCESS) {
5351		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5352			"2012 Mailbox failed, mbxCmd x%x "
5353			"READ_CONFIG, mbxStatus x%x\n",
5354			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5355			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5356		rc = -EIO;
5357	} else {
5358		rd_config = &pmb->u.mqe.un.rd_config;
5359		phba->sli4_hba.max_cfg_param.max_xri =
5360			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5361		phba->sli4_hba.max_cfg_param.xri_base =
5362			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5363		phba->sli4_hba.max_cfg_param.max_vpi =
5364			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5365		phba->sli4_hba.max_cfg_param.vpi_base =
5366			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5367		phba->sli4_hba.max_cfg_param.max_rpi =
5368			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5369		phba->sli4_hba.max_cfg_param.rpi_base =
5370			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5371		phba->sli4_hba.max_cfg_param.max_vfi =
5372			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5373		phba->sli4_hba.max_cfg_param.vfi_base =
5374			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5375		phba->sli4_hba.max_cfg_param.max_fcfi =
5376			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5377		phba->sli4_hba.max_cfg_param.fcfi_base =
5378			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5379		phba->sli4_hba.max_cfg_param.max_eq =
5380			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5381		phba->sli4_hba.max_cfg_param.max_rq =
5382			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5383		phba->sli4_hba.max_cfg_param.max_wq =
5384			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5385		phba->sli4_hba.max_cfg_param.max_cq =
5386			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5387		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5388		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5389		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5390		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5391		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5392		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5393				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5394		phba->max_vports = phba->max_vpi;
5395		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5396				"2003 cfg params XRI(B:%d M:%d), "
5397				"VPI(B:%d M:%d) "
5398				"VFI(B:%d M:%d) "
5399				"RPI(B:%d M:%d) "
5400				"FCFI(B:%d M:%d)\n",
5401				phba->sli4_hba.max_cfg_param.xri_base,
5402				phba->sli4_hba.max_cfg_param.max_xri,
5403				phba->sli4_hba.max_cfg_param.vpi_base,
5404				phba->sli4_hba.max_cfg_param.max_vpi,
5405				phba->sli4_hba.max_cfg_param.vfi_base,
5406				phba->sli4_hba.max_cfg_param.max_vfi,
5407				phba->sli4_hba.max_cfg_param.rpi_base,
5408				phba->sli4_hba.max_cfg_param.max_rpi,
5409				phba->sli4_hba.max_cfg_param.fcfi_base,
5410				phba->sli4_hba.max_cfg_param.max_fcfi);
5411	}
5412	mempool_free(pmb, phba->mbox_mem_pool);
5413
5414	/* Cap the configured HBA queue depth at the max XRI count */
5415	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
5416		phba->cfg_hba_queue_depth =
5417				phba->sli4_hba.max_cfg_param.max_xri;
5418	return rc;
5419}
5420
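/*
 * Illustration (hypothetical values): in the "2003 cfg params" message
 * above, B is the base (first index assigned to this function) and M is
 * the maximum count. Likewise, if READ_CONFIG reported max_xri = 1024
 * while cfg_hba_queue_depth was 2048, the clamp at the end of this
 * routine would lower the queue depth to 1024 so that outstanding I/Os
 * can never exceed the available XRIs.
 */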
5421/**
5422 * lpfc_setup_endian_order - Notify the port of the host's endian order.
5423 * @phba: pointer to lpfc hba data structure.
5424 *
5425 * This routine is invoked to setup the host-side endian order to the
5426 * HBA consistent with the SLI-4 interface spec.
5427 *
5428 * Return codes
5429 * 	0 - successful
5430 * 	ENOMEM - No available memory
5431 *      EIO - The mailbox failed to complete successfully.
5432 **/
5433static int
5434lpfc_setup_endian_order(struct lpfc_hba *phba)
5435{
5436	LPFC_MBOXQ_t *mboxq;
5437	uint32_t rc = 0;
5438	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5439				      HOST_ENDIAN_HIGH_WORD1};
5440
5441	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5442	if (!mboxq) {
5443		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5444				"0492 Unable to allocate memory for issuing "
5445				"SLI_CONFIG_SPECIAL mailbox command\n");
5446		return -ENOMEM;
5447	}
5448
5449	/*
5450	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5451	 * words to contain special data values and no other data.
5452	 */
5453	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5454	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5455	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5456	if (rc != MBX_SUCCESS) {
5457		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5458				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
5459				"status x%x\n",
5460				rc);
5461		rc = -EIO;
5462	}
5463
5464	mempool_free(mboxq, phba->mbox_mem_pool);
5465	return rc;
5466}
5467
5468/**
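/*
 * Note (editorial, presumed behavior): the two magic words written
 * above act as a byte-order probe. The port knows their canonical
 * values, so the byte order in which they arrive tells it whether the
 * host is little- or big-endian, and it can byte-swap subsequent
 * mailbox payloads accordingly.
 */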
5469 * lpfc_sli4_queue_create - Create all the SLI4 queues
5470 * @phba: pointer to lpfc hba data structure.
5471 *
5472 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5473 * operation. For each SLI4 queue type, the parameters such as queue entry
5474 * count (queue depth) shall be taken from the module parameter. For now,
5475 * we just use a constant number as a placeholder.
5476 *
5477 * Return codes
5478 *      0 - successful
5479 *      ENOMEM - No available memory
5481 **/
5482static int
5483lpfc_sli4_queue_create(struct lpfc_hba *phba)
5484{
5485	struct lpfc_queue *qdesc;
5486	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5487	int cfg_fcp_wq_count;
5488	int cfg_fcp_eq_count;
5489
5490	/*
5491	 * Sanity check for configured queue parameters against the run-time
5492	 * device parameters
5493	 */
5494
5495	/* Sanity check on FCP fast-path WQ parameters */
5496	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5497	if (cfg_fcp_wq_count >
5498	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5499		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5500				   LPFC_SP_WQN_DEF;
5501		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5502			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5503					"2581 Not enough WQs (%d) from "
5504					"the pci function for supporting "
5505					"FCP WQs (%d)\n",
5506					phba->sli4_hba.max_cfg_param.max_wq,
5507					phba->cfg_fcp_wq_count);
5508			goto out_error;
5509		}
5510		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5511				"2582 Not enough WQs (%d) from the pci "
5512				"function for supporting the requested "
5513				"FCP WQs (%d), the actual FCP WQs can "
5514				"be supported: %d\n",
5515				phba->sli4_hba.max_cfg_param.max_wq,
5516				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5517	}
5518	/* The actual number of FCP work queues adopted */
5519	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5520
5521	/* Sanity check on FCP fast-path EQ parameters */
5522	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5523	if (cfg_fcp_eq_count >
5524	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5525		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5526				   LPFC_SP_EQN_DEF;
5527		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5528			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5529					"2574 Not enough EQs (%d) from the "
5530					"pci function for supporting FCP "
5531					"EQs (%d)\n",
5532					phba->sli4_hba.max_cfg_param.max_eq,
5533					phba->cfg_fcp_eq_count);
5534			goto out_error;
5535		}
5536		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5537				"2575 Not enough EQs (%d) from the pci "
5538				"function for supporting the requested "
5539				"FCP EQs (%d), the actual FCP EQs can "
5540				"be supported: %d\n",
5541				phba->sli4_hba.max_cfg_param.max_eq,
5542				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5543	}
5544	/* It does not make sense to have more EQs than WQs */
5545	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5546		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5547				"2593 The FCP EQ count (%d) cannot be greater "
5548				"than the FCP WQ count (%d), limiting the "
5549				"FCP EQ count to %d\n", cfg_fcp_eq_count,
5550				phba->cfg_fcp_wq_count,
5551				phba->cfg_fcp_wq_count);
5552		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5553	}
5554	/* The actual number of FCP event queues adopted */
5555	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5556	/* The overall number of event queues used */
5557	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5558
5559	/*
5560	 * Create Event Queues (EQs)
5561	 */
5562
5563	/* Get EQ depth from module parameter, fake the default for now */
5564	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5565	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5566
5567	/* Create slow path event queue */
5568	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5569				      phba->sli4_hba.eq_ecount);
5570	if (!qdesc) {
5571		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5572				"0496 Failed allocate slow-path EQ\n");
5573		goto out_error;
5574	}
5575	phba->sli4_hba.sp_eq = qdesc;
5576
5577	/* Create fast-path FCP Event Queue(s) */
5578	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5579			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5580	if (!phba->sli4_hba.fp_eq) {
5581		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5582				"2576 Failed allocate memory for fast-path "
5583				"EQ record array\n");
5584		goto out_free_sp_eq;
5585	}
5586	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5587		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5588					      phba->sli4_hba.eq_ecount);
5589		if (!qdesc) {
5590			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5591					"0497 Failed allocate fast-path EQ\n");
5592			goto out_free_fp_eq;
5593		}
5594		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5595	}
5596
5597	/*
5598	 * Create Completion Queues (CQs)
5599	 */
5600
5601	/* Get CQ depth from module parameter, fake the default for now */
5602	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5603	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5604
5605	/* Create slow-path Mailbox Command Complete Queue */
5606	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5607				      phba->sli4_hba.cq_ecount);
5608	if (!qdesc) {
5609		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5610				"0500 Failed allocate slow-path mailbox CQ\n");
5611		goto out_free_fp_eq;
5612	}
5613	phba->sli4_hba.mbx_cq = qdesc;
5614
5615	/* Create slow-path ELS Complete Queue */
5616	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5617				      phba->sli4_hba.cq_ecount);
5618	if (!qdesc) {
5619		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5620				"0501 Failed allocate slow-path ELS CQ\n");
5621		goto out_free_mbx_cq;
5622	}
5623	phba->sli4_hba.els_cq = qdesc;
5624
5625
5626	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5627	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5628				phba->cfg_fcp_eq_count), GFP_KERNEL);
5629	if (!phba->sli4_hba.fcp_cq) {
5630		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5631				"2577 Failed allocate memory for fast-path "
5632				"CQ record array\n");
5633		goto out_free_els_cq;
5634	}
5635	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5636		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5637					      phba->sli4_hba.cq_ecount);
5638		if (!qdesc) {
5639			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5640					"0499 Failed allocate fast-path FCP "
5641					"CQ (%d)\n", fcp_cqidx);
5642			goto out_free_fcp_cq;
5643		}
5644		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5645	}
5646
5647	/* Create Mailbox Command Queue */
5648	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5649	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5650
5651	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5652				      phba->sli4_hba.mq_ecount);
5653	if (!qdesc) {
5654		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5655				"0505 Failed allocate slow-path MQ\n");
5656		goto out_free_fcp_cq;
5657	}
5658	phba->sli4_hba.mbx_wq = qdesc;
5659
5660	/*
5661	 * Create all the Work Queues (WQs)
5662	 */
5663	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5664	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5665
5666	/* Create slow-path ELS Work Queue */
5667	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5668				      phba->sli4_hba.wq_ecount);
5669	if (!qdesc) {
5670		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5671				"0504 Failed allocate slow-path ELS WQ\n");
5672		goto out_free_mbx_wq;
5673	}
5674	phba->sli4_hba.els_wq = qdesc;
5675
5676	/* Create fast-path FCP Work Queue(s) */
5677	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5678				phba->cfg_fcp_wq_count), GFP_KERNEL);
5679	if (!phba->sli4_hba.fcp_wq) {
5680		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5681				"2578 Failed allocate memory for fast-path "
5682				"WQ record array\n");
5683		goto out_free_els_wq;
5684	}
5685	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5686		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5687					      phba->sli4_hba.wq_ecount);
5688		if (!qdesc) {
5689			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5690					"0503 Failed allocate fast-path FCP "
5691					"WQ (%d)\n", fcp_wqidx);
5692			goto out_free_fcp_wq;
5693		}
5694		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5695	}
5696
5697	/*
5698	 * Create Receive Queue (RQ)
5699	 */
5700	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5701	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5702
5703	/* Create Receive Queue for header */
5704	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5705				      phba->sli4_hba.rq_ecount);
5706	if (!qdesc) {
5707		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5708				"0506 Failed allocate receive HRQ\n");
5709		goto out_free_fcp_wq;
5710	}
5711	phba->sli4_hba.hdr_rq = qdesc;
5712
5713	/* Create Receive Queue for data */
5714	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5715				      phba->sli4_hba.rq_ecount);
5716	if (!qdesc) {
5717		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5718				"0507 Failed allocate receive DRQ\n");
5719		goto out_free_hdr_rq;
5720	}
5721	phba->sli4_hba.dat_rq = qdesc;
5722
5723	return 0;
5724
5725out_free_hdr_rq:
5726	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5727	phba->sli4_hba.hdr_rq = NULL;
5728out_free_fcp_wq:
5729	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5730		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5731		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5732	}
5733	kfree(phba->sli4_hba.fcp_wq);
5734out_free_els_wq:
5735	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5736	phba->sli4_hba.els_wq = NULL;
5737out_free_mbx_wq:
5738	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5739	phba->sli4_hba.mbx_wq = NULL;
5740out_free_fcp_cq:
5741	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5742		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5743		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5744	}
5745	kfree(phba->sli4_hba.fcp_cq);
5746out_free_els_cq:
5747	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5748	phba->sli4_hba.els_cq = NULL;
5749out_free_mbx_cq:
5750	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5751	phba->sli4_hba.mbx_cq = NULL;
5752out_free_fp_eq:
5753	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5754		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5755		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5756	}
5757	kfree(phba->sli4_hba.fp_eq);
5758out_free_sp_eq:
5759	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5760	phba->sli4_hba.sp_eq = NULL;
5761out_error:
5762	return -ENOMEM;
5763}
5764
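/*
 * Illustration (hypothetical values) of the clamping above: if
 * READ_CONFIG reported max_wq = 8 and one WQ is reserved for the slow
 * path (assuming LPFC_SP_WQN_DEF == 1), a request for
 * cfg_fcp_wq_count = 12 would be trimmed to 7 fast-path WQs, and the
 * FCP EQ count would then be capped so it never exceeds the adopted
 * WQ count.
 */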
5765/**
5766 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5767 * @phba: pointer to lpfc hba data structure.
5768 *
5769 * This routine is invoked to release all the SLI4 queues for the FCoE HBA
5770 * operation.
5776 **/
5777static void
5778lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5779{
5780	int fcp_qidx;
5781
5782	/* Release mailbox command work queue */
5783	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5784	phba->sli4_hba.mbx_wq = NULL;
5785
5786	/* Release ELS work queue */
5787	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5788	phba->sli4_hba.els_wq = NULL;
5789
5790	/* Release FCP work queue */
5791	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5792		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5793	kfree(phba->sli4_hba.fcp_wq);
5794	phba->sli4_hba.fcp_wq = NULL;
5795
5796	/* Release unsolicited receive queue */
5797	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5798	phba->sli4_hba.hdr_rq = NULL;
5799	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5800	phba->sli4_hba.dat_rq = NULL;
5801
5802	/* Release ELS complete queue */
5803	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5804	phba->sli4_hba.els_cq = NULL;
5805
5806	/* Release mailbox command complete queue */
5807	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5808	phba->sli4_hba.mbx_cq = NULL;
5809
5810	/* Release FCP response complete queue */
5811	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5812		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5813	kfree(phba->sli4_hba.fcp_cq);
5814	phba->sli4_hba.fcp_cq = NULL;
5815
5816	/* Release fast-path event queue */
5817	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5818		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5819	kfree(phba->sli4_hba.fp_eq);
5820	phba->sli4_hba.fp_eq = NULL;
5821
5822	/* Release slow-path event queue */
5823	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5824	phba->sli4_hba.sp_eq = NULL;
5825
5826	return;
5827}
5828
5829/**
5830 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5831 * @phba: pointer to lpfc hba data structure.
5832 *
5833 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5834 * operation.
5835 *
5836 * Return codes
5837 *      0 - successful
5838 *      ENOMEM - No available memory
5839 *      EIO - The mailbox failed to complete successfully.
5840 **/
5841int
5842lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5843{
5844	int rc = -ENOMEM;
5845	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5846	int fcp_cq_index = 0;
5847
5848	/*
5849	 * Set up Event Queues (EQs)
5850	 */
5851
5852	/* Set up slow-path event queue */
5853	if (!phba->sli4_hba.sp_eq) {
5854		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5855				"0520 Slow-path EQ not allocated\n");
5856		goto out_error;
5857	}
5858	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5859			    LPFC_SP_DEF_IMAX);
5860	if (rc) {
5861		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5862				"0521 Failed setup of slow-path EQ: "
5863				"rc = 0x%x\n", rc);
5864		goto out_error;
5865	}
5866	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5867			"2583 Slow-path EQ setup: queue-id=%d\n",
5868			phba->sli4_hba.sp_eq->queue_id);
5869
5870	/* Set up fast-path event queue */
5871	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5872		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5873			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5874					"0522 Fast-path EQ (%d) not "
5875					"allocated\n", fcp_eqidx);
5876			goto out_destroy_fp_eq;
5877		}
5878		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5879				    phba->cfg_fcp_imax);
5880		if (rc) {
5881			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5882					"0523 Failed setup of fast-path EQ "
5883					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5884			goto out_destroy_fp_eq;
5885		}
5886		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5887				"2584 Fast-path EQ setup: "
5888				"queue[%d]-id=%d\n", fcp_eqidx,
5889				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5890	}
5891
5892	/*
5893	 * Set up Completion Queues (CQs)
5894	 */
5895
5896	/* Set up slow-path MBOX Complete Queue as the first CQ */
5897	if (!phba->sli4_hba.mbx_cq) {
5898		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5899				"0528 Mailbox CQ not allocated\n");
5900		goto out_destroy_fp_eq;
5901	}
5902	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5903			    LPFC_MCQ, LPFC_MBOX);
5904	if (rc) {
5905		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5906				"0529 Failed setup of slow-path mailbox CQ: "
5907				"rc = 0x%x\n", rc);
5908		goto out_destroy_fp_eq;
5909	}
5910	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5911			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5912			phba->sli4_hba.mbx_cq->queue_id,
5913			phba->sli4_hba.sp_eq->queue_id);
5914
5915	/* Set up slow-path ELS Complete Queue */
5916	if (!phba->sli4_hba.els_cq) {
5917		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5918				"0530 ELS CQ not allocated\n");
5919		goto out_destroy_mbx_cq;
5920	}
5921	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5922			    LPFC_WCQ, LPFC_ELS);
5923	if (rc) {
5924		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5925				"0531 Failed setup of slow-path ELS CQ: "
5926				"rc = 0x%x\n", rc);
5927		goto out_destroy_mbx_cq;
5928	}
5929	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5930			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5931			phba->sli4_hba.els_cq->queue_id,
5932			phba->sli4_hba.sp_eq->queue_id);
5933
5934	/* Set up fast-path FCP Response Complete Queue */
5935	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5936		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5937			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5938					"0526 Fast-path FCP CQ (%d) not "
5939					"allocated\n", fcp_cqidx);
5940			goto out_destroy_fcp_cq;
5941		}
5942		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5943				    phba->sli4_hba.fp_eq[fcp_cqidx],
5944				    LPFC_WCQ, LPFC_FCP);
5945		if (rc) {
5946			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5947					"0527 Failed setup of fast-path FCP "
5948					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5949			goto out_destroy_fcp_cq;
5950		}
5951		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5952				"2588 FCP CQ setup: cq[%d]-id=%d, "
5953				"parent eq[%d]-id=%d\n",
5954				fcp_cqidx,
5955				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5956				fcp_cqidx,
5957				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5958	}
5959
5960	/*
5961	 * Set up all the Work Queues (WQs)
5962	 */
5963
5964	/* Set up Mailbox Command Queue */
5965	if (!phba->sli4_hba.mbx_wq) {
5966		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5967				"0538 Slow-path MQ not allocated\n");
5968		goto out_destroy_fcp_cq;
5969	}
5970	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5971			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
5972	if (rc) {
5973		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5974				"0539 Failed setup of slow-path MQ: "
5975				"rc = 0x%x\n", rc);
5976		goto out_destroy_fcp_cq;
5977	}
5978	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5979			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5980			phba->sli4_hba.mbx_wq->queue_id,
5981			phba->sli4_hba.mbx_cq->queue_id);
5982
5983	/* Set up slow-path ELS Work Queue */
5984	if (!phba->sli4_hba.els_wq) {
5985		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5986				"0536 Slow-path ELS WQ not allocated\n");
5987		goto out_destroy_mbx_wq;
5988	}
5989	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5990			    phba->sli4_hba.els_cq, LPFC_ELS);
5991	if (rc) {
5992		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5993				"0537 Failed setup of slow-path ELS WQ: "
5994				"rc = 0x%x\n", rc);
5995		goto out_destroy_mbx_wq;
5996	}
5997	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5998			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5999			phba->sli4_hba.els_wq->queue_id,
6000			phba->sli4_hba.els_cq->queue_id);
6001
6002	/* Set up fast-path FCP Work Queue */
6003	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6004		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6005			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6006					"0534 Fast-path FCP WQ (%d) not "
6007					"allocated\n", fcp_wqidx);
6008			goto out_destroy_fcp_wq;
6009		}
6010		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6011				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6012				    LPFC_FCP);
6013		if (rc) {
6014			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6015					"0535 Failed setup of fast-path FCP "
6016					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6017			goto out_destroy_fcp_wq;
6018		}
6019		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6020				"2591 FCP WQ setup: wq[%d]-id=%d, "
6021				"parent cq[%d]-id=%d\n",
6022				fcp_wqidx,
6023				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6024				fcp_cq_index,
6025				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6026		/* Round robin FCP Work Queue's Completion Queue assignment */
6027		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6028	}
6029
6030	/*
6031	 * Set up Receive Queue (RQ)
6032	 */
6033	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6034		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6035				"0540 Receive Queue not allocated\n");
6036		goto out_destroy_fcp_wq;
6037	}
6038	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6039			    phba->sli4_hba.els_cq, LPFC_USOL);
6040	if (rc) {
6041		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6042				"0541 Failed setup of Receive Queue: "
6043				"rc = 0x%x\n", rc);
6044		goto out_destroy_fcp_wq;
6045	}
6046	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6047			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6048			"parent cq-id=%d\n",
6049			phba->sli4_hba.hdr_rq->queue_id,
6050			phba->sli4_hba.dat_rq->queue_id,
6051			phba->sli4_hba.els_cq->queue_id);
6052	return 0;
6053
6054out_destroy_fcp_wq:
6055	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6056		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6057	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6058out_destroy_mbx_wq:
6059	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6060out_destroy_fcp_cq:
6061	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6062		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6063	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6064out_destroy_mbx_cq:
6065	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6066out_destroy_fp_eq:
6067	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6068		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6069	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6070out_error:
6071	return rc;
6072}
6073
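/*
 * Illustration (hypothetical counts) of the round-robin WQ-to-CQ
 * binding above: with cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2,
 * the fast-path work queues attach as
 *
 *	WQ0 -> CQ0, WQ1 -> CQ1, WQ2 -> CQ0, WQ3 -> CQ1
 *
 * spreading completion traffic evenly across the fast-path EQ/CQ pairs.
 */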
6074/**
6075 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6076 * @phba: pointer to lpfc hba data structure.
6077 *
6078 * This routine is invoked to unset all the SLI4 queues for the FCoE HBA
6079 * operation.
6080 *
6086void
6087lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6088{
6089	int fcp_qidx;
6090
6091	/* Unset mailbox command work queue */
6092	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6093	/* Unset ELS work queue */
6094	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6095	/* Unset unsolicited receive queue */
6096	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6097	/* Unset FCP work queue */
6098	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6099		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6100	/* Unset mailbox command complete queue */
6101	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6102	/* Unset ELS complete queue */
6103	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6104	/* Unset FCP response complete queue */
6105	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6106		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6107	/* Unset fast-path event queue */
6108	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6109		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6110	/* Unset slow-path event queue */
6111	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6112}
6113
6114/**
6115 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6116 * @phba: pointer to lpfc hba data structure.
6117 *
6118 * This routine is invoked to allocate and set up a pool of completion queue
6119 * events. The body of the completion queue event is a completion queue entry
6120 * (CQE). For now, this pool is used for the interrupt service routine to queue
6121 * the following HBA completion queue events for the worker thread to process:
6122 *   - Mailbox asynchronous events
6123 *   - Receive queue completion unsolicited events
6124 * Later, this can be used for all the slow-path events.
6125 *
6126 * Return codes
6127 *      0 - successful
6128 *      -ENOMEM - No available memory
6129 **/
6130static int
6131lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6132{
6133	struct lpfc_cq_event *cq_event;
6134	int i;
6135
6136	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6137		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6138		if (!cq_event)
6139			goto out_pool_create_fail;
6140		list_add_tail(&cq_event->list,
6141			      &phba->sli4_hba.sp_cqe_event_pool);
6142	}
6143	return 0;
6144
6145out_pool_create_fail:
6146	lpfc_sli4_cq_event_pool_destroy(phba);
6147	return -ENOMEM;
6148}
6149
6150/**
6151 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6152 * @phba: pointer to lpfc hba data structure.
6153 *
6154 * This routine is invoked to free the pool of completion queue events at
6155 * driver unload time. Note that it is the responsibility of the driver
6156 * cleanup routine to free all the outstanding completion-queue events
6157 * allocated from this pool back into the pool before invoking this routine
6158 * to destroy the pool.
6159 **/
6160static void
6161lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6162{
6163	struct lpfc_cq_event *cq_event, *next_cq_event;
6164
6165	list_for_each_entry_safe(cq_event, next_cq_event,
6166				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6167		list_del(&cq_event->list);
6168		kfree(cq_event);
6169	}
6170}
6171
6172/**
6173 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6174 * @phba: pointer to lpfc hba data structure.
6175 *
6176 * This routine is the lock-free version of the API invoked to allocate a
6177 * completion-queue event from the free pool.
6178 *
6179 * Return: Pointer to the newly allocated completion-queue event if successful
6180 *         NULL otherwise.
6181 **/
6182struct lpfc_cq_event *
6183__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6184{
6185	struct lpfc_cq_event *cq_event = NULL;
6186
6187	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6188			 struct lpfc_cq_event, list);
6189	return cq_event;
6190}
6191
6192/**
6193 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6194 * @phba: pointer to lpfc hba data structure.
6195 *
6196 * This routine is the locked version of the API invoked to allocate a
6197 * completion-queue event from the free pool.
6198 *
6199 * Return: Pointer to the newly allocated completion-queue event if successful
6200 *         NULL otherwise.
6201 **/
6202struct lpfc_cq_event *
6203lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6204{
6205	struct lpfc_cq_event *cq_event;
6206	unsigned long iflags;
6207
6208	spin_lock_irqsave(&phba->hbalock, iflags);
6209	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6210	spin_unlock_irqrestore(&phba->hbalock, iflags);
6211	return cq_event;
6212}
6213
6214/**
6215 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6216 * @phba: pointer to lpfc hba data structure.
6217 * @cq_event: pointer to the completion queue event to be freed.
6218 *
6219 * This routine is the lock-free version of the API invoked to release a
6220 * completion-queue event back into the free pool.
6221 **/
6222void
6223__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6224			     struct lpfc_cq_event *cq_event)
6225{
6226	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6227}
6228
6229/**
6230 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6231 * @phba: pointer to lpfc hba data structure.
6232 * @cq_event: pointer to the completion queue event to be freed.
6233 *
6234 * This routine is the locked version of the API invoked to release a
6235 * completion-queue event back into the free pool.
6236 **/
6237void
6238lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6239			   struct lpfc_cq_event *cq_event)
6240{
6241	unsigned long iflags;
6242	spin_lock_irqsave(&phba->hbalock, iflags);
6243	__lpfc_sli4_cq_event_release(phba, cq_event);
6244	spin_unlock_irqrestore(&phba->hbalock, iflags);
6245}
6246
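/*
 * Sketch of the intended calling pattern (hypothetical caller, not
 * driver code):
 *
 *	struct lpfc_cq_event *cqe = lpfc_sli4_cq_event_alloc(phba);
 *	if (cqe) {
 *		(copy the CQE payload and queue it to a work list)
 *		lpfc_sli4_cq_event_release(phba, cqe);
 *	}
 *
 * The __lpfc_sli4_cq_event_* variants perform the same list operations
 * but assume the caller already holds phba->hbalock.
 */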
6247/**
6248 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6249 * @phba: pointer to lpfc hba data structure.
6250 *
6251 * This routine is invoked to release all the pending completion-queue events
6252 * back into the free pool for device reset.
6253 **/
6254static void
6255lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6256{
6257	LIST_HEAD(cqelist);
6258	struct lpfc_cq_event *cqe;
6259	unsigned long iflags;
6260
6261	/* Retrieve all the pending WCQEs from pending WCQE lists */
6262	spin_lock_irqsave(&phba->hbalock, iflags);
6263	/* Pending FCP XRI abort events */
6264	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6265			 &cqelist);
6266	/* Pending ELS XRI abort events */
6267	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6268			 &cqelist);
6269	/* Pending async events */
6270	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6271			 &cqelist);
6272	spin_unlock_irqrestore(&phba->hbalock, iflags);
6273
6274	while (!list_empty(&cqelist)) {
6275		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6276		lpfc_sli4_cq_event_release(phba, cqe);
6277	}
6278}
6279
6280/**
6281 * lpfc_pci_function_reset - Reset pci function.
6282 * @phba: pointer to lpfc hba data structure.
6283 *
6284 * This routine is invoked to request a PCI function reset. It will destroy
6285 * all resources assigned to the PCI function which originates this request.
6286 *
6287 * Return codes
6288 *      0 - successful
6289 *      ENOMEM - No available memory
6290 *      ENXIO - The mailbox failed to complete successfully.
6291 **/
6292int
6293lpfc_pci_function_reset(struct lpfc_hba *phba)
6294{
6295	LPFC_MBOXQ_t *mboxq;
6296	uint32_t rc = 0;
6297	uint32_t shdr_status, shdr_add_status;
6298	union lpfc_sli4_cfg_shdr *shdr;
6299
6300	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6301	if (!mboxq) {
6302		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6303				"0494 Unable to allocate memory for issuing "
6304				"SLI_FUNCTION_RESET mailbox command\n");
6305		return -ENOMEM;
6306	}
6307
6308	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6309	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6310			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6311			 LPFC_SLI4_MBX_EMBED);
6312	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6313	shdr = (union lpfc_sli4_cfg_shdr *)
6314		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6315	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6316	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6317	if (rc != MBX_TIMEOUT)
6318		mempool_free(mboxq, phba->mbox_mem_pool);
6319	if (shdr_status || shdr_add_status || rc) {
6320		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6321				"0495 SLI_FUNCTION_RESET mailbox failed with "
6322				"status x%x add_status x%x, mbx status x%x\n",
6323				shdr_status, shdr_add_status, rc);
6324		rc = -ENXIO;
6325	}
6326	return rc;
6327}
6328
6329/**
6330 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6331 * @phba: pointer to lpfc hba data structure.
6332 * @cnt: number of nop mailbox commands to send.
6333 *
6334 * This routine is invoked to send @cnt NOP mailbox commands and
6335 * wait for each command to complete.
6336 *
6337 * Return: the number of NOP mailbox commands completed.
6338 **/
6339static int
6340lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6341{
6342	LPFC_MBOXQ_t *mboxq;
6343	int length, cmdsent;
6344	uint32_t mbox_tmo;
6345	uint32_t rc = 0;
6346	uint32_t shdr_status, shdr_add_status;
6347	union lpfc_sli4_cfg_shdr *shdr;
6348
6349	if (cnt == 0) {
6350		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6351				"2518 Requested to send 0 NOP mailbox cmd\n");
6352		return cnt;
6353	}
6354
6355	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6356	if (!mboxq) {
6357		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6358				"2519 Unable to allocate memory for issuing "
6359				"NOP mailbox command\n");
6360		return 0;
6361	}
6362
6363	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6364	length = (sizeof(struct lpfc_mbx_nop) -
6365		  sizeof(struct lpfc_sli4_cfg_mhdr));
6366	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6367			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6368
6369	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6370	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6371		if (!phba->sli4_hba.intr_enable)
6372			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6373		else
6374			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6375		if (rc == MBX_TIMEOUT)
6376			break;
6377		/* Check return status */
6378		shdr = (union lpfc_sli4_cfg_shdr *)
6379			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6380		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6381		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6382					 &shdr->response);
6383		if (shdr_status || shdr_add_status || rc) {
6384			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6385					"2520 NOP mailbox command failed "
6386					"status x%x add_status x%x mbx "
6387					"status x%x\n", shdr_status,
6388					shdr_add_status, rc);
6389			break;
6390		}
6391	}
6392
6393	if (rc != MBX_TIMEOUT)
6394		mempool_free(mboxq, phba->mbox_mem_pool);
6395
6396	return cmdsent;
6397}
6398
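/*
 * Sketch (hypothetical caller): since the routine returns the number of
 * NOPs that actually completed, a caller can sanity check the mailbox
 * channel with, e.g.
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, 3) != 3)
 *		(treat the mailbox path as suspect)
 */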
6399/**
6400 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
6401 * @phba: pointer to lpfc hba data structure.
6402 * @fcfi: fcf index.
6403 * @fcfi: FCF index.
6404 *
6405 * This routine is invoked to unregister a FCFI from the device.
6406void
6407lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6408{
6409	LPFC_MBOXQ_t *mbox;
6410	uint32_t mbox_tmo;
6411	int rc;
6412	unsigned long flags;
6413
6414	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6415
6416	if (!mbox)
6417		return;
6418
6419	lpfc_unreg_fcfi(mbox, fcfi);
6420
6421	if (!phba->sli4_hba.intr_enable)
6422		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6423	else {
6424		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6425		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6426	}
6427	if (rc != MBX_TIMEOUT)
6428		mempool_free(mbox, phba->mbox_mem_pool);
6429	if (rc != MBX_SUCCESS)
6430		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6431				"2517 Unregister FCFI command failed "
6432				"status %d, mbxStatus x%x\n", rc,
6433				bf_get(lpfc_mqe_status, &mbox->u.mqe));
6434	else {
6435		spin_lock_irqsave(&phba->hbalock, flags);
6436		/* Mark the FCFI as no longer registered */
6437		phba->fcf.fcf_flag &=
6438			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6439		spin_unlock_irqrestore(&phba->hbalock, flags);
6440	}
6441}
6442
6443/**
6444 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6445 * @phba: pointer to lpfc hba data structure.
6446 *
6447 * This routine is invoked to set up the PCI device memory space for device
6448 * with SLI-4 interface spec.
6449 *
6450 * Return codes
6451 * 	0 - successful
6452 * 	other values - error
6453 **/
6454static int
6455lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6456{
6457	struct pci_dev *pdev;
6458	unsigned long bar0map_len, bar1map_len, bar2map_len;
6459	int error = -ENODEV;
6460
6461	/* Obtain PCI device reference */
6462	if (!phba->pcidev)
6463		return error;
6464	else
6465		pdev = phba->pcidev;
6466
6467	/* Set the device DMA mask size */
6468	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6469	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6470		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6471		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6472			return error;
6473		}
6474	}
6475
6476	/* Get the bus addresses of the SLI4 device Bar0, Bar1, and Bar2 and
6477	 * the number of bytes required by each mapping. They actually map to
6478	 * PCI BAR regions 0 or 1, 2, and 4 of the SLI4 device.
6479	 */
6480	if (pci_resource_start(pdev, 0)) {
6481		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6482		bar0map_len = pci_resource_len(pdev, 0);
6483	} else {
6484		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6485		bar0map_len = pci_resource_len(pdev, 1);
6486	}
6487	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6488	bar1map_len = pci_resource_len(pdev, 2);
6489
6490	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6491	bar2map_len = pci_resource_len(pdev, 4);
6492
6493	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6494	phba->sli4_hba.conf_regs_memmap_p =
6495				ioremap(phba->pci_bar0_map, bar0map_len);
6496	if (!phba->sli4_hba.conf_regs_memmap_p) {
6497		dev_printk(KERN_ERR, &pdev->dev,
6498			   "ioremap failed for SLI4 PCI config registers.\n");
6499		goto out;
6500	}
6501
6502	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6503	phba->sli4_hba.ctrl_regs_memmap_p =
6504				ioremap(phba->pci_bar1_map, bar1map_len);
6505	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6506		dev_printk(KERN_ERR, &pdev->dev,
6507			   "ioremap failed for SLI4 HBA control registers.\n");
6508		goto out_iounmap_conf;
6509	}
6510
6511	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6512	phba->sli4_hba.drbl_regs_memmap_p =
6513				ioremap(phba->pci_bar2_map, bar2map_len);
6514	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6515		dev_printk(KERN_ERR, &pdev->dev,
6516			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6517		goto out_iounmap_ctrl;
6518	}
6519
6520	/* Set up BAR0 PCI config space register memory map */
6521	lpfc_sli4_bar0_register_memmap(phba);
6522
6523	/* Set up BAR1 register memory map */
6524	lpfc_sli4_bar1_register_memmap(phba);
6525
6526	/* Set up BAR2 register memory map */
6527	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6528	if (error)
6529		goto out_iounmap_all;
6530
6531	return 0;
6532
6533out_iounmap_all:
6534	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6535out_iounmap_ctrl:
6536	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6537out_iounmap_conf:
6538	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6539out:
6540	return error;
6541}
6542
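/*
 * Editorial summary of the SLI4 BAR layout established above:
 *
 *	PCI region 0 (or 1) -> conf_regs_memmap_p (SLI4 config space)
 *	PCI region 2        -> ctrl_regs_memmap_p (control/status, CSR)
 *	PCI region 4        -> drbl_regs_memmap_p (doorbells)
 *
 * The DMA-mask setup at the top follows the usual PCI driver pattern:
 * prefer a 64-bit mask, fall back to 32-bit, and fail only if neither
 * can be set.
 */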
6543/**
6544 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6545 * @phba: pointer to lpfc hba data structure.
6546 *
6547 * This routine is invoked to unset the PCI device memory space for device
6548 * with SLI-4 interface spec.
6549 **/
6550static void
6551lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6552{
6553	struct pci_dev *pdev;
6554
6555	/* Obtain PCI device reference */
6556	if (!phba->pcidev)
6557		return;
6558	else
6559		pdev = phba->pcidev;
6560
6563	/* Unmap I/O memory space */
6564	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6565	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6566	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6567
6568	return;
6569}
6570
6571/**
6572 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6573 * @phba: pointer to lpfc hba data structure.
6574 *
6575 * This routine is invoked to enable the MSI-X interrupt vectors to device
6576 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6577 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6578 * invoked, enables either all or nothing, depending on the current
6579 * availability of PCI vector resources. The device driver is responsible
6580 * for calling the individual request_irq() to register each MSI-X vector
6581 * with an interrupt handler, which is done in this function. Note that
6582 * later, when the device is unloading, the driver should always call free_irq()
6583 * on all MSI-X vectors it has done request_irq() on before calling
6584 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
6585 * will be left with MSI-X enabled, leaking its vectors.
6586 *
6587 * Return codes
6588 *   0 - successful
6589 *   other values - error
6590 **/
6591static int
6592lpfc_sli_enable_msix(struct lpfc_hba *phba)
6593{
6594	int rc, i;
6595	LPFC_MBOXQ_t *pmb;
6596
6597	/* Set up MSI-X multi-message vectors */
6598	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6599		phba->msix_entries[i].entry = i;
6600
6601	/* Configure MSI-X capability structure */
6602	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6603				ARRAY_SIZE(phba->msix_entries));
6604	if (rc) {
6605		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6606				"0420 PCI enable MSI-X failed (%d)\n", rc);
6607		goto msi_fail_out;
6608	}
6609	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6610		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6611				"0477 MSI-X entry[%d]: vector=x%x "
6612				"message=%d\n", i,
6613				phba->msix_entries[i].vector,
6614				phba->msix_entries[i].entry);
6615	/*
6616	 * Assign MSI-X vectors to interrupt handlers
6617	 */
6618
6619	/* vector-0 is associated to slow-path handler */
6620	rc = request_irq(phba->msix_entries[0].vector,
6621			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6622			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6623	if (rc) {
6624		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6625				"0421 MSI-X slow-path request_irq failed "
6626				"(%d)\n", rc);
6627		goto msi_fail_out;
6628	}
6629
6630	/* vector-1 is associated to fast-path handler */
6631	rc = request_irq(phba->msix_entries[1].vector,
6632			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6633			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6634
6635	if (rc) {
6636		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6637				"0429 MSI-X fast-path request_irq failed "
6638				"(%d)\n", rc);
6639		goto irq_fail_out;
6640	}
6641
6642	/*
6643	 * Configure HBA MSI-X attention conditions to messages
6644	 */
6645	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6646
6647	if (!pmb) {
6648		rc = -ENOMEM;
6649		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6650				"0474 Unable to allocate memory for issuing "
6651				"MBOX_CONFIG_MSI command\n");
6652		goto mem_fail_out;
6653	}
6654	rc = lpfc_config_msi(phba, pmb);
6655	if (rc)
6656		goto mbx_fail_out;
6657	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6658	if (rc != MBX_SUCCESS) {
6659		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6660				"0351 Config MSI mailbox command failed, "
6661				"mbxCmd x%x, mbxStatus x%x\n",
6662				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6663		goto mbx_fail_out;
6664	}
6665
6666	/* Free memory allocated for mailbox command */
6667	mempool_free(pmb, phba->mbox_mem_pool);
6668	return rc;
6669
6670mbx_fail_out:
6671	/* Free memory allocated for mailbox command */
6672	mempool_free(pmb, phba->mbox_mem_pool);
6673
6674mem_fail_out:
6675	/* free the irq already requested */
6676	free_irq(phba->msix_entries[1].vector, phba);
6677
6678irq_fail_out:
6679	/* free the irq already requested */
6680	free_irq(phba->msix_entries[0].vector, phba);
6681
6682msi_fail_out:
6683	/* Unconfigure MSI-X capability structure */
6684	pci_disable_msix(phba->pcidev);
6685	return rc;
6686}
6687
6688/**
6689 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6690 * @phba: pointer to lpfc hba data structure.
6691 *
6692 * This routine is invoked to release the MSI-X vectors and then disable the
6693 * MSI-X interrupt mode to device with SLI-3 interface spec.
6694 **/
6695static void
6696lpfc_sli_disable_msix(struct lpfc_hba *phba)
6697{
6698	int i;
6699
6700	/* Free up MSI-X multi-message vectors */
6701	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6702		free_irq(phba->msix_entries[i].vector, phba);
6703	/* Disable MSI-X */
6704	pci_disable_msix(phba->pcidev);
6705
6706	return;
6707}
6708
6709/**
6710 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6711 * @phba: pointer to lpfc hba data structure.
6712 *
6713 * This routine is invoked to enable the MSI interrupt mode to device with
6714 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6715 * enable the MSI vector. The device driver is responsible for calling the
6716 * request_irq() to register MSI vector with a interrupt the handler, which
6717 * request_irq() to register the MSI vector with an interrupt handler, which
6718 *
6719 * Return codes
6720 * 	0 - successful
6721 * 	other values - error
6722 */
6723static int
6724lpfc_sli_enable_msi(struct lpfc_hba *phba)
6725{
6726	int rc;
6727
6728	rc = pci_enable_msi(phba->pcidev);
6729	if (!rc)
6730		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6731				"0462 PCI enable MSI mode success.\n");
6732	else {
6733		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6734				"0471 PCI enable MSI mode failed (%d)\n", rc);
6735		return rc;
6736	}
6737
6738	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6739			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6740	if (rc) {
6741		pci_disable_msi(phba->pcidev);
6742		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6743				"0478 MSI request_irq failed (%d)\n", rc);
6744	}
6745	return rc;
6746}
6747
6748/**
6749 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6750 * @phba: pointer to lpfc hba data structure.
6751 *
6752 * This routine is invoked to disable the MSI interrupt mode to device with
6753 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
6754 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6755 * results in a BUG_ON() and the device will be left with MSI enabled, leaking
6756 * its vector.
6757 */
6758static void
6759lpfc_sli_disable_msi(struct lpfc_hba *phba)
6760{
6761	free_irq(phba->pcidev->irq, phba);
6762	pci_disable_msi(phba->pcidev);
6763	return;
6764}
6765
6766/**
6767 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6768 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested by the user (2 = MSI-X, 1 = MSI,
 *	0 = INTx).
6769 *
6770 * This routine is invoked to enable device interrupt and associate driver's
6771 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6772 * spec. Depending on the interrupt mode configured for the driver, the driver
6773 * will try to fallback from the configured interrupt mode to an interrupt
6774 * mode which is supported by the platform, kernel, and device in the order
6775 * of:
6776 * MSI-X -> MSI -> INTx.
6777 *
6778 * Return codes
6779 *   intr_mode - the interrupt mode that was successfully enabled
6780 *   LPFC_INTR_ERROR - failed to enable any interrupt mode
6781 **/
6782static uint32_t
6783lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6784{
6785	uint32_t intr_mode = LPFC_INTR_ERROR;
6786	int retval;
6787
6788	if (cfg_mode == 2) {
6789		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6790		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6791		if (!retval) {
6792			/* Now, try to enable MSI-X interrupt mode */
6793			retval = lpfc_sli_enable_msix(phba);
6794			if (!retval) {
6795				/* Indicate initialization to MSI-X mode */
6796				phba->intr_type = MSIX;
6797				intr_mode = 2;
6798			}
6799		}
6800	}
6801
6802	/* Fallback to MSI if MSI-X initialization failed */
6803	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6804		retval = lpfc_sli_enable_msi(phba);
6805		if (!retval) {
6806			/* Indicate initialization to MSI mode */
6807			phba->intr_type = MSI;
6808			intr_mode = 1;
6809		}
6810	}
6811
	/* Fallback to INTx if both MSI-X/MSI initialization failed */
6813	if (phba->intr_type == NONE) {
6814		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6815				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6816		if (!retval) {
6817			/* Indicate initialization to INTx mode */
6818			phba->intr_type = INTx;
6819			intr_mode = 0;
6820		}
6821	}
6822	return intr_mode;
6823}
6824
6825/**
6826 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6827 * @phba: pointer to lpfc hba data structure.
6828 *
6829 * This routine is invoked to disable device interrupt and disassociate the
6830 * driver's interrupt handler(s) from interrupt vector(s) to device with
6831 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6832 * release the interrupt vector(s) for the message signaled interrupt.
6833 **/
6834static void
6835lpfc_sli_disable_intr(struct lpfc_hba *phba)
6836{
6837	/* Disable the currently initialized interrupt mode */
6838	if (phba->intr_type == MSIX)
6839		lpfc_sli_disable_msix(phba);
6840	else if (phba->intr_type == MSI)
6841		lpfc_sli_disable_msi(phba);
6842	else if (phba->intr_type == INTx)
6843		free_irq(phba->pcidev->irq, phba);
6844
6845	/* Reset interrupt management states */
6846	phba->intr_type = NONE;
6847	phba->sli.slistat.sli_intr = 0;
6848
6849	return;
6850}
6851
6852/**
6853 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6854 * @phba: pointer to lpfc hba data structure.
6855 *
6856 * This routine is invoked to enable the MSI-X interrupt vectors to device
6857 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6858 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6859 * enables either all or nothing, depending on the current availability of
6860 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later, when the device
 * is unloading, the driver should always call free_irq() on all MSI-X vectors
 * it has done request_irq() on before calling pci_disable_msix(). Failure
 * to do so results in a BUG_ON() and leaves the device with MSI-X
 * enabled, leaking its vectors.
6867 *
6868 * Return codes
6869 * 0 - successful
6870 * other values - error
6871 **/
6872static int
6873lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6874{
6875	int rc, index;
6876
6877	/* Set up MSI-X multi-message vectors */
6878	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6879		phba->sli4_hba.msix_entries[index].entry = index;
6880
6881	/* Configure MSI-X capability structure */
6882	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6883			     phba->sli4_hba.cfg_eqn);
6884	if (rc) {
6885		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6886				"0484 PCI enable MSI-X failed (%d)\n", rc);
6887		goto msi_fail_out;
6888	}
6889	/* Log MSI-X vector assignment */
6890	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6891		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6892				"0489 MSI-X entry[%d]: vector=x%x "
6893				"message=%d\n", index,
6894				phba->sli4_hba.msix_entries[index].vector,
6895				phba->sli4_hba.msix_entries[index].entry);
6896	/*
6897	 * Assign MSI-X vectors to interrupt handlers
6898	 */
6899
	/* The first vector must be associated with the slow-path handler for MQ */
6901	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6902			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6903			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6904	if (rc) {
6905		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6906				"0485 MSI-X slow-path request_irq failed "
6907				"(%d)\n", rc);
6908		goto msi_fail_out;
6909	}
6910
6911	/* The rest of the vector(s) are associated to fast-path handler(s) */
6912	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6913		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6914		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6915		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6916				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6917				 LPFC_FP_DRIVER_HANDLER_NAME,
6918				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6919		if (rc) {
6920			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6921					"0486 MSI-X fast-path (%d) "
6922					"request_irq failed (%d)\n", index, rc);
6923			goto cfg_fail_out;
6924		}
6925	}
6926
6927	return rc;
6928
cfg_fail_out:
	/* free the fast-path irqs already requested */
	for (--index; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6934
6935	/* free the irq already requested */
6936	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6937
6938msi_fail_out:
6939	/* Unconfigure MSI-X capability structure */
6940	pci_disable_msix(phba->pcidev);
6941	return rc;
6942}
6943
6944/**
6945 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6946 * @phba: pointer to lpfc hba data structure.
6947 *
6948 * This routine is invoked to release the MSI-X vectors and then disable the
6949 * MSI-X interrupt mode to device with SLI-4 interface spec.
6950 **/
6951static void
6952lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6953{
6954	int index;
6955
6956	/* Free up MSI-X multi-message vectors */
6957	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6958
6959	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6960		free_irq(phba->sli4_hba.msix_entries[index].vector,
6961			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6962	/* Disable MSI-X */
6963	pci_disable_msix(phba->pcidev);
6964
6965	return;
6966}
6967
6968/**
6969 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6970 * @phba: pointer to lpfc hba data structure.
6971 *
6972 * This routine is invoked to enable the MSI interrupt mode to device with
6973 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6974 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
6976 * which is done in this function.
6977 *
6978 * Return codes
6979 * 	0 - successful
6980 * 	other values - error
6981 **/
6982static int
6983lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6984{
6985	int rc, index;
6986
6987	rc = pci_enable_msi(phba->pcidev);
6988	if (!rc)
6989		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6990				"0487 PCI enable MSI mode success.\n");
6991	else {
6992		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6993				"0488 PCI enable MSI mode failed (%d)\n", rc);
6994		return rc;
6995	}
6996
6997	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6998			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}
7004
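	/* Set up the fast-path EQ handles serviced by the single MSI vector */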
7005	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7006		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7007		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7008	}
7009
7010	return rc;
7011}
7012
7013/**
7014 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7015 * @phba: pointer to lpfc hba data structure.
7016 *
7017 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and leaves the device with MSI enabled, leaking
 * its vector.
7022 **/
7023static void
7024lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7025{
7026	free_irq(phba->pcidev->irq, phba);
7027	pci_disable_msi(phba->pcidev);
7028	return;
7029}
7030
7031/**
7032 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7033 * @phba: pointer to lpfc hba data structure.
7034 *
7035 * This routine is invoked to enable device interrupt and associate driver's
7036 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the driver,
 * the driver will try to fall back from the configured interrupt mode to an
7039 * interrupt mode which is supported by the platform, kernel, and device in
7040 * the order of:
7041 * MSI-X -> MSI -> IRQ.
7042 *
7043 * Return codes
 * 	0 - INTx mode enabled
 * 	1 - MSI mode enabled
 * 	2 - MSI-X mode enabled
 * 	LPFC_INTR_ERROR - failed to enable any interrupt mode
7046 **/
7047static uint32_t
7048lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7049{
7050	uint32_t intr_mode = LPFC_INTR_ERROR;
7051	int retval, index;
7052
	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}
7066
7067	/* Fallback to MSI if MSI-X initialization failed */
7068	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7069		retval = lpfc_sli4_enable_msi(phba);
7070		if (!retval) {
7071			/* Indicate initialization to MSI mode */
7072			phba->intr_type = MSI;
7073			intr_mode = 1;
7074		}
7075	}
7076
	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7078	if (phba->intr_type == NONE) {
7079		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7080				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7081		if (!retval) {
7082			/* Indicate initialization to INTx mode */
7083			phba->intr_type = INTx;
7084			intr_mode = 0;
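			/* Set up the fast-path EQ handles for the INTx handler */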
7085			for (index = 0; index < phba->cfg_fcp_eq_count;
7086			     index++) {
7087				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7088				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7089			}
7090		}
7091	}
7092	return intr_mode;
7093}
7094
7095/**
7096 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7097 * @phba: pointer to lpfc hba data structure.
7098 *
7099 * This routine is invoked to disable device interrupt and disassociate
7100 * the driver's interrupt handler(s) from interrupt vector(s) to device
7101 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7102 * will release the interrupt vector(s) for the message signaled interrupt.
7103 **/
7104static void
7105lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7106{
7107	/* Disable the currently initialized interrupt mode */
7108	if (phba->intr_type == MSIX)
7109		lpfc_sli4_disable_msix(phba);
7110	else if (phba->intr_type == MSI)
7111		lpfc_sli4_disable_msi(phba);
7112	else if (phba->intr_type == INTx)
7113		free_irq(phba->pcidev->irq, phba);
7114
7115	/* Reset interrupt management states */
7116	phba->intr_type = NONE;
7117	phba->sli.slistat.sli_intr = 0;
7118
7119	return;
7120}
7121
7122/**
7123 * lpfc_unset_hba - Unset SLI3 hba device initialization
7124 * @phba: pointer to lpfc hba data structure.
7125 *
7126 * This routine is invoked to unset the HBA device initialization steps to
7127 * a device with SLI-3 interface spec.
7128 **/
7129static void
7130lpfc_unset_hba(struct lpfc_hba *phba)
7131{
7132	struct lpfc_vport *vport = phba->pport;
7133	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7134
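	/* Mark the physical port as unloading before tearing it down */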
7135	spin_lock_irq(shost->host_lock);
7136	vport->load_flag |= FC_UNLOADING;
7137	spin_unlock_irq(shost->host_lock);
7138
7139	lpfc_stop_hba_timers(phba);
7140
7141	phba->pport->work_port_events = 0;
7142
7143	lpfc_sli_hba_down(phba);
7144
7145	lpfc_sli_brdrestart(phba);
7146
7147	lpfc_sli_disable_intr(phba);
7148
7149	return;
7150}
7151
7152/**
7153 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7154 * @phba: pointer to lpfc hba data structure.
7155 *
7156 * This routine is invoked to unset the HBA device initialization steps to
7157 * a device with SLI-4 interface spec.
7158 **/
7159static void
7160lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7161{
7162	struct lpfc_vport *vport = phba->pport;
7163	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7164
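	/* Mark the physical port as unloading */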
7165	spin_lock_irq(shost->host_lock);
7166	vport->load_flag |= FC_UNLOADING;
7167	spin_unlock_irq(shost->host_lock);
7168
7169	phba->pport->work_port_events = 0;
7170
7171	lpfc_sli4_hba_down(phba);
7172
7173	lpfc_sli4_disable_intr(phba);
7174
7175	return;
7176}
7177
7178/**
7179 * lpfc_sli4_hba_unset - Unset the fcoe hba
7180 * @phba: Pointer to HBA context object.
7181 *
7182 * This function is called in the SLI4 code path to reset the HBA's FCoE
7183 * function. The caller is not required to hold any lock. This routine
7184 * issues PCI function reset mailbox command to reset the FCoE function.
7185 * At the end of the function, it calls lpfc_hba_down_post function to
7186 * free any pending commands.
7187 **/
7188static void
7189lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7190{
7191	int wait_cnt = 0;
7192	LPFC_MBOXQ_t *mboxq;
7193
7194	lpfc_stop_hba_timers(phba);
7195	phba->sli4_hba.intr_enable = 0;
7196
7197	/*
	 * Gracefully wait out any potentially outstanding asynchronous
7199	 * mailbox command.
7200	 */
7201
	/* First, block any pending async mailbox command from being posted */
7203	spin_lock_irq(&phba->hbalock);
7204	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7205	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
7207	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7208		msleep(10);
7209		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7210			break;
7211	}
7212	/* Forcefully release the outstanding mailbox command if timed out */
7213	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7214		spin_lock_irq(&phba->hbalock);
7215		mboxq = phba->sli.mbox_active;
7216		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7217		__lpfc_mbox_cmpl_put(phba, mboxq);
7218		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7219		phba->sli.mbox_active = NULL;
7220		spin_unlock_irq(&phba->hbalock);
7221	}
7222
7223	/* Tear down the queues in the HBA */
7224	lpfc_sli4_queue_unset(phba);
7225
7226	/* Disable PCI subsystem interrupt */
7227	lpfc_sli4_disable_intr(phba);
7228
7229	/* Stop kthread signal shall trigger work_done one more time */
7230	kthread_stop(phba->worker_thread);
7231
7232	/* Stop the SLI4 device port */
7233	phba->pport->work_port_events = 0;
7234}
7235
/**
7237 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7238 * @phba: Pointer to HBA context object.
7239 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7240 *
7241 * This function is called in the SLI4 code path to read the port's
7242 * sli4 capabilities.
7243 *
 * This function may be called from any context that can block-wait
7245 * for the completion.  The expectation is that this routine is called
7246 * typically from probe_one or from the online routine.
7247 **/
7248int
7249lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7250{
7251	int rc;
7252	struct lpfc_mqe *mqe;
7253	struct lpfc_pc_sli4_params *sli4_params;
7254	uint32_t mbox_tmo;
7255
7256	rc = 0;
7257	mqe = &mboxq->u.mqe;
7258
7259	/* Read the port's SLI4 Parameters port capabilities */
7260	lpfc_sli4_params(mboxq);
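	/* Poll for completion if interrupts are not enabled; otherwise block-wait */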
7261	if (!phba->sli4_hba.intr_enable)
7262		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7263	else {
7264		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7265		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7266	}
7267
7268	if (unlikely(rc))
7269		return 1;
7270
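	/* Cache the returned SLI4 parameters in the driver's sli4_hba copy */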
7271	sli4_params = &phba->sli4_hba.pc_sli4_params;
7272	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7273	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7274	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7275	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7276					     &mqe->un.sli4_params);
7277	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7278					     &mqe->un.sli4_params);
7279	sli4_params->proto_types = mqe->un.sli4_params.word3;
7280	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7281	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7282	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7283	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7284	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7285	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7286	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7287	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7288	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7289	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7290	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7291	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7292	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7293	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7294	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7295	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7296	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7297	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7298	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7299	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7300	return rc;
7301}
7302
7303/**
7304 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7305 * @pdev: pointer to PCI device
7306 * @pid: pointer to PCI device identifier
7307 *
7308 * This routine is to be called to attach a device with SLI-3 interface spec
7309 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7310 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can
7312 * support this kind of device. If the match is successful, the driver core
7313 * invokes this routine. If this routine determines it can claim the HBA, it
7314 * does all the initialization that it needs to do to handle the HBA properly.
7315 *
7316 * Return code
7317 * 	0 - driver can claim the device
7318 * 	negative value - driver can not claim the device
7319 **/
7320static int __devinit
7321lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7322{
7323	struct lpfc_hba   *phba;
7324	struct lpfc_vport *vport = NULL;
7325	struct Scsi_Host  *shost = NULL;
7326	int error;
7327	uint32_t cfg_mode, intr_mode;
7328
7329	/* Allocate memory for HBA structure */
7330	phba = lpfc_hba_alloc(pdev);
7331	if (!phba)
7332		return -ENOMEM;
7333
7334	/* Perform generic PCI device enabling operation */
7335	error = lpfc_enable_pci_dev(phba);
7336	if (error) {
7337		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7338				"1401 Failed to enable pci device.\n");
7339		goto out_free_phba;
7340	}
7341
7342	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
7343	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7344	if (error)
7345		goto out_disable_pci_dev;
7346
7347	/* Set up SLI-3 specific device PCI memory space */
7348	error = lpfc_sli_pci_mem_setup(phba);
7349	if (error) {
7350		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7351				"1402 Failed to set up pci memory space.\n");
7352		goto out_disable_pci_dev;
7353	}
7354
7355	/* Set up phase-1 common device driver resources */
7356	error = lpfc_setup_driver_resource_phase1(phba);
7357	if (error) {
7358		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7359				"1403 Failed to set up driver resource.\n");
7360		goto out_unset_pci_mem_s3;
7361	}
7362
7363	/* Set up SLI-3 specific device driver resources */
7364	error = lpfc_sli_driver_resource_setup(phba);
7365	if (error) {
7366		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7367				"1404 Failed to set up driver resource.\n");
7368		goto out_unset_pci_mem_s3;
7369	}
7370
7371	/* Initialize and populate the iocb list per host */
7372	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7373	if (error) {
7374		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7375				"1405 Failed to initialize iocb list.\n");
7376		goto out_unset_driver_resource_s3;
7377	}
7378
7379	/* Set up common device driver resources */
7380	error = lpfc_setup_driver_resource_phase2(phba);
7381	if (error) {
7382		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7383				"1406 Failed to set up driver resource.\n");
7384		goto out_free_iocb_list;
7385	}
7386
7387	/* Create SCSI host to the physical port */
7388	error = lpfc_create_shost(phba);
7389	if (error) {
7390		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7391				"1407 Failed to create scsi host.\n");
7392		goto out_unset_driver_resource;
7393	}
7394
7395	/* Configure sysfs attributes */
7396	vport = phba->pport;
7397	error = lpfc_alloc_sysfs_attr(vport);
7398	if (error) {
7399		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7400				"1476 Failed to allocate sysfs attr\n");
7401		goto out_destroy_shost;
7402	}
7403
7404	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, try to enable interrupt and bring up the device */
7406	cfg_mode = phba->cfg_use_msi;
7407	while (true) {
7408		/* Put device to a known state before enabling interrupt */
7409		lpfc_stop_port(phba);
7410		/* Configure and enable interrupt */
7411		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7412		if (intr_mode == LPFC_INTR_ERROR) {
7413			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7414					"0431 Failed to enable interrupt.\n");
7415			error = -ENODEV;
7416			goto out_free_sysfs_attr;
7417		}
7418		/* SLI-3 HBA setup */
7419		if (lpfc_sli_hba_setup(phba)) {
7420			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7421					"1477 Failed to set up hba\n");
7422			error = -ENODEV;
7423			goto out_remove_device;
7424		}
7425
7426		/* Wait 50ms for the interrupts of previous mailbox commands */
7427		msleep(50);
7428		/* Check active interrupts on message signaled interrupts */
7429		if (intr_mode == 0 ||
7430		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7431			/* Log the current active interrupt mode */
7432			phba->intr_mode = intr_mode;
7433			lpfc_log_intr_mode(phba, intr_mode);
7434			break;
7435		} else {
7436			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7437					"0447 Configure interrupt mode (%d) "
7438					"failed active interrupt test.\n",
7439					intr_mode);
7440			/* Disable the current interrupt mode */
7441			lpfc_sli_disable_intr(phba);
7442			/* Try next level of interrupt mode */
7443			cfg_mode = --intr_mode;
7444		}
7445	}
7446
7447	/* Perform post initialization setup */
7448	lpfc_post_init_setup(phba);
7449
7450	/* Check if there are static vports to be created. */
7451	lpfc_create_static_vport(phba);
7452
7453	return 0;
7454
7455out_remove_device:
7456	lpfc_unset_hba(phba);
7457out_free_sysfs_attr:
7458	lpfc_free_sysfs_attr(vport);
7459out_destroy_shost:
7460	lpfc_destroy_shost(phba);
7461out_unset_driver_resource:
7462	lpfc_unset_driver_resource_phase2(phba);
7463out_free_iocb_list:
7464	lpfc_free_iocb_list(phba);
7465out_unset_driver_resource_s3:
7466	lpfc_sli_driver_resource_unset(phba);
7467out_unset_pci_mem_s3:
7468	lpfc_sli_pci_mem_unset(phba);
7469out_disable_pci_dev:
7470	lpfc_disable_pci_dev(phba);
7471	if (shost)
7472		scsi_host_put(shost);
7473out_free_phba:
7474	lpfc_hba_free(phba);
7475	return error;
7476}
7477
7478/**
7479 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7480 * @pdev: pointer to PCI device
7481 *
 * This routine is to be called to detach a device with SLI-3 interface
7483 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7484 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7485 * device to be removed from the PCI subsystem properly.
7486 **/
7487static void __devexit
7488lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7489{
7490	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
7491	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7492	struct lpfc_vport **vports;
7493	struct lpfc_hba   *phba = vport->phba;
7494	int i;
7495	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7496
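	/* Mark the device unloading flag */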
7497	spin_lock_irq(&phba->hbalock);
7498	vport->load_flag |= FC_UNLOADING;
7499	spin_unlock_irq(&phba->hbalock);
7500
7501	lpfc_free_sysfs_attr(vport);
7502
7503	/* Release all the vports against this physical port */
7504	vports = lpfc_create_vport_work_array(phba);
7505	if (vports != NULL)
7506		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7507			fc_vport_terminate(vports[i]->fc_vport);
7508	lpfc_destroy_vport_work_array(phba, vports);
7509
7510	/* Remove FC host and then SCSI host with the physical port */
7511	fc_remove_host(shost);
7512	scsi_remove_host(shost);
7513	lpfc_cleanup(vport);
7514
7515	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
7517	 * clears the rings, discards all mailbox commands, and resets
7518	 * the HBA.
7519	 */
7520
	/* HBA interrupt will be disabled after this call */
7522	lpfc_sli_hba_down(phba);
7523	/* Stop kthread signal shall trigger work_done one more time */
7524	kthread_stop(phba->worker_thread);
7525	/* Final cleanup of txcmplq and reset the HBA */
7526	lpfc_sli_brdrestart(phba);
7527
7528	lpfc_stop_hba_timers(phba);
7529	spin_lock_irq(&phba->hbalock);
7530	list_del_init(&vport->listentry);
7531	spin_unlock_irq(&phba->hbalock);
7532
7533	lpfc_debugfs_terminate(vport);
7534
7535	/* Disable interrupt */
7536	lpfc_sli_disable_intr(phba);
7537
7538	pci_set_drvdata(pdev, NULL);
7539	scsi_host_put(shost);
7540
7541	/*
7542	 * Call scsi_free before mem_free since scsi bufs are released to their
7543	 * corresponding pools here.
7544	 */
7545	lpfc_scsi_free(phba);
7546	lpfc_mem_free_all(phba);
7547
7548	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7549			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7550
7551	/* Free resources associated with SLI2 interface */
7552	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7553			  phba->slim2p.virt, phba->slim2p.phys);
7554
7555	/* unmap adapter SLIM and Control Registers */
7556	iounmap(phba->ctrl_regs_memmap_p);
7557	iounmap(phba->slim_memmap_p);
7558
7559	lpfc_hba_free(phba);
7560
7561	pci_release_selected_regions(pdev, bars);
7562	pci_disable_device(pdev);
7563}
7564
7565/**
7566 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7567 * @pdev: pointer to PCI device
7568 * @msg: power management message
7569 *
7570 * This routine is to be called from the kernel's PCI subsystem to support
7571 * system Power Management (PM) to device with SLI-3 interface spec. When
7572 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that as the driver implements the
7575 * minimum PM requirements to a power-aware driver's PM support for the
7576 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7577 * to the suspend() method call will be treated as SUSPEND and the driver will
7578 * fully reinitialize its device during resume() method call, the driver will
7579 * set device to PCI_D3hot state in PCI config space instead of setting it
7580 * according to the @msg provided by the PM.
7581 *
7582 * Return code
7583 * 	0 - driver suspended the device
7584 * 	Error otherwise
7585 **/
7586static int
7587lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7588{
7589	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7590	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7591
7592	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7593			"0473 PCI device Power Management suspend.\n");
7594
7595	/* Bring down the device */
7596	lpfc_offline_prep(phba);
7597	lpfc_offline(phba);
7598	kthread_stop(phba->worker_thread);
7599
7600	/* Disable interrupt from device */
7601	lpfc_sli_disable_intr(phba);
7602
7603	/* Save device state to PCI config space */
7604	pci_save_state(pdev);
7605	pci_set_power_state(pdev, PCI_D3hot);
7606
7607	return 0;
7608}
7609
7610/**
7611 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7612 * @pdev: pointer to PCI device
7613 *
7614 * This routine is to be called from the kernel's PCI subsystem to support
7615 * system Power Management (PM) to device with SLI-3 interface spec. When PM
7616 * invokes this method, it restores the device's PCI config space state and
7617 * fully reinitializes the device and brings it online. Note that as the
7618 * driver implements the minimum PM requirements to a power-aware driver's
7619 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
7620 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
7621 * driver will fully reinitialize its device during resume() method call,
7622 * the device will be set to PCI_D0 directly in PCI config space before
7623 * restoring the state.
7624 *
7625 * Return code
 * 	0 - driver resumed the device
7627 * 	Error otherwise
7628 **/
7629static int
7630lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7631{
7632	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7633	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7634	uint32_t intr_mode;
7635	int error;
7636
7637	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7638			"0452 PCI device Power Management resume.\n");
7639
7640	/* Restore device state from PCI config space */
7641	pci_set_power_state(pdev, PCI_D0);
7642	pci_restore_state(pdev);
7643
7644	/*
	 * As the new kernel behavior of the pci_restore_state() API call clears
	 * the device's saved_state flag, the restored state must be saved again.
7647	 */
7648	pci_save_state(pdev);
7649
7650	if (pdev->is_busmaster)
7651		pci_set_master(pdev);
7652
7653	/* Startup the kernel thread for this host adapter. */
7654	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7655					"lpfc_worker_%d", phba->brd_no);
7656	if (IS_ERR(phba->worker_thread)) {
7657		error = PTR_ERR(phba->worker_thread);
7658		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7659				"0434 PM resume failed to start worker "
7660				"thread: error=x%x.\n", error);
7661		return error;
7662	}
7663
7664	/* Configure and enable interrupt */
7665	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7666	if (intr_mode == LPFC_INTR_ERROR) {
7667		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7668				"0430 PM resume Failed to enable interrupt\n");
7669		return -EIO;
7670	} else
7671		phba->intr_mode = intr_mode;
7672
7673	/* Restart HBA and bring it online */
7674	lpfc_sli_brdrestart(phba);
7675	lpfc_online(phba);
7676
7677	/* Log the current active interrupt mode */
7678	lpfc_log_intr_mode(phba, phba->intr_mode);
7679
7680	return 0;
7681}
7682
7683/**
7684 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7685 * @phba: pointer to lpfc hba data structure.
7686 *
7687 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7688 * aborts and stops all the on-going I/Os on the pci device.
7689 **/
7690static void
7691lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7692{
7693	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7694			"2723 PCI channel I/O abort preparing for recovery\n");
7695	/* Prepare for bringing HBA offline */
7696	lpfc_offline_prep(phba);
7697	/* Clear sli active flag to prevent sysfs access to HBA */
7698	spin_lock_irq(&phba->hbalock);
7699	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
7700	spin_unlock_irq(&phba->hbalock);
7701	/* Stop and flush all I/Os and bring HBA offline */
7702	lpfc_offline(phba);
7703}
7704
7705/**
7706 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7707 * @phba: pointer to lpfc hba data structure.
7708 *
7709 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7710 * disables the device interrupt and pci device, and aborts the internal FCP
7711 * pending I/Os.
7712 **/
7713static void
7714lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7715{
7716	struct lpfc_sli *psli = &phba->sli;
7717	struct lpfc_sli_ring  *pring;
7718
7719	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7720			"2710 PCI channel disable preparing for reset\n");
7721	/* Disable interrupt and pci device */
7722	lpfc_sli_disable_intr(phba);
7723	pci_disable_device(phba->pcidev);
7724	/*
7725	 * There may be I/Os dropped by the firmware.
	 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
	 * retry them after re-establishing the link.
7728	 */
7729	pring = &psli->ring[psli->fcp_ring];
7730	lpfc_sli_abort_iocb_ring(phba, pring);
7731}
7732
7733/**
 * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7735 * @phba: pointer to lpfc hba data structure.
7736 *
 * This routine is called to prepare the SLI3 device for the PCI slot to be
 * permanently disabled. It blocks the SCSI transport layer traffic and
 * flushes the pending FCP I/Os.
7740 **/
7741static void
7742lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7743{
7744	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7745			"2711 PCI channel permanent disable for failure\n");
7746	/* Clean up all driver's outstanding SCSI I/Os */
7747	lpfc_sli_flush_fcp_rings(phba);
7748}
7749
7750/**
7751 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7752 * @pdev: pointer to PCI device.
7753 * @state: the current PCI connection state.
7754 *
7755 * This routine is called from the PCI subsystem for I/O error handling to
7756 * device with SLI-3 interface spec. This function is called by the PCI
7757 * subsystem after a PCI bus error affecting this device has been detected.
7758 * When this function is invoked, it will need to stop all the I/Os and
7759 * interrupt(s) to the device. Once that is done, it will return
7760 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7761 * as desired.
7762 *
7763 * Return codes
7764 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7765 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7766 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7767 **/
7768static pci_ers_result_t
7769lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7770{
7771	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7772	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7773
7774	/* Block all SCSI devices' I/Os on the host */
7775	lpfc_scsi_dev_block(phba);
7776
7777	switch (state) {
7778	case pci_channel_io_normal:
7779		/* Non-fatal error, prepare for recovery */
7780		lpfc_sli_prep_dev_for_recover(phba);
7781		return PCI_ERS_RESULT_CAN_RECOVER;
7782	case pci_channel_io_frozen:
7783		/* Fatal error, prepare for slot reset */
7784		lpfc_sli_prep_dev_for_reset(phba);
7785		return PCI_ERS_RESULT_NEED_RESET;
7786	case pci_channel_io_perm_failure:
7787		/* Permanent failure, prepare for device down */
7788		lpfc_prep_dev_for_perm_failure(phba);
7789		return PCI_ERS_RESULT_DISCONNECT;
7790	default:
7791		/* Unknown state, prepare and request slot reset */
7792		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7793				"0472 Unknown PCI error state: x%x\n", state);
7794		lpfc_sli_prep_dev_for_reset(phba);
7795		return PCI_ERS_RESULT_NEED_RESET;
7796	}
7797}
7798
7799/**
7800 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7801 * @pdev: pointer to PCI device.
7802 *
7803 * This routine is called from the PCI subsystem for error handling to
7804 * device with SLI-3 interface spec. This is called after PCI bus has been
7805 * reset to restart the PCI card from scratch, as if from a cold-boot.
7806 * During the PCI subsystem error recovery, after driver returns
7807 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7808 * recovery and then call this routine before calling the .resume method
7809 * to recover the device. This function will initialize the HBA device,
7810 * enable the interrupt, but it will just put the HBA to offline state
7811 * without passing any I/O traffic.
7812 *
7813 * Return codes
7814 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7815 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
7817static pci_ers_result_t
7818lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7819{
7820	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7821	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7822	struct lpfc_sli *psli = &phba->sli;
7823	uint32_t intr_mode;
7824
7825	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7826	if (pci_enable_device_mem(pdev)) {
7827		printk(KERN_ERR "lpfc: Cannot re-enable "
7828			"PCI device after reset.\n");
7829		return PCI_ERS_RESULT_DISCONNECT;
7830	}
7831
7832	pci_restore_state(pdev);
7833
7834	/*
	 * As the new kernel behavior of the pci_restore_state() API call clears
	 * the device's saved_state flag, the restored state must be saved again.
7837	 */
7838	pci_save_state(pdev);
7839
7840	if (pdev->is_busmaster)
7841		pci_set_master(pdev);
7842
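	/* Clear the SLI active flag until the HBA has been fully restarted */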
7843	spin_lock_irq(&phba->hbalock);
7844	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7845	spin_unlock_irq(&phba->hbalock);
7846
7847	/* Configure and enable interrupt */
7848	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7849	if (intr_mode == LPFC_INTR_ERROR) {
7850		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7851				"0427 Cannot re-enable interrupt after "
7852				"slot reset.\n");
7853		return PCI_ERS_RESULT_DISCONNECT;
7854	} else
7855		phba->intr_mode = intr_mode;
7856
7857	/* Take device offline; this will perform cleanup */
7858	lpfc_offline(phba);
7859	lpfc_sli_brdrestart(phba);
7860
7861	/* Log the current active interrupt mode */
7862	lpfc_log_intr_mode(phba, phba->intr_mode);
7863
7864	return PCI_ERS_RESULT_RECOVERED;
7865}
7866
7867/**
7868 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7869 * @pdev: pointer to PCI device
7870 *
7871 * This routine is called from the PCI subsystem for error handling to device
7872 * with SLI-3 interface spec. It is called when kernel error recovery tells
7873 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7874 * error recovery. After this call, traffic can start to flow from this device
7875 * again.
 **/
7877static void
7878lpfc_io_resume_s3(struct pci_dev *pdev)
7879{
7880	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7881	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7882
7883	/* Bring the device online */
7884	lpfc_online(phba);
7885
7886	/* Clean up Advanced Error Reporting (AER) if needed */
7887	if (phba->hba_flag & HBA_AER_ENABLED)
7888		pci_cleanup_aer_uncorrect_error_status(pdev);
7889}
7890
7891/**
7892 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7893 * @phba: pointer to lpfc hba data structure.
7894 *
7895 * returns the number of ELS/CT IOCBs to reserve
7896 **/
7897int
7898lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7899{
7900	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7901
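	/* Scale the ELS/CT IOCB reservation with the number of configured XRIs */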
7902	if (phba->sli_rev == LPFC_SLI_REV4) {
7903		if (max_xri <= 100)
7904			return 10;
7905		else if (max_xri <= 256)
7906			return 25;
7907		else if (max_xri <= 512)
7908			return 50;
7909		else if (max_xri <= 1024)
7910			return 100;
7911		else
7912			return 150;
7913	} else
7914		return 0;
7915}
7916
7917/**
7918 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7919 * @pdev: pointer to PCI device
7920 * @pid: pointer to PCI device identifier
7921 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7924 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver
7926 * can support this kind of device. If the match is successful, the driver
7927 * core invokes this routine. If this routine determines it can claim the HBA,
7928 * it does all the initialization that it needs to do to handle the HBA
7929 * properly.
7930 *
7931 * Return code
7932 * 	0 - driver can claim the device
7933 * 	negative value - driver can not claim the device
7934 **/
7935static int __devinit
7936lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7937{
7938	struct lpfc_hba   *phba;
7939	struct lpfc_vport *vport = NULL;
7940	struct Scsi_Host  *shost = NULL;
7941	int error;
7942	uint32_t cfg_mode, intr_mode;
7943	int mcnt;
7944
7945	/* Allocate memory for HBA structure */
7946	phba = lpfc_hba_alloc(pdev);
7947	if (!phba)
7948		return -ENOMEM;
7949
7950	/* Perform generic PCI device enabling operation */
7951	error = lpfc_enable_pci_dev(phba);
7952	if (error) {
7953		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7954				"1409 Failed to enable pci device.\n");
7955		goto out_free_phba;
7956	}
7957
7958	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
7959	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7960	if (error)
7961		goto out_disable_pci_dev;
7962
7963	/* Set up SLI-4 specific device PCI memory space */
7964	error = lpfc_sli4_pci_mem_setup(phba);
7965	if (error) {
7966		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7967				"1410 Failed to set up pci memory space.\n");
7968		goto out_disable_pci_dev;
7969	}
7970
7971	/* Set up phase-1 common device driver resources */
7972	error = lpfc_setup_driver_resource_phase1(phba);
7973	if (error) {
7974		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7975				"1411 Failed to set up driver resource.\n");
7976		goto out_unset_pci_mem_s4;
7977	}
7978
7979	/* Set up SLI-4 Specific device driver resources */
7980	error = lpfc_sli4_driver_resource_setup(phba);
7981	if (error) {
7982		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7983				"1412 Failed to set up driver resource.\n");
7984		goto out_unset_pci_mem_s4;
7985	}
7986
7987	/* Initialize and populate the iocb list per host */
7988	error = lpfc_init_iocb_list(phba,
7989			phba->sli4_hba.max_cfg_param.max_xri);
7990	if (error) {
7991		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7992				"1413 Failed to initialize iocb list.\n");
7993		goto out_unset_driver_resource_s4;
7994	}
7995
7996	/* Set up common device driver resources */
7997	error = lpfc_setup_driver_resource_phase2(phba);
7998	if (error) {
7999		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8000				"1414 Failed to set up driver resource.\n");
8001		goto out_free_iocb_list;
8002	}
8003
8004	/* Create SCSI host to the physical port */
8005	error = lpfc_create_shost(phba);
8006	if (error) {
8007		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8008				"1415 Failed to create scsi host.\n");
8009		goto out_unset_driver_resource;
8010	}
8011
8012	/* Configure sysfs attributes */
8013	vport = phba->pport;
8014	error = lpfc_alloc_sysfs_attr(vport);
8015	if (error) {
8016		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8017				"1416 Failed to allocate sysfs attr\n");
8018		goto out_destroy_shost;
8019	}
8020
8021	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, try to enable interrupt and bring up the device */
8023	cfg_mode = phba->cfg_use_msi;
8024	while (true) {
8025		/* Put device to a known state before enabling interrupt */
8026		lpfc_stop_port(phba);
8027		/* Configure and enable interrupt */
8028		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8029		if (intr_mode == LPFC_INTR_ERROR) {
8030			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8031					"0426 Failed to enable interrupt.\n");
8032			error = -ENODEV;
8033			goto out_free_sysfs_attr;
8034		}
8035		/* Default to single FCP EQ for non-MSI-X */
8036		if (phba->intr_type != MSIX)
8037			phba->cfg_fcp_eq_count = 1;
8038		/* Set up SLI-4 HBA */
8039		if (lpfc_sli4_hba_setup(phba)) {
8040			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8041					"1421 Failed to set up hba\n");
8042			error = -ENODEV;
8043			goto out_disable_intr;
8044		}
8045
8046		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
8047		if (intr_mode != 0)
8048			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8049							    LPFC_ACT_INTR_CNT);
8050
8051		/* Check active interrupts received only for MSI/MSI-X */
8052		if (intr_mode == 0 ||
8053		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8054			/* Log the current active interrupt mode */
8055			phba->intr_mode = intr_mode;
8056			lpfc_log_intr_mode(phba, intr_mode);
8057			break;
8058		}
8059		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8060				"0451 Configure interrupt mode (%d) "
8061				"failed active interrupt test.\n",
8062				intr_mode);
		/* Unset the previous SLI-4 HBA setup */
8064		lpfc_sli4_unset_hba(phba);
8065		/* Try next level of interrupt mode */
8066		cfg_mode = --intr_mode;
8067	}
8068
8069	/* Perform post initialization setup */
8070	lpfc_post_init_setup(phba);
8071
8072	/* Check if there are static vports to be created. */
8073	lpfc_create_static_vport(phba);
8074
8075	return 0;
8076
8077out_disable_intr:
8078	lpfc_sli4_disable_intr(phba);
8079out_free_sysfs_attr:
8080	lpfc_free_sysfs_attr(vport);
8081out_destroy_shost:
8082	lpfc_destroy_shost(phba);
8083out_unset_driver_resource:
8084	lpfc_unset_driver_resource_phase2(phba);
8085out_free_iocb_list:
8086	lpfc_free_iocb_list(phba);
8087out_unset_driver_resource_s4:
8088	lpfc_sli4_driver_resource_unset(phba);
8089out_unset_pci_mem_s4:
8090	lpfc_sli4_pci_mem_unset(phba);
8091out_disable_pci_dev:
8092	lpfc_disable_pci_dev(phba);
8093	if (shost)
8094		scsi_host_put(shost);
8095out_free_phba:
8096	lpfc_hba_free(phba);
8097	return error;
8098}
8099
8100/**
8101 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8102 * @pdev: pointer to PCI device
8103 *
 * This routine is called from the kernel's PCI subsystem to detach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8106 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8107 * device to be removed from the PCI subsystem properly.
8108 **/
8109static void __devexit
8110lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8111{
8112	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8113	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8114	struct lpfc_vport **vports;
8115	struct lpfc_hba *phba = vport->phba;
8116	int i;
8117
8118	/* Mark the device unloading flag */
8119	spin_lock_irq(&phba->hbalock);
8120	vport->load_flag |= FC_UNLOADING;
8121	spin_unlock_irq(&phba->hbalock);
8122
8123	/* Free the HBA sysfs attributes */
8124	lpfc_free_sysfs_attr(vport);
8125
8126	/* Release all the vports against this physical port */
8127	vports = lpfc_create_vport_work_array(phba);
8128	if (vports != NULL)
8129		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8130			fc_vport_terminate(vports[i]->fc_vport);
8131	lpfc_destroy_vport_work_array(phba, vports);
8132
8133	/* Remove FC host and then SCSI host with the physical port */
8134	fc_remove_host(shost);
8135	scsi_remove_host(shost);
8136
8137	/* Perform cleanup on the physical port */
8138	lpfc_cleanup(vport);
8139
8140	/*
8141	 * Bring down the SLI Layer. This step disables all interrupts,
8142	 * clears the rings, discards all mailbox commands, and resets
8143	 * the HBA FCoE function.
8144	 */
8145	lpfc_debugfs_terminate(vport);
8146	lpfc_sli4_hba_unset(phba);
8147
8148	spin_lock_irq(&phba->hbalock);
8149	list_del_init(&vport->listentry);
8150	spin_unlock_irq(&phba->hbalock);
8151
8152	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
8153	 * buffers are released to their corresponding pools here.
8154	 */
8155	lpfc_scsi_free(phba);
8156	lpfc_sli4_driver_resource_unset(phba);
8157
8158	/* Unmap adapter Control and Doorbell registers */
8159	lpfc_sli4_pci_mem_unset(phba);
8160
8161	/* Release PCI resources and disable device's PCI function */
8162	scsi_host_put(shost);
8163	lpfc_disable_pci_dev(phba);
8164
8165	/* Finally, free the driver's device data structure */
8166	lpfc_hba_free(phba);
8167
8168	return;
8169}
8170
8171/**
8172 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8173 * @pdev: pointer to PCI device
8174 * @msg: power management message
8175 *
8176 * This routine is called from the kernel's PCI subsystem to support system
8177 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8178 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off the device's interrupt and DMA, and
 * bringing the device offline. Note that as the driver implements the
 * minimum PM
8181 * requirements to a power-aware driver's PM support for suspend/resume -- all
8182 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
8183 * method call will be treated as SUSPEND and the driver will fully
8184 * reinitialize its device during resume() method call, the driver will set
8185 * device to PCI_D3hot state in PCI config space instead of setting it
8186 * according to the @msg provided by the PM.
8187 *
8188 * Return code
8189 * 	0 - driver suspended the device
8190 * 	Error otherwise
8191 **/
8192static int
8193lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8194{
8195	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8196	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8197
8198	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8199			"0298 PCI device Power Management suspend.\n");
8200
8201	/* Bring down the device */
8202	lpfc_offline_prep(phba);
8203	lpfc_offline(phba);
8204	kthread_stop(phba->worker_thread);
8205
8206	/* Disable interrupt from device */
8207	lpfc_sli4_disable_intr(phba);
8208
8209	/* Save device state to PCI config space */
8210	pci_save_state(pdev);
8211	pci_set_power_state(pdev, PCI_D3hot);
8212
8213	return 0;
8214}
8215
8216/**
8217 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8218 * @pdev: pointer to PCI device
8219 *
8220 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8222 * this method, it restores the device's PCI config space state and fully
8223 * reinitializes the device and brings it online. Note that as the driver
8224 * implements the minimum PM requirements to a power-aware driver's PM for
8225 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8226 * to the suspend() method call will be treated as SUSPEND and the driver
8227 * will fully reinitialize its device during resume() method call, the device
8228 * will be set to PCI_D0 directly in PCI config space before restoring the
8229 * state.
8230 *
8231 * Return code
 * 	0 - driver resumed the device
8233 * 	Error otherwise
8234 **/
8235static int
8236lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8237{
8238	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8239	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8240	uint32_t intr_mode;
8241	int error;
8242
8243	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8244			"0292 PCI device Power Management resume.\n");
8245
8246	/* Restore device state from PCI config space */
8247	pci_set_power_state(pdev, PCI_D0);
8248	pci_restore_state(pdev);
8249
8250	/*
	 * As the new kernel behavior of the pci_restore_state() API call clears
	 * the device's saved_state flag, the restored state must be saved again.
8253	 */
8254	pci_save_state(pdev);
8255
8256	if (pdev->is_busmaster)
8257		pci_set_master(pdev);
8258
	/* Startup the kernel thread for this host adapter. */
8260	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8261					"lpfc_worker_%d", phba->brd_no);
8262	if (IS_ERR(phba->worker_thread)) {
8263		error = PTR_ERR(phba->worker_thread);
8264		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8265				"0293 PM resume failed to start worker "
8266				"thread: error=x%x.\n", error);
8267		return error;
8268	}
8269
8270	/* Configure and enable interrupt */
8271	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8272	if (intr_mode == LPFC_INTR_ERROR) {
8273		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8274				"0294 PM resume Failed to enable interrupt\n");
8275		return -EIO;
8276	} else
8277		phba->intr_mode = intr_mode;
8278
8279	/* Restart HBA and bring it online */
8280	lpfc_sli_brdrestart(phba);
8281	lpfc_online(phba);
8282
8283	/* Log the current active interrupt mode */
8284	lpfc_log_intr_mode(phba, phba->intr_mode);
8285
8286	return 0;
8287}
8288
8289/**
8290 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8291 * @pdev: pointer to PCI device.
8292 * @state: the current PCI connection state.
8293 *
8294 * This routine is called from the PCI subsystem for error handling to device
8295 * with SLI-4 interface spec. This function is called by the PCI subsystem
8296 * after a PCI bus error affecting this device has been detected. When this
8297 * function is invoked, it will need to stop all the I/Os and interrupt(s)
8298 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
8299 * for the PCI subsystem to perform proper recovery as desired.
8300 *
8301 * Return codes
8302 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8303 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8304 **/
8305static pci_ers_result_t
8306lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8307{
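	/* No SLI-4 specific handling yet; always ask for a slot reset */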
8308	return PCI_ERS_RESULT_NEED_RESET;
8309}
8310
8311/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
8313 * @pdev: pointer to PCI device.
8314 *
8315 * This routine is called from the PCI subsystem for error handling to device
8316 * with SLI-4 interface spec. It is called after PCI bus has been reset to
8317 * restart the PCI card from scratch, as if from a cold-boot. During the
8318 * PCI subsystem error recovery, after the driver returns
8319 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8320 * recovery and then call this routine before calling the .resume method to
8321 * recover the device. This function will initialize the HBA device, enable
8322 * the interrupt, but it will just put the HBA to offline state without
8323 * passing any I/O traffic.
8324 *
8325 * Return codes
8326 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8327 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
8329static pci_ers_result_t
8330lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8331{
8332	return PCI_ERS_RESULT_RECOVERED;
8333}
8334
8335/**
8336 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
8337 * @pdev: pointer to PCI device
8338 *
8339 * This routine is called from the PCI subsystem for error handling to device
8340 * with SLI-4 interface spec. It is called when kernel error recovery tells
8341 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8342 * error recovery. After this call, traffic can start to flow from this device
8343 * again.
8344 **/
8345static void
8346lpfc_io_resume_s4(struct pci_dev *pdev)
8347{
8348	return;
8349}
8350
8351/**
8352 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
8353 * @pdev: pointer to PCI device
8354 * @pid: pointer to PCI device identifier
8355 *
8356 * This routine is to be registered to the kernel's PCI subsystem. When an
8357 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
8358 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is
8360 * successful, the driver core invokes this routine. This routine dispatches
8361 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
8362 * do all the initialization that it needs to do to handle the HBA device
8363 * properly.
8364 *
8365 * Return code
8366 * 	0 - driver can claim the device
8367 * 	negative value - driver can not claim the device
8368 **/
8369static int __devinit
8370lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
8371{
8372	int rc;
8373	struct lpfc_sli_intf intf;
8374
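	/*
	 * Read the SLI interface register from PCI config space to learn
	 * which programming interface (SLI-3 vs. SLI-4) the HBA exposes.
	 */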
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

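	/*
	 * Only a valid SLI_INTF word reporting the SLI-4 revision takes the
	 * SLI-4 probe path; everything else is probed as SLI-3.
	 */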
	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

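	/* Dispatch on the device group recorded at probe time. */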
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

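/* PCI vendor/device IDs of all HBA models claimed by this driver. */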
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

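/* Entry points invoked by the PCI error-recovery core. */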
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is invoked when the lpfc module is loaded into the kernel.
 * The special kernel macro module_init() is used to indicate the role of
 * this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

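	/*
	 * With NPIV enabled, wire the vport create/delete methods into
	 * the FC transport template before attaching it.
	 */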
	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
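	/* Free the BlockGuard (DIF) debug dump buffers, if allocated. */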
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1UL << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1UL << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);