lpfc_init.c revision 97f2ecf1f401d689d4036f64c244fad3b39e5e0a
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

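			/* One-time setup: convert the license key text to
			 * big-endian 32-bit words, presumably the byte
			 * order the adapter expects when the key is copied
			 * into the mailbox below.
			 */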
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* The dump may return zero words when finished or on a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake-up parameters. When the command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human-readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

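		/* Each of the 6 low WWNN bytes yields two characters:
		 * high then low nibble, mapped to '0'-'9' (0x30 + j) or
		 * 'a'-'f' (0x61 + j - 10), so i advances twice per pass.
		 */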
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring until hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA attention conditions as MSI-X messages when in
	 * MSI-X mode.
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

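	/* With FCP ring polling enabled and its interrupt disabled, keep
	 * ring 0 (the FCP ring) attention masked; completions are then
	 * reaped by the driver's poll routine instead of the ISR.
	 */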
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

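	/* Reject a user-requested fixed link speed that the adapter's
	 * link-speed mask (phba->lmt) does not advertise; fall back to
	 * auto-negotiation in that case.
	 */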
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back with
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expires with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
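			/* Only issue a new heart beat while the mailbox is
			 * otherwise idle; if it is busy, skipped_hb records
			 * when we last deferred, so a stalled mailbox still
			 * gets flagged below.
			 */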
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring an SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers the error attention. That could
	 * cause I/Os to be dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers the error attention with
		 * HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * routine twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");
		/*
		 * On an error status condition, the driver needs to wait for
		 * the port to be ready before performing a reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				/* don't report event on forced debug dump */
				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
					return;
				else
					break;
			}
			/* fall through for not able to recover */
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;
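	/* rc doubles as a diagnostic code (1-4) identifying which setup
	 * step failed; it is only surfaced in the 0300 log message on the
	 * error path below.
	 */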
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
1657
1658/**
1659 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1660 * @phba: pointer to lpfc hba data structure.
1661 * @vpd: pointer to the vital product data.
1662 * @len: length of the vital product data in bytes.
1663 *
1664 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1665 * an array of characters. In this routine, the ModelName, ProgramType,
1666 * ModelDesc, etc. fields of the phba data structure are populated.
1667 *
1668 * Return codes
1669 *   0 - pointer to the VPD passed in is NULL
1670 *   1 - success
1671 **/
1672int
1673lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1674{
1675	uint8_t lenlo, lenhi;
1676	int Length;
1677	int i, j;
1678	int finished = 0;
1679	int index = 0;
1680
1681	if (!vpd)
1682		return 0;
1683
1684	/* Vital Product */
1685	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1686			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1687			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1688			(uint32_t) vpd[3]);
1689	while (!finished && (index < (len - 4))) {
1690		switch (vpd[index]) {
1691		case 0x82:
1692		case 0x91:
1693			index += 1;
1694			lenlo = vpd[index];
1695			index += 1;
1696			lenhi = vpd[index];
1697			index += 1;
1698			i = ((((unsigned short)lenhi) << 8) + lenlo);
1699			index += i;
1700			break;
1701		case 0x90:
1702			index += 1;
1703			lenlo = vpd[index];
1704			index += 1;
1705			lenhi = vpd[index];
1706			index += 1;
1707			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1708			if (Length > len - index)
1709				Length = len - index;
1710			while (Length > 0) {
1711				/* Look for Serial Number */
1712				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1713					index += 2;
1714					i = vpd[index];
1715					index += 1;
1716					j = 0;
1717					Length -= (3+i);
1718					while (i--) {
1719						phba->SerialNumber[j++] = vpd[index++];
1720						if (j == 31)
1721							break;
1722					}
1723					phba->SerialNumber[j] = 0;
1724					continue;
1725				}
1726				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1727					phba->vpd_flag |= VPD_MODEL_DESC;
1728					index += 2;
1729					i = vpd[index];
1730					index += 1;
1731					j = 0;
1732					Length -= (3+i);
1733					while (i--) {
1734						phba->ModelDesc[j++] = vpd[index++];
1735						if (j == 255)
1736							break;
1737					}
1738					phba->ModelDesc[j] = 0;
1739					continue;
1740				}
1741				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1742					phba->vpd_flag |= VPD_MODEL_NAME;
1743					index += 2;
1744					i = vpd[index];
1745					index += 1;
1746					j = 0;
1747					Length -= (3+i);
1748					while (i--) {
1749						phba->ModelName[j++] = vpd[index++];
1750						if (j == 79)
1751							break;
1752					}
1753					phba->ModelName[j] = 0;
1754					continue;
1755				}
1756				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1757					phba->vpd_flag |= VPD_PROGRAM_TYPE;
1758					index += 2;
1759					i = vpd[index];
1760					index += 1;
1761					j = 0;
1762					Length -= (3+i);
1763					while (i--) {
1764						phba->ProgramType[j++] = vpd[index++];
1765						if (j == 255)
1766							break;
1767					}
1768					phba->ProgramType[j] = 0;
1769					continue;
1770				}
1771				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1772					phba->vpd_flag |= VPD_PORT;
1773					index += 2;
1774					i = vpd[index];
1775					index += 1;
1776					j = 0;
1777					Length -= (3+i);
1778					while (i--) {
1779						if ((phba->sli_rev == LPFC_SLI_REV4) &&
1780						    (phba->sli4_hba.pport_name_sta ==
1781						     LPFC_SLI4_PPNAME_GET)) {
1782							j++;
1783							index++;
1784						} else
1785							phba->Port[j++] = vpd[index++];
1786						if (j == 19)
1787							break;
1788					}
1789					if ((phba->sli_rev != LPFC_SLI_REV4) ||
1790					    (phba->sli4_hba.pport_name_sta ==
1791					     LPFC_SLI4_PPNAME_NON))
1792						phba->Port[j] = 0;
1793					continue;
1794				}
1795				else {
1796					index += 2;
1797					i = vpd[index];
1798					index += 1;
1799					index += i;
1800					Length -= (3 + i);
1801				}
1802			}
1803			finished = 0;
1804			break;
1805		case 0x78:
1806			finished = 1;
1807			break;
1808		default:
1809			index++;
1810			break;
1811		}
1812	}
1813
1814	return 1;
1815}
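/*
 * For reference, lpfc_parse_vpd() above follows the standard PCI VPD
 * resource layout: tags 0x82 (Identifier String) and 0x91 (VPD-W) are
 * skipped, 0x90 (VPD-R) is scanned for keyword entries, and 0x78 (End Tag)
 * terminates the data. A sketch of one VPD-R keyword entry as the parser
 * expects it (the example bytes are illustrative only):
 *
 *	vpd[index + 0] = 'S'	keyword byte 0
 *	vpd[index + 1] = 'N'	keyword byte 1
 *	vpd[index + 2] = 0x0c	data length i
 *	vpd[index + 3..]	i bytes of ASCII serial number data
 */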
1816
1817/**
1818 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1819 * @phba: pointer to lpfc hba data structure.
1820 * @mdp: pointer to the data structure to hold the derived model name.
1821 * @descp: pointer to the data structure to hold the derived description.
1822 *
1823 * This routine retrieves HBA's description based on its registered PCI device
1824 * ID. The @descp passed into this function points to an array of 256 chars. It
1825 * shall be returned with the model name, maximum speed, and the host bus type.
1826 * The @mdp passed into this function points to an array of 80 chars. When the
1827 * function returns, the @mdp will be filled with the model name.
1828 **/
1829static void
1830lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1831{
1832	lpfc_vpd_t *vp;
1833	uint16_t dev_id = phba->pcidev->device;
1834	int max_speed;
1835	int GE = 0;
1836	int oneConnect = 0; /* default is not a oneConnect */
1837	struct {
1838		char *name;
1839		char *bus;
1840		char *function;
1841	} m = {"<Unknown>", "", ""};
1842
1843	if (mdp && mdp[0] != '\0'
1844		&& descp && descp[0] != '\0')
1845		return;
1846
1847	if (phba->lmt & LMT_16Gb)
1848		max_speed = 16;
1849	else if (phba->lmt & LMT_10Gb)
1850		max_speed = 10;
1851	else if (phba->lmt & LMT_8Gb)
1852		max_speed = 8;
1853	else if (phba->lmt & LMT_4Gb)
1854		max_speed = 4;
1855	else if (phba->lmt & LMT_2Gb)
1856		max_speed = 2;
1857	else
1858		max_speed = 1;
1859
1860	vp = &phba->vpd;
1861
1862	switch (dev_id) {
1863	case PCI_DEVICE_ID_FIREFLY:
1864		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1865		break;
1866	case PCI_DEVICE_ID_SUPERFLY:
1867		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1868			m = (typeof(m)){"LP7000", "PCI",
1869					"Fibre Channel Adapter"};
1870		else
1871			m = (typeof(m)){"LP7000E", "PCI",
1872					"Fibre Channel Adapter"};
1873		break;
1874	case PCI_DEVICE_ID_DRAGONFLY:
1875		m = (typeof(m)){"LP8000", "PCI",
1876				"Fibre Channel Adapter"};
1877		break;
1878	case PCI_DEVICE_ID_CENTAUR:
1879		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1880			m = (typeof(m)){"LP9002", "PCI",
1881					"Fibre Channel Adapter"};
1882		else
1883			m = (typeof(m)){"LP9000", "PCI",
1884					"Fibre Channel Adapter"};
1885		break;
1886	case PCI_DEVICE_ID_RFLY:
1887		m = (typeof(m)){"LP952", "PCI",
1888				"Fibre Channel Adapter"};
1889		break;
1890	case PCI_DEVICE_ID_PEGASUS:
1891		m = (typeof(m)){"LP9802", "PCI-X",
1892				"Fibre Channel Adapter"};
1893		break;
1894	case PCI_DEVICE_ID_THOR:
1895		m = (typeof(m)){"LP10000", "PCI-X",
1896				"Fibre Channel Adapter"};
1897		break;
1898	case PCI_DEVICE_ID_VIPER:
1899		m = (typeof(m)){"LPX1000",  "PCI-X",
1900				"Fibre Channel Adapter"};
1901		break;
1902	case PCI_DEVICE_ID_PFLY:
1903		m = (typeof(m)){"LP982", "PCI-X",
1904				"Fibre Channel Adapter"};
1905		break;
1906	case PCI_DEVICE_ID_TFLY:
1907		m = (typeof(m)){"LP1050", "PCI-X",
1908				"Fibre Channel Adapter"};
1909		break;
1910	case PCI_DEVICE_ID_HELIOS:
1911		m = (typeof(m)){"LP11000", "PCI-X2",
1912				"Fibre Channel Adapter"};
1913		break;
1914	case PCI_DEVICE_ID_HELIOS_SCSP:
1915		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1916				"Fibre Channel Adapter"};
1917		break;
1918	case PCI_DEVICE_ID_HELIOS_DCSP:
1919		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1920				"Fibre Channel Adapter"};
1921		break;
1922	case PCI_DEVICE_ID_NEPTUNE:
1923		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1924		break;
1925	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1926		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1927		break;
1928	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1929		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1930		break;
1931	case PCI_DEVICE_ID_BMID:
1932		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1933		break;
1934	case PCI_DEVICE_ID_BSMB:
1935		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1936		break;
1937	case PCI_DEVICE_ID_ZEPHYR:
1938		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1939		break;
1940	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1941		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1942		break;
1943	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1944		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1945		GE = 1;
1946		break;
1947	case PCI_DEVICE_ID_ZMID:
1948		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1949		break;
1950	case PCI_DEVICE_ID_ZSMB:
1951		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1952		break;
1953	case PCI_DEVICE_ID_LP101:
1954		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1955		break;
1956	case PCI_DEVICE_ID_LP10000S:
1957		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1958		break;
1959	case PCI_DEVICE_ID_LP11000S:
1960		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1961		break;
1962	case PCI_DEVICE_ID_LPE11000S:
1963		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1964		break;
1965	case PCI_DEVICE_ID_SAT:
1966		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1967		break;
1968	case PCI_DEVICE_ID_SAT_MID:
1969		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1970		break;
1971	case PCI_DEVICE_ID_SAT_SMB:
1972		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1973		break;
1974	case PCI_DEVICE_ID_SAT_DCSP:
1975		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1976		break;
1977	case PCI_DEVICE_ID_SAT_SCSP:
1978		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1979		break;
1980	case PCI_DEVICE_ID_SAT_S:
1981		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1982		break;
1983	case PCI_DEVICE_ID_HORNET:
1984		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1985		GE = 1;
1986		break;
1987	case PCI_DEVICE_ID_PROTEUS_VF:
1988		m = (typeof(m)){"LPev12000", "PCIe IOV",
1989				"Fibre Channel Adapter"};
1990		break;
1991	case PCI_DEVICE_ID_PROTEUS_PF:
1992		m = (typeof(m)){"LPev12000", "PCIe IOV",
1993				"Fibre Channel Adapter"};
1994		break;
1995	case PCI_DEVICE_ID_PROTEUS_S:
1996		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1997				"Fibre Channel Adapter"};
1998		break;
1999	case PCI_DEVICE_ID_TIGERSHARK:
2000		oneConnect = 1;
2001		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2002		break;
2003	case PCI_DEVICE_ID_TOMCAT:
2004		oneConnect = 1;
2005		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2006		break;
2007	case PCI_DEVICE_ID_FALCON:
2008		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2009				"EmulexSecure Fibre"};
2010		break;
2011	case PCI_DEVICE_ID_BALIUS:
2012		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2013				"Fibre Channel Adapter"};
2014		break;
2015	case PCI_DEVICE_ID_LANCER_FC:
2016	case PCI_DEVICE_ID_LANCER_FC_VF:
2017		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2018		break;
2019	case PCI_DEVICE_ID_LANCER_FCOE:
2020	case PCI_DEVICE_ID_LANCER_FCOE_VF:
2021		oneConnect = 1;
2022		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2023		break;
2024	default:
2025		m = (typeof(m)){"Unknown", "", ""};
2026		break;
2027	}
2028
2029	if (mdp && mdp[0] == '\0')
2030		snprintf(mdp, 79, "%s", m.name);
2031	/*
2032	 * OneConnect HBAs require special processing; they are all initiators
2033	 * and we append the port number to the description.
2034	 */
2035	if (descp && descp[0] == '\0') {
2036		if (oneConnect)
2037			snprintf(descp, 255,
2038				"Emulex OneConnect %s, %s Initiator, Port %s",
2039				m.name, m.function,
2040				phba->Port);
2041		else
2042			snprintf(descp, 255,
2043				"Emulex %s %d%s %s %s",
2044				m.name, max_speed, (GE) ? "GE" : "Gb",
2045				m.bus, m.function);
2046	}
2047}
2048
2049/**
2050 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2051 * @phba: pointer to lpfc hba data structure.
2052 * @pring: pointer to a IOCB ring.
2053 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2054 *
2055 * This routine posts a given number of IOCBs with the associated DMA buffer
2056 * descriptors specified by the cnt argument to the given IOCB ring.
2057 *
2058 * Return codes
2059 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2060 **/
2061int
2062lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2063{
2064	IOCB_t *icmd;
2065	struct lpfc_iocbq *iocb;
2066	struct lpfc_dmabuf *mp1, *mp2;
2067
2068	cnt += pring->missbufcnt;
2069
2070	/* While there are buffers to post */
2071	while (cnt > 0) {
2072		/* Allocate buffer for command iocb */
2073		iocb = lpfc_sli_get_iocbq(phba);
2074		if (iocb == NULL) {
2075			pring->missbufcnt = cnt;
2076			return cnt;
2077		}
2078		icmd = &iocb->iocb;
2079
2080		/* 2 buffers can be posted per command */
2081		/* Allocate buffer to post */
2082		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2083		if (mp1)
2084			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2085		if (!mp1 || !mp1->virt) {
2086			kfree(mp1);
2087			lpfc_sli_release_iocbq(phba, iocb);
2088			pring->missbufcnt = cnt;
2089			return cnt;
2090		}
2091
2092		INIT_LIST_HEAD(&mp1->list);
2093		/* Allocate buffer to post */
2094		if (cnt > 1) {
2095			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2096			if (mp2)
2097				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2098							    &mp2->phys);
2099			if (!mp2 || !mp2->virt) {
2100				kfree(mp2);
2101				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2102				kfree(mp1);
2103				lpfc_sli_release_iocbq(phba, iocb);
2104				pring->missbufcnt = cnt;
2105				return cnt;
2106			}
2107
2108			INIT_LIST_HEAD(&mp2->list);
2109		} else {
2110			mp2 = NULL;
2111		}
2112
2113		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2114		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2115		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2116		icmd->ulpBdeCount = 1;
2117		cnt--;
2118		if (mp2) {
2119			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2120			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2121			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2122			cnt--;
2123			icmd->ulpBdeCount = 2;
2124		}
2125
2126		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2127		icmd->ulpLe = 1;
2128
2129		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2130		    IOCB_ERROR) {
2131			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2132			kfree(mp1);
2133			cnt++;
2134			if (mp2) {
2135				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2136				kfree(mp2);
2137				cnt++;
2138			}
2139			lpfc_sli_release_iocbq(phba, iocb);
2140			pring->missbufcnt = cnt;
2141			return cnt;
2142		}
2143		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2144		if (mp2)
2145			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2146	}
2147	pring->missbufcnt = 0;
2148	return 0;
2149}
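/*
 * A worked example of the accounting above: CMD_QUE_RING_BUF64_CN carries up
 * to two buffer descriptors per command, so posting the LPFC_BUF_RING0 (64)
 * initial ELS buffers used below consumes 32 command IOCBs. Buffers that
 * cannot be posted are carried over in pring->missbufcnt and retried on the
 * next call.
 */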
2150
2151/**
2152 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2153 * @phba: pointer to lpfc hba data structure.
2154 *
2155 * This routine posts initial receive IOCB buffers to the ELS ring. The
2156 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2157 * set to 64 IOCBs.
2158 *
2159 * Return codes
2160 *   0 - success (currently always success)
2161 **/
2162static int
2163lpfc_post_rcv_buf(struct lpfc_hba *phba)
2164{
2165	struct lpfc_sli *psli = &phba->sli;
2166
2167	/* Ring 0, ELS / CT buffers */
2168	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2169	/* Ring 2 - FCP no buffers needed */
2170
2171	return 0;
2172}
2173
2174#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
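/*
 * S(N, V) is a 32-bit rotate-left of V by N bits, the "circular left shift"
 * used by the SHA-1 style rounds below; for example, S(1, 0x80000000)
 * evaluates to 0x00000001.
 */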
2175
2176/**
2177 * lpfc_sha_init - Set up initial array of hash table entries
2178 * @HashResultPointer: pointer to an array as hash table.
2179 *
2180 * This routine sets up the initial values to the array of hash table entries
2181 * for the LC HBAs.
2182 **/
2183static void
2184lpfc_sha_init(uint32_t * HashResultPointer)
2185{
2186	HashResultPointer[0] = 0x67452301;
2187	HashResultPointer[1] = 0xEFCDAB89;
2188	HashResultPointer[2] = 0x98BADCFE;
2189	HashResultPointer[3] = 0x10325476;
2190	HashResultPointer[4] = 0xC3D2E1F0;
2191}
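/*
 * For reference, the five constants above are the standard SHA-1 initial
 * hash values (H0..H4 from FIPS 180-1); together with the round constants
 * in lpfc_sha_iterate() below (0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC,
 * 0xCA62C1D6), these routines implement the SHA-1 compression function.
 */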
2192
2193/**
2194 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2195 * @HashResultPointer: pointer to an initial/result hash table.
2196 * @HashWorkingPointer: pointer to a working hash table.
2197 *
2198 * This routine iterates the initial hash table pointed to by
2199 * @HashResultPointer with the values from the working hash table pointed to
2200 * by @HashWorkingPointer. The results are put back into the initial hash
2201 * table and returned through @HashResultPointer as the result hash table.
2202 **/
2203static void
2204lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2205{
2206	int t;
2207	uint32_t TEMP;
2208	uint32_t A, B, C, D, E;
2209	t = 16;
2210	do {
2211		HashWorkingPointer[t] =
2212		    S(1, HashWorkingPointer[t - 3] ^
2213			 HashWorkingPointer[t - 8] ^
2214			 HashWorkingPointer[t - 14] ^
2215			 HashWorkingPointer[t - 16]);
2216	} while (++t <= 79);
2217	t = 0;
2218	A = HashResultPointer[0];
2219	B = HashResultPointer[1];
2220	C = HashResultPointer[2];
2221	D = HashResultPointer[3];
2222	E = HashResultPointer[4];
2223
2224	do {
2225		if (t < 20) {
2226			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2227		} else if (t < 40) {
2228			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2229		} else if (t < 60) {
2230			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2231		} else {
2232			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2233		}
2234		TEMP += S(5, A) + E + HashWorkingPointer[t];
2235		E = D;
2236		D = C;
2237		C = S(30, B);
2238		B = A;
2239		A = TEMP;
2240	} while (++t <= 79);
2241
2242	HashResultPointer[0] += A;
2243	HashResultPointer[1] += B;
2244	HashResultPointer[2] += C;
2245	HashResultPointer[3] += D;
2246	HashResultPointer[4] += E;
2247
2248}
2249
2250/**
2251 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2252 * @RandomChallenge: pointer to the entry of host challenge random number array.
2253 * @HashWorking: pointer to the entry of the working hash array.
2254 *
2255 * This routine calculates the working hash array referred by @HashWorking
2256 * from the challenge random numbers associated with the host, referred by
2257 * @RandomChallenge. The result is put into the entry of the working hash
2258 * array and returned by reference through @HashWorking.
2259 **/
2260static void
2261lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2262{
2263	*HashWorking = (*RandomChallenge ^ *HashWorking);
2264}
2265
2266/**
2267 * lpfc_hba_init - Perform special handling for LC HBA initialization
2268 * @phba: pointer to lpfc hba data structure.
2269 * @hbainit: pointer to an array of unsigned 32-bit integers.
2270 *
2271 * This routine performs the special handling for LC HBA initialization.
2272 **/
2273void
2274lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2275{
2276	int t;
2277	uint32_t *HashWorking;
2278	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2279
2280	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2281	if (!HashWorking)
2282		return;
2283
2284	HashWorking[0] = HashWorking[78] = *pwwnn++;
2285	HashWorking[1] = HashWorking[79] = *pwwnn;
2286
2287	for (t = 0; t < 7; t++)
2288		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2289
2290	lpfc_sha_init(hbainit);
2291	lpfc_sha_iterate(hbainit, HashWorking);
2292	kfree(HashWorking);
2293}
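/*
 * For reference, the 80-word HashWorking block built above is laid out as
 * follows (kcalloc() zero-fills the rest):
 *
 *	HashWorking[0..1]   = WWNN words, XORed with RandomData[0..1]
 *	HashWorking[2..6]   = RandomData[2..6] (XORed into zero)
 *	HashWorking[7..77]  = zero
 *	HashWorking[78..79] = WWNN words again
 *
 * and is then folded into hbainit[] by one SHA-1 compression pass.
 */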
2294
2295/**
2296 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2297 * @vport: pointer to a virtual N_Port data structure.
2298 *
2299 * This routine performs the necessary cleanups before deleting the @vport.
2300 * It invokes the discovery state machine to perform necessary state
2301 * transitions and to release the ndlps associated with the @vport. Note,
2302 * the physical port is treated as @vport 0.
2303 **/
2304void
2305lpfc_cleanup(struct lpfc_vport *vport)
2306{
2307	struct lpfc_hba   *phba = vport->phba;
2308	struct lpfc_nodelist *ndlp, *next_ndlp;
2309	int i = 0;
2310
2311	if (phba->link_state > LPFC_LINK_DOWN)
2312		lpfc_port_link_failure(vport);
2313
2314	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2315		if (!NLP_CHK_NODE_ACT(ndlp)) {
2316			ndlp = lpfc_enable_node(vport, ndlp,
2317						NLP_STE_UNUSED_NODE);
2318			if (!ndlp)
2319				continue;
2320			spin_lock_irq(&phba->ndlp_lock);
2321			NLP_SET_FREE_REQ(ndlp);
2322			spin_unlock_irq(&phba->ndlp_lock);
2323			/* Trigger the release of the ndlp memory */
2324			lpfc_nlp_put(ndlp);
2325			continue;
2326		}
2327		spin_lock_irq(&phba->ndlp_lock);
2328		if (NLP_CHK_FREE_REQ(ndlp)) {
2329			/* Free request is already pending for this ndlp; skip it */
2330			spin_unlock_irq(&phba->ndlp_lock);
2331			continue;
2332		} else
2333			/* Indicate request for freeing ndlp memory */
2334			NLP_SET_FREE_REQ(ndlp);
2335		spin_unlock_irq(&phba->ndlp_lock);
2336
2337		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2338		    ndlp->nlp_DID == Fabric_DID) {
2339			/* Just free up ndlp with Fabric_DID for vports */
2340			lpfc_nlp_put(ndlp);
2341			continue;
2342		}
2343
2344		/* take care of nodes in unused state before the state
2345		 * machine takes action.
2346		 */
2347		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2348			lpfc_nlp_put(ndlp);
2349			continue;
2350		}
2351
2352		if (ndlp->nlp_type & NLP_FABRIC)
2353			lpfc_disc_state_machine(vport, ndlp, NULL,
2354					NLP_EVT_DEVICE_RECOVERY);
2355
2356		lpfc_disc_state_machine(vport, ndlp, NULL,
2357					     NLP_EVT_DEVICE_RM);
2358	}
2359
2360	/* At this point, ALL ndlp's should be gone
2361	 * because of the previous NLP_EVT_DEVICE_RM.
2362	 * Let's wait for this to happen, if needed.
2363	 */
2364	while (!list_empty(&vport->fc_nodes)) {
2365		if (i++ > 3000) {
2366			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2367				"0233 Nodelist not empty\n");
2368			list_for_each_entry_safe(ndlp, next_ndlp,
2369						&vport->fc_nodes, nlp_listp) {
2370				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2371						LOG_NODE,
2372						"0282 did:x%x ndlp:x%p "
2373						"usgmap:x%x refcnt:%d\n",
2374						ndlp->nlp_DID, (void *)ndlp,
2375						ndlp->nlp_usg_map,
2376						atomic_read(
2377							&ndlp->kref.refcount));
2378			}
2379			break;
2380		}
2381
2382		/* Wait for any activity on ndlps to settle */
2383		msleep(10);
2384	}
2385	lpfc_cleanup_vports_rrqs(vport, NULL);
2386}
2387
2388/**
2389 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2390 * @vport: pointer to a virtual N_Port data structure.
2391 *
2392 * This routine stops all the timers associated with a @vport. This function
2393 * is invoked before disabling or deleting a @vport. Note that the physical
2394 * port is treated as @vport 0.
2395 **/
2396void
2397lpfc_stop_vport_timers(struct lpfc_vport *vport)
2398{
2399	del_timer_sync(&vport->els_tmofunc);
2400	del_timer_sync(&vport->fc_fdmitmo);
2401	del_timer_sync(&vport->delayed_disc_tmo);
2402	lpfc_can_disctmo(vport);
2403	return;
2404}
2405
2406/**
2407 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2408 * @phba: pointer to lpfc hba data structure.
2409 *
2410 * This routine stops the SLI4 FCF rediscover wait timer if it is running.
2411 * The caller of this routine should already hold the hbalock.
2412 **/
2413void
2414__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2415{
2416	/* Clear pending FCF rediscovery wait flag */
2417	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2418
2419	/* Now, try to stop the timer */
2420	del_timer(&phba->fcf.redisc_wait);
2421}
2422
2423/**
2424 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2425 * @phba: pointer to lpfc hba data structure.
2426 *
2427 * This routine stops the SLI4 FCF rediscover wait timer if it is running.
2428 * It checks, with the hbalock held, whether the FCF rediscovery wait timer
2429 * is pending before proceeding to disable the timer and clear the wait
2430 * timer pending flag.
2431 **/
2432void
2433lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2434{
2435	spin_lock_irq(&phba->hbalock);
2436	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2437		/* FCF rediscovery timer already fired or stopped */
2438		spin_unlock_irq(&phba->hbalock);
2439		return;
2440	}
2441	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2442	/* Clear failover in progress flags */
2443	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2444	spin_unlock_irq(&phba->hbalock);
2445}
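/*
 * Naming note: per the usual kernel convention, the double-underscore
 * variant __lpfc_sli4_stop_fcf_redisc_wait_timer() expects the caller to
 * hold phba->hbalock, while the wrapper above acquires and releases the
 * lock around it, e.g.:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 *	spin_unlock_irq(&phba->hbalock);
 */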
2446
2447/**
2448 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2449 * @phba: pointer to lpfc hba data structure.
2450 *
2451 * This routine stops all the timers associated with a HBA. This function is
2452 * invoked before either putting a HBA offline or unloading the driver.
2453 **/
2454void
2455lpfc_stop_hba_timers(struct lpfc_hba *phba)
2456{
2457	lpfc_stop_vport_timers(phba->pport);
2458	del_timer_sync(&phba->sli.mbox_tmo);
2459	del_timer_sync(&phba->fabric_block_timer);
2460	del_timer_sync(&phba->eratt_poll);
2461	del_timer_sync(&phba->hb_tmofunc);
2462	if (phba->sli_rev == LPFC_SLI_REV4) {
2463		del_timer_sync(&phba->rrq_tmr);
2464		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2465	}
2466	phba->hb_outstanding = 0;
2467
2468	switch (phba->pci_dev_grp) {
2469	case LPFC_PCI_DEV_LP:
2470		/* Stop any LightPulse device specific driver timers */
2471		del_timer_sync(&phba->fcp_poll_timer);
2472		break;
2473	case LPFC_PCI_DEV_OC:
2474		/* Stop any OneConnect device specific driver timers */
2475		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2476		break;
2477	default:
2478		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2479				"0297 Invalid device group (x%x)\n",
2480				phba->pci_dev_grp);
2481		break;
2482	}
2483	return;
2484}
2485
2486/**
2487 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2488 * @phba: pointer to lpfc hba data structure.
2489 *
2490 * This routine marks a HBA's management interface as blocked. Once the HBA's
2491 * management interface is marked as blocked, all user space access to the
2492 * HBA, whether through the sysfs interface or the libdfc interface, will be
2493 * blocked. The HBA is set to block the management interface when the
2494 * driver prepares the HBA interface for online or offline.
2495 **/
2496static void
2497lpfc_block_mgmt_io(struct lpfc_hba * phba)
2498{
2499	unsigned long iflag;
2500	uint8_t actcmd = MBX_HEARTBEAT;
2501	unsigned long timeout;
2502
2503	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2504	spin_lock_irqsave(&phba->hbalock, iflag);
2505	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2506	if (phba->sli.mbox_active) {
2507		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2508		/* Determine how long we might wait for the active mailbox
2509		 * command to be gracefully completed by firmware.
2510		 */
2511		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2512				phba->sli.mbox_active) * 1000) + jiffies;
2513	}
2514	spin_unlock_irqrestore(&phba->hbalock, iflag);
2515
2516	/* Wait for the outstanding mailbox command to complete */
2517	while (phba->sli.mbox_active) {
2518		/* Check active mailbox complete status every 2ms */
2519		msleep(2);
2520		if (time_after(jiffies, timeout)) {
2521			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2522				"2813 Mgmt IO is Blocked %x "
2523				"- mbox cmd %x still active\n",
2524				phba->sli.sli_flag, actcmd);
2525			break;
2526		}
2527	}
2528}
2529
2530/**
2531 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2532 * @phba: pointer to lpfc hba data structure.
2533 *
2534 * Allocate RPIs for all active remote nodes. This is needed whenever
2535 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2536 * is to fix up the temporary RPI assignments.
2537 **/
2538void
2539lpfc_sli4_node_prep(struct lpfc_hba *phba)
2540{
2541	struct lpfc_nodelist  *ndlp, *next_ndlp;
2542	struct lpfc_vport **vports;
2543	int i;
2544
2545	if (phba->sli_rev != LPFC_SLI_REV4)
2546		return;
2547
2548	vports = lpfc_create_vport_work_array(phba);
2549	if (vports != NULL) {
2550		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2551			if (vports[i]->load_flag & FC_UNLOADING)
2552				continue;
2553
2554			list_for_each_entry_safe(ndlp, next_ndlp,
2555						 &vports[i]->fc_nodes,
2556						 nlp_listp) {
2557				if (NLP_CHK_NODE_ACT(ndlp))
2558					ndlp->nlp_rpi =
2559						lpfc_sli4_alloc_rpi(phba);
2560			}
2561		}
2562	}
2563	lpfc_destroy_vport_work_array(phba, vports);
2564}
2565
2566/**
2567 * lpfc_online - Initialize and bring a HBA online
2568 * @phba: pointer to lpfc hba data structure.
2569 *
2570 * This routine initializes the HBA and brings a HBA online. During this
2571 * process, the management interface is blocked to prevent user space access
2572 * to the HBA interfering with the driver initialization.
2573 *
2574 * Return codes
2575 *   0 - successful
2576 *   1 - failed
2577 **/
2578int
2579lpfc_online(struct lpfc_hba *phba)
2580{
2581	struct lpfc_vport *vport;
2582	struct lpfc_vport **vports;
2583	int i;
2584
2585	if (!phba)
2586		return 0;
2587	vport = phba->pport;
2588
2589	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2590		return 0;
2591
2592	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2593			"0458 Bring Adapter online\n");
2594
2595	lpfc_block_mgmt_io(phba);
2596
2597	if (!lpfc_sli_queue_setup(phba)) {
2598		lpfc_unblock_mgmt_io(phba);
2599		return 1;
2600	}
2601
2602	if (phba->sli_rev == LPFC_SLI_REV4) {
2603		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2604			lpfc_unblock_mgmt_io(phba);
2605			return 1;
2606		}
2607	} else {
2608		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2609			lpfc_unblock_mgmt_io(phba);
2610			return 1;
2611		}
2612	}
2613
2614	vports = lpfc_create_vport_work_array(phba);
2615	if (vports != NULL)
2616		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2617			struct Scsi_Host *shost;
2618			shost = lpfc_shost_from_vport(vports[i]);
2619			spin_lock_irq(shost->host_lock);
2620			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2621			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2622				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2623			if (phba->sli_rev == LPFC_SLI_REV4)
2624				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2625			spin_unlock_irq(shost->host_lock);
2626		}
2627	lpfc_destroy_vport_work_array(phba, vports);
2628
2629	lpfc_unblock_mgmt_io(phba);
2630	return 0;
2631}
2632
2633/**
2634 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2635 * @phba: pointer to lpfc hba data structure.
2636 *
2637 * This routine marks a HBA's management interface as not blocked. Once the
2638 * HBA's management interface is marked as not blocked, all the user space
2639 * access to the HBA, whether they are from sysfs interface or libdfc
2640 * interface will be allowed. The HBA is set to block the management interface
2641 * when the driver prepares the HBA interface for online or offline and then
2642 * set to unblock the management interface afterwards.
2643 **/
2644void
2645lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2646{
2647	unsigned long iflag;
2648
2649	spin_lock_irqsave(&phba->hbalock, iflag);
2650	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2651	spin_unlock_irqrestore(&phba->hbalock, iflag);
2652}
2653
2654/**
2655 * lpfc_offline_prep - Prepare a HBA to be brought offline
2656 * @phba: pointer to lpfc hba data structure.
2657 *
2658 * This routine is invoked to prepare a HBA to be brought offline. It performs
2659 * unregistration login to all the nodes on all vports and flushes the mailbox
2660 * queue to make it ready to be brought offline.
2661 **/
2662void
2663lpfc_offline_prep(struct lpfc_hba * phba)
2664{
2665	struct lpfc_vport *vport = phba->pport;
2666	struct lpfc_nodelist  *ndlp, *next_ndlp;
2667	struct lpfc_vport **vports;
2668	struct Scsi_Host *shost;
2669	int i;
2670
2671	if (vport->fc_flag & FC_OFFLINE_MODE)
2672		return;
2673
2674	lpfc_block_mgmt_io(phba);
2675
2676	lpfc_linkdown(phba);
2677
2678	/* Issue an unreg_login to all nodes on all vports */
2679	vports = lpfc_create_vport_work_array(phba);
2680	if (vports != NULL) {
2681		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2682			if (vports[i]->load_flag & FC_UNLOADING)
2683				continue;
2684			shost = lpfc_shost_from_vport(vports[i]);
2685			spin_lock_irq(shost->host_lock);
2686			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2687			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2688			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2689			spin_unlock_irq(shost->host_lock);
2690
2691			shost =	lpfc_shost_from_vport(vports[i]);
2692			list_for_each_entry_safe(ndlp, next_ndlp,
2693						 &vports[i]->fc_nodes,
2694						 nlp_listp) {
2695				if (!NLP_CHK_NODE_ACT(ndlp))
2696					continue;
2697				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2698					continue;
2699				if (ndlp->nlp_type & NLP_FABRIC) {
2700					lpfc_disc_state_machine(vports[i], ndlp,
2701						NULL, NLP_EVT_DEVICE_RECOVERY);
2702					lpfc_disc_state_machine(vports[i], ndlp,
2703						NULL, NLP_EVT_DEVICE_RM);
2704				}
2705				spin_lock_irq(shost->host_lock);
2706				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2707
2708				/*
2709				 * Whenever an SLI4 port goes offline, free the
2710				 * RPI.  A new RPI will be assigned when the
2711				 * adapter port comes back online.
2712				 */
2713				if (phba->sli_rev == LPFC_SLI_REV4)
2714					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2715
2716				spin_unlock_irq(shost->host_lock);
2717				lpfc_unreg_rpi(vports[i], ndlp);
2718			}
2719		}
2720	}
2721	lpfc_destroy_vport_work_array(phba, vports);
2722
2723	lpfc_sli_mbox_sys_shutdown(phba);
2724}
2725
2726/**
2727 * lpfc_offline - Bring a HBA offline
2728 * @phba: pointer to lpfc hba data structure.
2729 *
2730 * This routine actually brings a HBA offline. It stops all the timers
2731 * associated with the HBA, brings down the SLI layer, and eventually
2732 * marks the HBA as in offline state for the upper layer protocol.
2733 **/
2734void
2735lpfc_offline(struct lpfc_hba *phba)
2736{
2737	struct Scsi_Host  *shost;
2738	struct lpfc_vport **vports;
2739	int i;
2740
2741	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2742		return;
2743
2744	/* stop port and all timers associated with this hba */
2745	lpfc_stop_port(phba);
2746	vports = lpfc_create_vport_work_array(phba);
2747	if (vports != NULL)
2748		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2749			lpfc_stop_vport_timers(vports[i]);
2750	lpfc_destroy_vport_work_array(phba, vports);
2751	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2752			"0460 Bring Adapter offline\n");
2753	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2754	   now.  */
2755	lpfc_sli_hba_down(phba);
2756	spin_lock_irq(&phba->hbalock);
2757	phba->work_ha = 0;
2758	spin_unlock_irq(&phba->hbalock);
2759	vports = lpfc_create_vport_work_array(phba);
2760	if (vports != NULL)
2761		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2762			shost = lpfc_shost_from_vport(vports[i]);
2763			spin_lock_irq(shost->host_lock);
2764			vports[i]->work_port_events = 0;
2765			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2766			spin_unlock_irq(shost->host_lock);
2767		}
2768	lpfc_destroy_vport_work_array(phba, vports);
2769}
2770
2771/**
2772 * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
2773 * @phba: pointer to lpfc hba data structure.
2774 *
2775 * This routine goes through all the SCSI buffers in the system and updates
2776 * the physical XRIs assigned to each SCSI buffer, because these may change
2777 * after any firmware reset.
2778 *
2779 * Return codes
2780 *   0 - successful (for now, it always returns 0)
2781 **/
2782int
2783lpfc_scsi_buf_update(struct lpfc_hba *phba)
2784{
2785	struct lpfc_scsi_buf *sb, *sb_next;
2786
2787	spin_lock_irq(&phba->hbalock);
2788	spin_lock(&phba->scsi_buf_list_lock);
2789	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2790		sb->cur_iocbq.sli4_xritag =
2791			phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
2792		set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
2793		phba->sli4_hba.max_cfg_param.xri_used++;
2794		phba->sli4_hba.xri_count++;
2795	}
2796	spin_unlock(&phba->scsi_buf_list_lock);
2797	spin_unlock_irq(&phba->hbalock);
2798	return 0;
2799}
2800
2801/**
2802 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2803 * @phba: pointer to lpfc hba data structure.
2804 *
2805 * This routine is to free all the SCSI buffers and IOCBs from the driver
2806 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2807 * the internal resources before the device is removed from the system.
2808 *
2809 * Return codes
2810 *   0 - successful (for now, it always returns 0)
2811 **/
2812static int
2813lpfc_scsi_free(struct lpfc_hba *phba)
2814{
2815	struct lpfc_scsi_buf *sb, *sb_next;
2816	struct lpfc_iocbq *io, *io_next;
2817
2818	spin_lock_irq(&phba->hbalock);
2819	/* Release all the lpfc_scsi_bufs maintained by this host. */
2820	spin_lock(&phba->scsi_buf_list_lock);
2821	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2822		list_del(&sb->list);
2823		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2824			      sb->dma_handle);
2825		kfree(sb);
2826		phba->total_scsi_bufs--;
2827	}
2828	spin_unlock(&phba->scsi_buf_list_lock);
2829
2830	/* Release all the lpfc_iocbq entries maintained by this host. */
2831	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2832		list_del(&io->list);
2833		kfree(io);
2834		phba->total_iocbq_bufs--;
2835	}
2836
2837	spin_unlock_irq(&phba->hbalock);
2838	return 0;
2839}
2840
2841/**
2842 * lpfc_create_port - Create an FC port
2843 * @phba: pointer to lpfc hba data structure.
2844 * @instance: a unique integer ID to this FC port.
2845 * @dev: pointer to the device data structure.
2846 *
2847 * This routine creates a FC port for the upper layer protocol. The FC port
2848 * can be created on top of either a physical port or a virtual port provided
2849 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2850 * and associates the FC port created before adding the shost into the SCSI
2851 * layer.
2852 *
2853 * Return codes
2854 *   @vport - pointer to the virtual N_Port data structure.
2855 *   NULL - port create failed.
2856 **/
2857struct lpfc_vport *
2858lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2859{
2860	struct lpfc_vport *vport;
2861	struct Scsi_Host  *shost;
2862	int error = 0;
2863
2864	if (dev != &phba->pcidev->dev)
2865		shost = scsi_host_alloc(&lpfc_vport_template,
2866					sizeof(struct lpfc_vport));
2867	else
2868		shost = scsi_host_alloc(&lpfc_template,
2869					sizeof(struct lpfc_vport));
2870	if (!shost)
2871		goto out;
2872
2873	vport = (struct lpfc_vport *) shost->hostdata;
2874	vport->phba = phba;
2875	vport->load_flag |= FC_LOADING;
2876	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2877	vport->fc_rscn_flush = 0;
2878
2879	lpfc_get_vport_cfgparam(vport);
2880	shost->unique_id = instance;
2881	shost->max_id = LPFC_MAX_TARGET;
2882	shost->max_lun = vport->cfg_max_luns;
2883	shost->this_id = -1;
2884	shost->max_cmd_len = 16;
2885	if (phba->sli_rev == LPFC_SLI_REV4) {
2886		shost->dma_boundary =
2887			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2888		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2889	}
2890
2891	/*
2892	 * Set initial can_queue value since 0 is no longer supported and
2893	 * scsi_add_host will fail. This will be adjusted later based on the
2894	 * max xri value determined in hba setup.
2895	 */
2896	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2897	if (dev != &phba->pcidev->dev) {
2898		shost->transportt = lpfc_vport_transport_template;
2899		vport->port_type = LPFC_NPIV_PORT;
2900	} else {
2901		shost->transportt = lpfc_transport_template;
2902		vport->port_type = LPFC_PHYSICAL_PORT;
2903	}
2904
2905	/* Initialize all internally managed lists. */
2906	INIT_LIST_HEAD(&vport->fc_nodes);
2907	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2908	spin_lock_init(&vport->work_port_lock);
2909
2910	init_timer(&vport->fc_disctmo);
2911	vport->fc_disctmo.function = lpfc_disc_timeout;
2912	vport->fc_disctmo.data = (unsigned long)vport;
2913
2914	init_timer(&vport->fc_fdmitmo);
2915	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2916	vport->fc_fdmitmo.data = (unsigned long)vport;
2917
2918	init_timer(&vport->els_tmofunc);
2919	vport->els_tmofunc.function = lpfc_els_timeout;
2920	vport->els_tmofunc.data = (unsigned long)vport;
2921
2922	init_timer(&vport->delayed_disc_tmo);
2923	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
2924	vport->delayed_disc_tmo.data = (unsigned long)vport;
2925
2926	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2927	if (error)
2928		goto out_put_shost;
2929
2930	spin_lock_irq(&phba->hbalock);
2931	list_add_tail(&vport->listentry, &phba->port_list);
2932	spin_unlock_irq(&phba->hbalock);
2933	return vport;
2934
2935out_put_shost:
2936	scsi_host_put(shost);
2937out:
2938	return NULL;
2939}
2940
2941/**
2942 * destroy_port -  destroy an FC port
2943 * @vport: pointer to an lpfc virtual N_Port data structure.
2944 *
2945 * This routine destroys a FC port from the upper layer protocol. All the
2946 * resources associated with the port are released.
2947 **/
2948void
2949destroy_port(struct lpfc_vport *vport)
2950{
2951	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2952	struct lpfc_hba  *phba = vport->phba;
2953
2954	lpfc_debugfs_terminate(vport);
2955	fc_remove_host(shost);
2956	scsi_remove_host(shost);
2957
2958	spin_lock_irq(&phba->hbalock);
2959	list_del_init(&vport->listentry);
2960	spin_unlock_irq(&phba->hbalock);
2961
2962	lpfc_cleanup(vport);
2963	return;
2964}
2965
2966/**
2967 * lpfc_get_instance - Get a unique integer ID
2968 *
2969 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2970 * uses the kernel idr facility to perform the task.
2971 *
2972 * Return codes:
2973 *   instance - a unique integer ID allocated as the new instance.
2974 *   -1 - lpfc get instance failed.
2975 **/
2976int
2977lpfc_get_instance(void)
2978{
2979	int instance = 0;
2980
2981	/* Assign an unused number */
2982	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2983		return -1;
2984	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2985		return -1;
2986	return instance;
2987}
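/*
 * Note: idr_pre_get()/idr_get_new() is the two-step idr allocation pattern
 * of this kernel era: preload free nodes first (may sleep with GFP_KERNEL),
 * then atomically take a new id. A sketch of the matching release, assuming
 * the id is dropped at teardown with the standard idr API:
 *
 *	idr_remove(&lpfc_hba_index, instance);
 */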
2988
2989/**
2990 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2991 * @shost: pointer to SCSI host data structure.
2992 * @time: elapsed time of the scan in jiffies.
2993 *
2994 * This routine is called by the SCSI layer with a SCSI host to determine
2995 * whether the host scan is finished.
2996 *
2997 * Note: there is no scan_start function as adapter initialization will have
2998 * asynchronously kicked off the link initialization.
2999 *
3000 * Return codes
3001 *   0 - SCSI host scan is not over yet.
3002 *   1 - SCSI host scan is over.
3003 **/
3004int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3005{
3006	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3007	struct lpfc_hba   *phba = vport->phba;
3008	int stat = 0;
3009
3010	spin_lock_irq(shost->host_lock);
3011
3012	if (vport->load_flag & FC_UNLOADING) {
3013		stat = 1;
3014		goto finished;
3015	}
3016	if (time >= 30 * HZ) {
3017		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3018				"0461 Scanning longer than 30 "
3019				"seconds.  Continuing initialization\n");
3020		stat = 1;
3021		goto finished;
3022	}
3023	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
3024		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3025				"0465 Link down longer than 15 "
3026				"seconds.  Continuing initialization\n");
3027		stat = 1;
3028		goto finished;
3029	}
3030
3031	if (vport->port_state != LPFC_VPORT_READY)
3032		goto finished;
3033	if (vport->num_disc_nodes || vport->fc_prli_sent)
3034		goto finished;
3035	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
3036		goto finished;
3037	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3038		goto finished;
3039
3040	stat = 1;
3041
3042finished:
3043	spin_unlock_irq(shost->host_lock);
3044	return stat;
3045}
3046
3047/**
3048 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
3049 * @shost: pointer to SCSI host data structure.
3050 *
3051 * This routine initializes a given SCSI host's attributes on a FC port. The
3052 * SCSI host can be either on top of a physical port or a virtual port.
3053 **/
3054void lpfc_host_attrib_init(struct Scsi_Host *shost)
3055{
3056	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3057	struct lpfc_hba   *phba = vport->phba;
3058	/*
3059	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
3060	 */
3061
3062	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3063	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3064	fc_host_supported_classes(shost) = FC_COS_CLASS3;
3065
3066	memset(fc_host_supported_fc4s(shost), 0,
3067	       sizeof(fc_host_supported_fc4s(shost)));
3068	fc_host_supported_fc4s(shost)[2] = 1;
3069	fc_host_supported_fc4s(shost)[7] = 1;
3070
3071	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3072				 sizeof fc_host_symbolic_name(shost));
3073
3074	fc_host_supported_speeds(shost) = 0;
3075	if (phba->lmt & LMT_16Gb)
3076		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3077	if (phba->lmt & LMT_10Gb)
3078		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3079	if (phba->lmt & LMT_8Gb)
3080		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3081	if (phba->lmt & LMT_4Gb)
3082		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3083	if (phba->lmt & LMT_2Gb)
3084		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3085	if (phba->lmt & LMT_1Gb)
3086		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3087
3088	fc_host_maxframe_size(shost) =
3089		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3090		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
3091
3092	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3093
3094	/* This value is also unchanging */
3095	memset(fc_host_active_fc4s(shost), 0,
3096	       sizeof(fc_host_active_fc4s(shost)));
3097	fc_host_active_fc4s(shost)[2] = 1;
3098	fc_host_active_fc4s(shost)[7] = 1;
3099
3100	fc_host_max_npiv_vports(shost) = phba->max_vpi;
3101	spin_lock_irq(shost->host_lock);
3102	vport->load_flag &= ~FC_LOADING;
3103	spin_unlock_irq(shost->host_lock);
3104}
3105
3106/**
3107 * lpfc_stop_port_s3 - Stop SLI3 device port
3108 * @phba: pointer to lpfc hba data structure.
3109 *
3110 * This routine is invoked to stop an SLI3 device port; it stops the device
3111 * from generating interrupts and stops the device driver's timers for the
3112 * device.
3113 **/
3114static void
3115lpfc_stop_port_s3(struct lpfc_hba *phba)
3116{
3117	/* Clear all interrupt enable conditions */
3118	writel(0, phba->HCregaddr);
3119	readl(phba->HCregaddr); /* flush */
3120	/* Clear all pending interrupts */
3121	writel(0xffffffff, phba->HAregaddr);
3122	readl(phba->HAregaddr); /* flush */
3123
3124	/* Reset some HBA SLI setup states */
3125	lpfc_stop_hba_timers(phba);
3126	phba->pport->work_port_events = 0;
3127}
3128
3129/**
3130 * lpfc_stop_port_s4 - Stop SLI4 device port
3131 * @phba: pointer to lpfc hba data structure.
3132 *
3133 * This routine is invoked to stop an SLI4 device port; it stops the device
3134 * from generating interrupts and stops the device driver's timers for the
3135 * device.
3136 **/
3137static void
3138lpfc_stop_port_s4(struct lpfc_hba *phba)
3139{
3140	/* Reset some HBA SLI4 setup states */
3141	lpfc_stop_hba_timers(phba);
3142	phba->pport->work_port_events = 0;
3143	phba->sli4_hba.intr_enable = 0;
3144}
3145
3146/**
3147 * lpfc_stop_port - Wrapper function for stopping hba port
3148 * @phba: Pointer to HBA context object.
3149 *
3150 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
3151 * the API jump table function pointer from the lpfc_hba struct.
3152 **/
3153void
3154lpfc_stop_port(struct lpfc_hba *phba)
3155{
3156	phba->lpfc_stop_port(phba);
3157}
3158
3159/**
3160 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3161 * @phba: Pointer to hba for which this call is being executed.
3162 *
3163 * This routine starts the timer waiting for the FCF rediscovery to complete.
3164 **/
3165void
3166lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3167{
3168	unsigned long fcf_redisc_wait_tmo =
3169		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3170	/* Start fcf rediscovery wait period timer */
3171	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3172	spin_lock_irq(&phba->hbalock);
3173	/* Allow action to new fcf asynchronous event */
3174	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3175	/* Mark the FCF rediscovery pending state */
3176	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3177	spin_unlock_irq(&phba->hbalock);
3178}
3179
3180/**
3181 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3182 * @ptr: Map to lpfc_hba data structure pointer.
3183 *
3184 * This routine is invoked when waiting for FCF table rediscover has been
3185 * timed out. If new FCF record(s) has (have) been discovered during the
3186 * wait period, a new FCF event shall be added to the FCOE async event
3187 * list, and then worker thread shall be waked up for processing from the
3188 * worker thread context.
3189 **/
3190void
3191lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3192{
3193	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3194
3195	/* Don't send FCF rediscovery event if timer cancelled */
3196	spin_lock_irq(&phba->hbalock);
3197	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3198		spin_unlock_irq(&phba->hbalock);
3199		return;
3200	}
3201	/* Clear FCF rediscovery timer pending flag */
3202	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3203	/* FCF rediscovery event to worker thread */
3204	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3205	spin_unlock_irq(&phba->hbalock);
3206	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3207			"2776 FCF rediscover quiescent timer expired\n");
3208	/* wake up worker thread */
3209	lpfc_worker_wake_up(phba);
3210}
3211
3212/**
3213 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3214 * @phba: pointer to lpfc hba data structure.
3215 * @acqe_link: pointer to the async link completion queue entry.
3216 *
3217 * This routine is to parse the SLI4 link-attention link fault code and
3218 * translate it into the base driver's read link attention mailbox command
3219 * status.
3220 *
3221 * Return: Link-attention status in terms of base driver's coding.
3222 **/
3223static uint16_t
3224lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3225			   struct lpfc_acqe_link *acqe_link)
3226{
3227	uint16_t latt_fault;
3228
3229	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3230	case LPFC_ASYNC_LINK_FAULT_NONE:
3231	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3232	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3233		latt_fault = 0;
3234		break;
3235	default:
3236		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3237				"0398 Invalid link fault code: x%x\n",
3238				bf_get(lpfc_acqe_link_fault, acqe_link));
3239		latt_fault = MBXERR_ERROR;
3240		break;
3241	}
3242	return latt_fault;
3243}
3244
3245/**
3246 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3247 * @phba: pointer to lpfc hba data structure.
3248 * @acqe_link: pointer to the async link completion queue entry.
3249 *
3250 * This routine is to parse the SLI4 link attention type and translate it
3251 * into the base driver's link attention type coding.
3252 *
3253 * Return: Link attention type in terms of base driver's coding.
3254 **/
3255static uint8_t
3256lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3257			  struct lpfc_acqe_link *acqe_link)
3258{
3259	uint8_t att_type;
3260
3261	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3262	case LPFC_ASYNC_LINK_STATUS_DOWN:
3263	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3264		att_type = LPFC_ATT_LINK_DOWN;
3265		break;
3266	case LPFC_ASYNC_LINK_STATUS_UP:
3267		/* Ignore physical link up events - wait for logical link up */
3268		att_type = LPFC_ATT_RESERVED;
3269		break;
3270	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3271		att_type = LPFC_ATT_LINK_UP;
3272		break;
3273	default:
3274		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3275				"0399 Invalid link attention type: x%x\n",
3276				bf_get(lpfc_acqe_link_status, acqe_link));
3277		att_type = LPFC_ATT_RESERVED;
3278		break;
3279	}
3280	return att_type;
3281}
3282
3283/**
3284 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3285 * @phba: pointer to lpfc hba data structure.
3286 * @acqe_link: pointer to the async link completion queue entry.
3287 *
3288 * This routine is to parse the SLI4 link-attention link speed and translate
3289 * it into the base driver's link-attention link speed coding.
3290 *
3291 * Return: Link-attention link speed in terms of base driver's coding.
3292 **/
3293static uint8_t
3294lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3295				struct lpfc_acqe_link *acqe_link)
3296{
3297	uint8_t link_speed;
3298
3299	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3300	case LPFC_ASYNC_LINK_SPEED_ZERO:
3301	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3302	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3303		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3304		break;
3305	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3306		link_speed = LPFC_LINK_SPEED_1GHZ;
3307		break;
3308	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3309		link_speed = LPFC_LINK_SPEED_10GHZ;
3310		break;
3311	default:
3312		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3313				"0483 Invalid link-attention link speed: x%x\n",
3314				bf_get(lpfc_acqe_link_speed, acqe_link));
3315		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3316		break;
3317	}
3318	return link_speed;
3319}
3320
3321/**
3322 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3323 * @phba: pointer to lpfc hba data structure.
3324 * @acqe_link: pointer to the async link completion queue entry.
3325 *
3326 * This routine is to handle the SLI4 asynchronous FCoE link event.
3327 **/
3328static void
3329lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3330			 struct lpfc_acqe_link *acqe_link)
3331{
3332	struct lpfc_dmabuf *mp;
3333	LPFC_MBOXQ_t *pmb;
3334	MAILBOX_t *mb;
3335	struct lpfc_mbx_read_top *la;
3336	uint8_t att_type;
3337	int rc;
3338
3339	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3340	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3341		return;
3342	phba->fcoe_eventtag = acqe_link->event_tag;
3343	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3344	if (!pmb) {
3345		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3346				"0395 The mboxq allocation failed\n");
3347		return;
3348	}
3349	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3350	if (!mp) {
3351		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3352				"0396 The lpfc_dmabuf allocation failed\n");
3353		goto out_free_pmb;
3354	}
3355	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3356	if (!mp->virt) {
3357		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3358				"0397 The mbuf allocation failed\n");
3359		goto out_free_dmabuf;
3360	}
3361
3362	/* Cleanup any outstanding ELS commands */
3363	lpfc_els_flush_all_cmd(phba);
3364
3365	/* Block ELS IOCBs until we have processed this link event */
3366	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3367
3368	/* Update link event statistics */
3369	phba->sli.slistat.link_event++;
3370
3371	/* Create lpfc_handle_latt mailbox command from link ACQE */
3372	lpfc_read_topology(phba, pmb, mp);
3373	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3374	pmb->vport = phba->pport;
3375
3376	/* Keep the link status for extra SLI4 state machine reference */
3377	phba->sli4_hba.link_state.speed =
3378				bf_get(lpfc_acqe_link_speed, acqe_link);
3379	phba->sli4_hba.link_state.duplex =
3380				bf_get(lpfc_acqe_link_duplex, acqe_link);
3381	phba->sli4_hba.link_state.status =
3382				bf_get(lpfc_acqe_link_status, acqe_link);
3383	phba->sli4_hba.link_state.type =
3384				bf_get(lpfc_acqe_link_type, acqe_link);
3385	phba->sli4_hba.link_state.number =
3386				bf_get(lpfc_acqe_link_number, acqe_link);
3387	phba->sli4_hba.link_state.fault =
3388				bf_get(lpfc_acqe_link_fault, acqe_link);
3389	phba->sli4_hba.link_state.logical_speed =
3390			bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3391	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3392			"2900 Async FC/FCoE Link event - Speed:%dGBit "
3393			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3394			"Logical speed:%dMbps Fault:%d\n",
3395			phba->sli4_hba.link_state.speed,
3396			phba->sli4_hba.link_state.duplex,
3397			phba->sli4_hba.link_state.status,
3398			phba->sli4_hba.link_state.type,
3399			phba->sli4_hba.link_state.number,
3400			phba->sli4_hba.link_state.logical_speed * 10,
3401			phba->sli4_hba.link_state.fault);
3402	/*
3403	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3404	 * topology info. Note: Optional for non FC-AL ports.
3405	 */
3406	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3407		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3408		if (rc == MBX_NOT_FINISHED)
3409			goto out_free_mbuf;
3410		return;
3411	}
3412	/*
3413	 * For FCoE Mode: fill in all the topology information we need and call
3414	 * the READ_TOPOLOGY completion routine to continue without actually
3415	 * sending the READ_TOPOLOGY mailbox command to the port.
3416	 */
3417	/* Parse and translate status field */
3418	mb = &pmb->u.mb;
3419	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3420
3421	/* Parse and translate link attention fields */
3422	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3423	la->eventTag = acqe_link->event_tag;
3424	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3425	bf_set(lpfc_mbx_read_top_link_spd, la,
3426	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3427
3428	/* Fake the following irrelevant fields */
3429	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3430	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3431	bf_set(lpfc_mbx_read_top_il, la, 0);
3432	bf_set(lpfc_mbx_read_top_pb, la, 0);
3433	bf_set(lpfc_mbx_read_top_fa, la, 0);
3434	bf_set(lpfc_mbx_read_top_mm, la, 0);
3435
3436	/* Invoke the lpfc_handle_latt mailbox command callback function */
3437	lpfc_mbx_cmpl_read_topology(phba, pmb);
3438
3439	return;
3440
out_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3441out_free_dmabuf:
3442	kfree(mp);
3443out_free_pmb:
3444	mempool_free(pmb, phba->mbox_mem_pool);
3445}
3446
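/*
 * Summary of the flow above (an illustrative recap, not a separate driver
 * API): only LINK_UP/LINK_DOWN attention types are serviced.  In FC mode
 * the routine issues a real READ_TOPOLOGY mailbox command (MBX_NOWAIT);
 * in FCoE mode it synthesizes the READ_TOPOLOGY result from the ACQE
 * fields and invokes lpfc_mbx_cmpl_read_topology() directly, so the rest
 * of the driver handles both modes identically.
 */
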
3447/**
3448 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3449 * @phba: pointer to lpfc hba data structure.
3450 * @acqe_fc: pointer to the async fc completion queue entry.
3451 *
3452 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3453 * that the event was received and then issue a read_topology mailbox command so
3454 * that the rest of the driver will treat it the same as SLI3.
3455 **/
3456static void
3457lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3458{
3459	struct lpfc_dmabuf *mp;
3460	LPFC_MBOXQ_t *pmb;
3461	int rc;
3462
3463	if (bf_get(lpfc_trailer_type, acqe_fc) !=
3464	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3465		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3466				"2895 Non-FC link event detected (%d)\n",
3467				bf_get(lpfc_trailer_type, acqe_fc));
3468		return;
3469	}
3470	/* Keep the link status for extra SLI4 state machine reference */
3471	phba->sli4_hba.link_state.speed =
3472				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3473	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3474	phba->sli4_hba.link_state.topology =
3475				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3476	phba->sli4_hba.link_state.status =
3477				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3478	phba->sli4_hba.link_state.type =
3479				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3480	phba->sli4_hba.link_state.number =
3481				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3482	phba->sli4_hba.link_state.fault =
3483				bf_get(lpfc_acqe_link_fault, acqe_fc);
3484	phba->sli4_hba.link_state.logical_speed =
3485				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3486	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3487			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
3488			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3489			"%dMbps Fault:%d\n",
3490			phba->sli4_hba.link_state.speed,
3491			phba->sli4_hba.link_state.topology,
3492			phba->sli4_hba.link_state.status,
3493			phba->sli4_hba.link_state.type,
3494			phba->sli4_hba.link_state.number,
3495			phba->sli4_hba.link_state.logical_speed * 10,
3496			phba->sli4_hba.link_state.fault);
3497	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3498	if (!pmb) {
3499		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3500				"2897 The mboxq allocation failed\n");
3501		return;
3502	}
3503	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3504	if (!mp) {
3505		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3506				"2898 The lpfc_dmabuf allocation failed\n");
3507		goto out_free_pmb;
3508	}
3509	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3510	if (!mp->virt) {
3511		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3512				"2899 The mbuf allocation failed\n");
3513		goto out_free_dmabuf;
3514	}
3515
3516	/* Cleanup any outstanding ELS commands */
3517	lpfc_els_flush_all_cmd(phba);
3518
3519	/* Block ELS IOCBs until we are done processing the link event */
3520	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3521
3522	/* Update link event statistics */
3523	phba->sli.slistat.link_event++;
3524
3525	/* Create lpfc_handle_latt mailbox command from link ACQE */
3526	lpfc_read_topology(phba, pmb, mp);
3527	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3528	pmb->vport = phba->pport;
3529
3530	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3531	if (rc == MBX_NOT_FINISHED)
3532		goto out_free_mbuf;
3533	return;
3534
out_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3535out_free_dmabuf:
3536	kfree(mp);
3537out_free_pmb:
3538	mempool_free(pmb, phba->mbox_mem_pool);
3539}
3540
3541/**
3542 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
3543 * @phba: pointer to lpfc hba data structure.
3544 * @acqe_sli: pointer to the async SLI completion queue entry.
3545 *
3546 * This routine is to handle the SLI4 asynchronous SLI events.
3547 **/
3548static void
3549lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3550{
3551	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3552			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
3553			"x%08x SLI Event Type:%d\n",
3554			acqe_sli->event_data1, acqe_sli->event_data2,
3555			bf_get(lpfc_trailer_type, acqe_sli));
3556	return;
3557}
3558
3559/**
3560 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3561 * @vport: pointer to vport data structure.
3562 *
3563 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3564 * response to a CVL event.
3565 *
3566 * Return the pointer to the ndlp with the vport if successful, otherwise
3567 * return NULL.
3568 **/
3569static struct lpfc_nodelist *
3570lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3571{
3572	struct lpfc_nodelist *ndlp;
3573	struct Scsi_Host *shost;
3574	struct lpfc_hba *phba;
3575
3576	if (!vport)
3577		return NULL;
3578	phba = vport->phba;
3579	if (!phba)
3580		return NULL;
3581	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3582	if (!ndlp) {
3583		/* Cannot find existing Fabric ndlp, so allocate a new one */
3584		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3585		if (!ndlp)
3586			return NULL;
3587		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3588		/* Set the node type */
3589		ndlp->nlp_type |= NLP_FABRIC;
3590		/* Put ndlp onto node list */
3591		lpfc_enqueue_node(vport, ndlp);
3592	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3593		/* re-setup ndlp without removing from node list */
3594		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3595		if (!ndlp)
3596			return NULL;
3597	}
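	/* Ignore the CVL if the physical port has not yet initiated FLOGI */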
3598	if ((phba->pport->port_state < LPFC_FLOGI) &&
3599		(phba->pport->port_state != LPFC_VPORT_FAILED))
3600		return NULL;
3601	/* If virtual link is not yet instantiated ignore CVL */
3602	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3603		&& (vport->port_state != LPFC_VPORT_FAILED))
3604		return NULL;
3605	shost = lpfc_shost_from_vport(vport);
3606	if (!shost)
3607		return NULL;
3608	lpfc_linkdown_port(vport);
3609	lpfc_cleanup_pending_mbox(vport);
3610	spin_lock_irq(shost->host_lock);
3611	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3612	spin_unlock_irq(shost->host_lock);
3613
3614	return ndlp;
3615}
3616
3617/**
3618 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3619 * @phba: pointer to lpfc hba data structure.
3620 *
3621 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3622 * response to a FCF dead event.
3623 **/
3624static void
3625lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3626{
3627	struct lpfc_vport **vports;
3628	int i;
3629
3630	vports = lpfc_create_vport_work_array(phba);
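	/*
	 * The vport work array is NULL-terminated, so the bounded scan
	 * below stops at the first empty slot or after max_vports entries.
	 */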
3631	if (vports)
3632		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3633			lpfc_sli4_perform_vport_cvl(vports[i]);
3634	lpfc_destroy_vport_work_array(phba, vports);
3635}
3636
3637/**
3638 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3639 * @phba: pointer to lpfc hba data structure.
3640 * @acqe_fip: pointer to the async fcoe completion queue entry.
3641 *
3642 * This routine is to handle the SLI4 asynchronous fcoe event.
3643 **/
3644static void
3645lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3646			struct lpfc_acqe_fip *acqe_fip)
3647{
3648	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3649	int rc;
3650	struct lpfc_vport *vport;
3651	struct lpfc_nodelist *ndlp;
3652	struct Scsi_Host  *shost;
3653	int active_vlink_present;
3654	struct lpfc_vport **vports;
3655	int i;
3656
3657	phba->fc_eventTag = acqe_fip->event_tag;
3658	phba->fcoe_eventtag = acqe_fip->event_tag;
3659	switch (event_type) {
3660	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3661	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3662		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3663			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3664					LOG_DISCOVERY,
3665					"2546 New FCF event, evt_tag:x%x, "
3666					"index:x%x\n",
3667					acqe_fip->event_tag,
3668					acqe_fip->index);
3669		else
3670			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3671					LOG_DISCOVERY,
3672					"2788 FCF param modified event, "
3673					"evt_tag:x%x, index:x%x\n",
3674					acqe_fip->event_tag,
3675					acqe_fip->index);
3676		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3677			/*
3678			 * During period of FCF discovery, read the FCF
3679			 * table record indexed by the event to update
3680			 * FCF roundrobin failover eligible FCF bmask.
3681			 */
3682			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3683					LOG_DISCOVERY,
3684					"2779 Read FCF (x%x) for updating "
3685					"roundrobin FCF failover bmask\n",
3686					acqe_fip->index);
3687			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3688		}
3689
3690		/* If the FCF discovery is in progress, do nothing. */
3691		spin_lock_irq(&phba->hbalock);
3692		if (phba->hba_flag & FCF_TS_INPROG) {
3693			spin_unlock_irq(&phba->hbalock);
3694			break;
3695		}
3696		/* If fast FCF failover rescan event is pending, do nothing */
3697		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3698			spin_unlock_irq(&phba->hbalock);
3699			break;
3700		}
3701
3702		/* If the FCF has been in discovered state, do nothing. */
3703		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3704			spin_unlock_irq(&phba->hbalock);
3705			break;
3706		}
3707		spin_unlock_irq(&phba->hbalock);
3708
3709		/* Otherwise, scan the entire FCF table and re-discover SAN */
3710		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3711				"2770 Start FCF table scan per async FCF "
3712				"event, evt_tag:x%x, index:x%x\n",
3713				acqe_fip->event_tag, acqe_fip->index);
3714		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3715						     LPFC_FCOE_FCF_GET_FIRST);
3716		if (rc)
3717			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3718					"2547 Issue FCF scan read FCF mailbox "
3719					"command failed (x%x)\n", rc);
3720		break;
3721
3722	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3723		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3724			"2548 FCF Table full count 0x%x tag 0x%x\n",
3725			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3726			acqe_fip->event_tag);
3727		break;
3728
3729	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3730		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3731			"2549 FCF (x%x) disconnected from network, "
3732			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3733		/*
3734		 * If we are in the middle of FCF failover process, clear
3735		 * the corresponding FCF bit in the roundrobin bitmap.
3736		 */
3737		spin_lock_irq(&phba->hbalock);
3738		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3739			spin_unlock_irq(&phba->hbalock);
3740			/* Update FLOGI FCF failover eligible FCF bmask */
3741			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3742			break;
3743		}
3744		spin_unlock_irq(&phba->hbalock);
3745
3746		/* If the event is not for currently used fcf do nothing */
3747		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3748			break;
3749
3750		/*
3751		 * Otherwise, request the port to rediscover the entire FCF
3752		 * table for a fast recovery in case the current FCF
3753		 * is no longer valid, since we are not already in the
3754		 * middle of the FCF failover process.
3755		 */
3756		spin_lock_irq(&phba->hbalock);
3757		/* Mark the fast failover process in progress */
3758		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3759		spin_unlock_irq(&phba->hbalock);
3760
3761		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3762				"2771 Start FCF fast failover process due to "
3763				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3764				"\n", acqe_fip->event_tag, acqe_fip->index);
3765		rc = lpfc_sli4_redisc_fcf_table(phba);
3766		if (rc) {
3767			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3768					LOG_DISCOVERY,
3769					"2772 Issue FCF rediscover mailbox "
3770					"command failed, fail through to FCF "
3771					"dead event\n");
3772			spin_lock_irq(&phba->hbalock);
3773			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3774			spin_unlock_irq(&phba->hbalock);
3775			/*
3776			 * As a last resort, fail over by treating this
3777			 * as a link down for FCF registration.
3778			 */
3779			lpfc_sli4_fcf_dead_failthrough(phba);
3780		} else {
3781			/* Reset FCF roundrobin bmask for new discovery */
3782			lpfc_sli4_clear_fcf_rr_bmask(phba);
3783			/*
3784			 * Handling fast FCF failover to a DEAD FCF event is
3785			 * considered equivalent to receiving a CVL to all vports.
3786			 */
3787			lpfc_sli4_perform_all_vport_cvl(phba);
3788		}
3789		break;
3790	case LPFC_FIP_EVENT_TYPE_CVL:
3791		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3792			"2718 Clear Virtual Link Received for VPI 0x%x"
3793			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3794
3795		vport = lpfc_find_vport_by_vpid(phba,
3796						acqe_fip->index);
3797		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3798		if (!ndlp)
3799			break;
3800		active_vlink_present = 0;
3801
3802		vports = lpfc_create_vport_work_array(phba);
3803		if (vports) {
3804			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3805					i++) {
3806				if ((!(vports[i]->fc_flag &
3807					FC_VPORT_CVL_RCVD)) &&
3808					(vports[i]->port_state > LPFC_FDISC)) {
3809					active_vlink_present = 1;
3810					break;
3811				}
3812			}
3813			lpfc_destroy_vport_work_array(phba, vports);
3814		}
3815
3816		if (active_vlink_present) {
3817			/*
3818			 * If there are other active VLinks present,
3819			 * re-instantiate the Vlink using FDISC.
3820			 */
3821			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3822			shost = lpfc_shost_from_vport(vport);
3823			spin_lock_irq(shost->host_lock);
3824			ndlp->nlp_flag |= NLP_DELAY_TMO;
3825			spin_unlock_irq(shost->host_lock);
3826			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3827			vport->port_state = LPFC_FDISC;
3828		} else {
3829			/*
3830			 * Otherwise, request the port to rediscover
3831			 * the entire FCF table for a fast recovery
3832			 * in case the current FCF is no longer
3833			 * valid, provided we are not already in
3834			 * the FCF failover process.
3835			 */
3836			spin_lock_irq(&phba->hbalock);
3837			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3838				spin_unlock_irq(&phba->hbalock);
3839				break;
3840			}
3841			/* Mark the fast failover process in progress */
3842			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3843			spin_unlock_irq(&phba->hbalock);
3844			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3845					LOG_DISCOVERY,
3846					"2773 Start FCF failover per CVL, "
3847					"evt_tag:x%x\n", acqe_fip->event_tag);
3848			rc = lpfc_sli4_redisc_fcf_table(phba);
3849			if (rc) {
3850				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3851						LOG_DISCOVERY,
3852						"2774 Issue FCF rediscover "
3853						"mailbox command failed, "
3854						"fail through to CVL event\n");
3855				spin_lock_irq(&phba->hbalock);
3856				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3857				spin_unlock_irq(&phba->hbalock);
3858				/*
3859				 * Last resort will be to retry on the
3860				 * currently registered FCF entry.
3861				 */
3862				lpfc_retry_pport_discovery(phba);
3863			} else
3864				/*
3865				 * Reset FCF roundrobin bmask for new
3866				 * discovery.
3867				 */
3868				lpfc_sli4_clear_fcf_rr_bmask(phba);
3869		}
3870		break;
3871	default:
3872		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3873			"0288 Unknown FCoE event type 0x%x event tag "
3874			"0x%x\n", event_type, acqe_fip->event_tag);
3875		break;
3876	}
3877}
3878
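/*
 * Quick reference for the FIP event handling above (a summary of the
 * switch cases, not additional behavior):
 *
 *	NEW_FCF / FCF_PARAM_MOD: rescan the FCF table unless discovery is
 *		already in progress or complete
 *	FCF_TABLE_FULL: log only
 *	FCF_DEAD: fast FCF failover; on failure treat as link down, on
 *		success perform CVL on all vports
 *	CVL: per-vport CVL; FDISC if another active vlink remains,
 *		otherwise FCF rediscovery
 */
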
3879/**
3880 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3881 * @phba: pointer to lpfc hba data structure.
3882 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3883 *
3884 * This routine is to handle the SLI4 asynchronous dcbx event.
3885 **/
3886static void
3887lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3888			 struct lpfc_acqe_dcbx *acqe_dcbx)
3889{
3890	phba->fc_eventTag = acqe_dcbx->event_tag;
3891	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3892			"0290 The SLI4 DCBX asynchronous event is not "
3893			"handled yet\n");
3894}
3895
3896/**
3897 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3898 * @phba: pointer to lpfc hba data structure.
3899 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3900 *
3901 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3902 * is an asynchronous notified of a logical link speed change.  The Port
3903 * is an asynchronous notification of a logical link speed change.  The Port
3904 **/
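 * For example, a reported value of 1000 corresponds to 1000 * 10 = 10000
 * Mbps, i.e. a 10 Gbps logical link.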
3905static void
3906lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3907			 struct lpfc_acqe_grp5 *acqe_grp5)
3908{
3909	uint16_t prev_ll_spd;
3910
3911	phba->fc_eventTag = acqe_grp5->event_tag;
3912	phba->fcoe_eventtag = acqe_grp5->event_tag;
3913	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3914	phba->sli4_hba.link_state.logical_speed =
3915		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3916	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3917			"2789 GRP5 Async Event: Updating logical link speed "
3918			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3919			(phba->sli4_hba.link_state.logical_speed*10));
3920}
3921
3922/**
3923 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3924 * @phba: pointer to lpfc hba data structure.
3925 *
3926 * This routine is invoked by the worker thread to process all the pending
3927 * SLI4 asynchronous events.
3928 **/
3929void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3930{
3931	struct lpfc_cq_event *cq_event;
3932
3933	/* First, clear the pending ASYNC_EVENT flag */
3934	spin_lock_irq(&phba->hbalock);
3935	phba->hba_flag &= ~ASYNC_EVENT;
3936	spin_unlock_irq(&phba->hbalock);
3937	/* Now, handle all the async events */
3938	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3939		/* Get the first event from the head of the event queue */
3940		spin_lock_irq(&phba->hbalock);
3941		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3942				 cq_event, struct lpfc_cq_event, list);
3943		spin_unlock_irq(&phba->hbalock);
3944		/* Process the asynchronous event */
3945		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3946		case LPFC_TRAILER_CODE_LINK:
3947			lpfc_sli4_async_link_evt(phba,
3948						 &cq_event->cqe.acqe_link);
3949			break;
3950		case LPFC_TRAILER_CODE_FCOE:
3951			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3952			break;
3953		case LPFC_TRAILER_CODE_DCBX:
3954			lpfc_sli4_async_dcbx_evt(phba,
3955						 &cq_event->cqe.acqe_dcbx);
3956			break;
3957		case LPFC_TRAILER_CODE_GRP5:
3958			lpfc_sli4_async_grp5_evt(phba,
3959						 &cq_event->cqe.acqe_grp5);
3960			break;
3961		case LPFC_TRAILER_CODE_FC:
3962			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3963			break;
3964		case LPFC_TRAILER_CODE_SLI:
3965			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3966			break;
3967		default:
3968			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3969					"1804 Invalid asynchronous event code: "
3970					"x%x\n", bf_get(lpfc_trailer_code,
3971					&cq_event->cqe.mcqe_cmpl));
3972			break;
3973		}
3974		/* Free the completion event processed to the free pool */
3975		lpfc_sli4_cq_event_release(phba, cq_event);
3976	}
3977}
3978
3979/**
3980 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3981 * @phba: pointer to lpfc hba data structure.
3982 *
3983 * This routine is invoked by the worker thread to process FCF table
3984 * rediscovery pending completion event.
3985 **/
3986void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3987{
3988	int rc;
3989
3990	spin_lock_irq(&phba->hbalock);
3991	/* Clear FCF rediscovery timeout event */
3992	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3993	/* Clear driver fast failover FCF record flag */
3994	phba->fcf.failover_rec.flag = 0;
3995	/* Set state for FCF fast failover */
3996	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3997	spin_unlock_irq(&phba->hbalock);
3998
3999	/* Scan FCF table from the first entry to re-discover SAN */
4000	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4001			"2777 Start post-quiescent FCF table scan\n");
4002	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4003	if (rc)
4004		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4005				"2747 Issue FCF scan read FCF mailbox "
4006				"command failed 0x%x\n", rc);
4007}
4008
4009/**
4010 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4011 * @phba: pointer to lpfc hba data structure.
4012 * @dev_grp: The HBA PCI-Device group number.
4013 *
4014 * This routine is invoked to set up the per HBA PCI-Device group function
4015 * API jump table entries.
4016 *
4017 * Return: 0 if success, otherwise -ENODEV
4018 **/
4019int
4020lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4021{
4022	int rc;
4023
4024	/* Set up lpfc PCI-device group */
4025	phba->pci_dev_grp = dev_grp;
4026
4027	/* The LPFC_PCI_DEV_OC uses SLI4 */
4028	if (dev_grp == LPFC_PCI_DEV_OC)
4029		phba->sli_rev = LPFC_SLI_REV4;
4030
4031	/* Set up device INIT API function jump table */
4032	rc = lpfc_init_api_table_setup(phba, dev_grp);
4033	if (rc)
4034		return -ENODEV;
4035	/* Set up SCSI API function jump table */
4036	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4037	if (rc)
4038		return -ENODEV;
4039	/* Set up SLI API function jump table */
4040	rc = lpfc_sli_api_table_setup(phba, dev_grp);
4041	if (rc)
4042		return -ENODEV;
4043	/* Set up MBOX API function jump table */
4044	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4045	if (rc)
4046		return -ENODEV;
4047
4048	return 0;
4049}
4050
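/*
 * Illustrative call site (a hedged sketch; the actual probe paths live
 * elsewhere in this file): a probe routine selects the PCI device group
 * and wires up all four API jump tables in one call, e.g.
 *
 *	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;	(SLI4/PCI_DEV_OC path)
 */
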
4051/**
4052 * lpfc_log_intr_mode - Log the active interrupt mode
4053 * @phba: pointer to lpfc hba data structure.
4054 * @intr_mode: active interrupt mode adopted.
4055 *
4056 * This routine is invoked to log the currently active interrupt mode
4057 * adopted for the device.
4058 **/
4059static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4060{
4061	switch (intr_mode) {
4062	case 0:
4063		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4064				"0470 Enabled INTx interrupt mode.\n");
4065		break;
4066	case 1:
4067		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4068				"0481 Enabled MSI interrupt mode.\n");
4069		break;
4070	case 2:
4071		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4072				"0480 Enabled MSI-X interrupt mode.\n");
4073		break;
4074	default:
4075		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4076				"0482 Illegal interrupt mode.\n");
4077		break;
4078	}
4079	return;
4080}
4081
4082/**
4083 * lpfc_enable_pci_dev - Enable a generic PCI device.
4084 * @phba: pointer to lpfc hba data structure.
4085 *
4086 * This routine is invoked to enable the PCI device that is common to all
4087 * PCI devices.
4088 *
4089 * Return codes
4090 * 	0 - successful
4091 * 	other values - error
4092 **/
4093static int
4094lpfc_enable_pci_dev(struct lpfc_hba *phba)
4095{
4096	struct pci_dev *pdev;
4097	int bars = 0;
4098
4099	/* Obtain PCI device reference */
4100	if (!phba->pcidev)
4101		goto out_error;
4102	else
4103		pdev = phba->pcidev;
4104	/* Select PCI BARs */
4105	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4106	/* Enable PCI device */
4107	if (pci_enable_device_mem(pdev))
4108		goto out_error;
4109	/* Request PCI resource for the device */
4110	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4111		goto out_disable_device;
4112	/* Set up device as PCI master and save state for EEH */
4113	pci_set_master(pdev);
4114	pci_try_set_mwi(pdev);
4115	pci_save_state(pdev);
4116
4117	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4118	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
4119		pdev->needs_freset = 1;
4120
4121	return 0;
4122
4123out_disable_device:
4124	pci_disable_device(pdev);
4125out_error:
4126	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4127			"1401 Failed to enable pci device, bars:x%x\n", bars);
4128	return -ENODEV;
4129}
4130
4131/**
4132 * lpfc_disable_pci_dev - Disable a generic PCI device.
4133 * @phba: pointer to lpfc hba data structure.
4134 *
4135 * This routine is invoked to disable the PCI device that is common to all
4136 * PCI devices.
4137 **/
4138static void
4139lpfc_disable_pci_dev(struct lpfc_hba *phba)
4140{
4141	struct pci_dev *pdev;
4142	int bars;
4143
4144	/* Obtain PCI device reference */
4145	if (!phba->pcidev)
4146		return;
4147	else
4148		pdev = phba->pcidev;
4149	/* Select PCI BARs */
4150	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4151	/* Release PCI resource and disable PCI device */
4152	pci_release_selected_regions(pdev, bars);
4153	pci_disable_device(pdev);
4154	/* Null out PCI private reference to driver */
4155	pci_set_drvdata(pdev, NULL);
4156
4157	return;
4158}
4159
4160/**
4161 * lpfc_reset_hba - Reset a hba
4162 * @phba: pointer to lpfc hba data structure.
4163 *
4164 * This routine is invoked to reset a hba device. It brings the HBA
4165 * offline, performs a board restart, and then brings the board back
4166 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans up
4167 * outstanding mailbox commands.
4168 **/
4169void
4170lpfc_reset_hba(struct lpfc_hba *phba)
4171{
4172	/* If resets are disabled then set error state and return. */
4173	if (!phba->cfg_enable_hba_reset) {
4174		phba->link_state = LPFC_HBA_ERROR;
4175		return;
4176	}
4177	lpfc_offline_prep(phba);
4178	lpfc_offline(phba);
4179	lpfc_sli_brdrestart(phba);
4180	lpfc_online(phba);
4181	lpfc_unblock_mgmt_io(phba);
4182}
4183
4184/**
4185 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4186 * @phba: pointer to lpfc hba data structure.
4187 *
4188 * This function reads the PCI SR-IOV extended capability to determine how
4189 * many SR-IOV virtual functions the physical function supports.
4190 *
4191 * Return: the total number of supported virtual functions, or 0 if the
4192 * device does not implement the SR-IOV capability.
4193 **/
4194uint16_t
4195lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4196{
4197	struct pci_dev *pdev = phba->pcidev;
4198	uint16_t nr_virtfn;
4199	int pos;
4200
4201	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4202	if (pos == 0)
4203		return 0;
4204
4205	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4206	return nr_virtfn;
4207}
4208
4209/**
4210 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4211 * @phba: pointer to lpfc hba data structure.
4212 * @nr_vfn: number of virtual functions to be enabled.
4213 *
4214 * This function enables the PCI SR-IOV virtual functions to a physical
4215 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4216 * enable the number of virtual functions to the physical function. As
4217 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4218 * API call is not considered an error condition for most devices.
4219 **/
4220int
4221lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4222{
4223	struct pci_dev *pdev = phba->pcidev;
4224	uint16_t max_nr_vfn;
4225	int rc;
4226
4227	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4228	if (nr_vfn > max_nr_vfn) {
4229		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4230				"3057 Requested vfs (%d) greater than "
4231				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
4232		return -EINVAL;
4233	}
4234
4235	rc = pci_enable_sriov(pdev, nr_vfn);
4236	if (rc) {
4237		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4238				"2806 Failed to enable sriov on this device "
4239				"with vfn number nr_vf:%d, rc:%d\n",
4240				nr_vfn, rc);
4241	} else
4242		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4243				"2807 Successfully enabled sriov on this device "
4244				"with vfn number nr_vf:%d\n", nr_vfn);
4245	return rc;
4246}
4247
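/*
 * Illustrative usage (hedged; assumes the conventional lpfc_-prefixed
 * module parameter defined in lpfc_attr.c): requesting four virtual
 * functions at load time, which the driver resource-setup routines then
 * pass to lpfc_sli_probe_sriov_nr_virtfn():
 *
 *	# modprobe lpfc lpfc_sriov_nr_virtfn=4
 */
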
4248/**
4249 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4250 * @phba: pointer to lpfc hba data structure.
4251 *
4252 * This routine is invoked to set up the driver internal resources specific to
4253 * support the SLI-3 HBA device it attached to.
4254 *
4255 * Return codes
4256 * 	0 - successful
4257 * 	other values - error
4258 **/
4259static int
4260lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4261{
4262	struct lpfc_sli *psli;
4263	int rc;
4264
4265	/*
4266	 * Initialize timers used by driver
4267	 */
4268
4269	/* Heartbeat timer */
4270	init_timer(&phba->hb_tmofunc);
4271	phba->hb_tmofunc.function = lpfc_hb_timeout;
4272	phba->hb_tmofunc.data = (unsigned long)phba;
4273
4274	psli = &phba->sli;
4275	/* MBOX heartbeat timer */
4276	init_timer(&psli->mbox_tmo);
4277	psli->mbox_tmo.function = lpfc_mbox_timeout;
4278	psli->mbox_tmo.data = (unsigned long) phba;
4279	/* FCP polling mode timer */
4280	init_timer(&phba->fcp_poll_timer);
4281	phba->fcp_poll_timer.function = lpfc_poll_timeout;
4282	phba->fcp_poll_timer.data = (unsigned long) phba;
4283	/* Fabric block timer */
4284	init_timer(&phba->fabric_block_timer);
4285	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4286	phba->fabric_block_timer.data = (unsigned long) phba;
4287	/* EA polling mode timer */
4288	init_timer(&phba->eratt_poll);
4289	phba->eratt_poll.function = lpfc_poll_eratt;
4290	phba->eratt_poll.data = (unsigned long) phba;
4291
4292	/* Host attention work mask setup */
4293	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4294	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4295
4296	/* Get all the module params for configuring this host */
4297	lpfc_get_cfgparam(phba);
4298	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4299		phba->menlo_flag |= HBA_MENLO_SUPPORT;
4300		/* check for menlo minimum sg count */
4301		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4302			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4303	}
4304
4305	/*
4306	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4307	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4308	 * 2 segments are added since the IOCB needs a command and response bde.
4309	 */
4310	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4311		sizeof(struct fcp_rsp) +
4312			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
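	/*
	 * Worked example (struct sizes left symbolic, as they are driver
	 * and platform dependent): with cfg_sg_seg_cnt = 64 the pool buffer
	 * is sizeof(fcp_cmnd) + sizeof(fcp_rsp) + 66 * sizeof(ulp_bde64).
	 */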
4313
4314	if (phba->cfg_enable_bg) {
4315		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4316		phba->cfg_sg_dma_buf_size +=
4317			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4318	}
4319
4320	/* Also reinitialize the host templates with new values. */
4321	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4322	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4323
4324	phba->max_vpi = LPFC_MAX_VPI;
4325	/* This will be set to correct value after config_port mbox */
4326	phba->max_vports = 0;
4327
4328	/*
4329	 * Initialize the SLI Layer to run with lpfc HBAs.
4330	 */
4331	lpfc_sli_setup(phba);
4332	lpfc_sli_queue_setup(phba);
4333
4334	/* Allocate device driver memory */
4335	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4336		return -ENOMEM;
4337
4338	/*
4339	 * Enable sr-iov virtual functions if supported and configured
4340	 * through the module parameter.
4341	 */
4342	if (phba->cfg_sriov_nr_virtfn > 0) {
4343		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4344						 phba->cfg_sriov_nr_virtfn);
4345		if (rc) {
4346			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4347					"2808 Requested number of SR-IOV "
4348					"virtual functions (%d) is not "
4349					"supported\n",
4350					phba->cfg_sriov_nr_virtfn);
4351			phba->cfg_sriov_nr_virtfn = 0;
4352		}
4353	}
4354
4355	return 0;
4356}
4357
4358/**
4359 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4360 * @phba: pointer to lpfc hba data structure.
4361 *
4362 * This routine is invoked to unset the driver internal resources set up
4363 * specific for supporting the SLI-3 HBA device it attached to.
4364 **/
4365static void
4366lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4367{
4368	/* Free device driver memory allocated */
4369	lpfc_mem_free_all(phba);
4370
4371	return;
4372}
4373
4374/**
4375 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4376 * @phba: pointer to lpfc hba data structure.
4377 *
4378 * This routine is invoked to set up the driver internal resources specific to
4379 * support the SLI-4 HBA device it attached to.
4380 *
4381 * Return codes
4382 * 	0 - successful
4383 * 	other values - error
4384 **/
4385static int
4386lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4387{
4388	struct lpfc_sli *psli;
4389	LPFC_MBOXQ_t *mboxq;
4390	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4391	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4392	struct lpfc_mqe *mqe;
4393	int longs, sli_family;
4394	int sges_per_segment;
4395
4396	/* Before proceeding, wait for POST done and device ready */
4397	rc = lpfc_sli4_post_status_check(phba);
4398	if (rc)
4399		return -ENODEV;
4400
4401	/*
4402	 * Initialize timers used by driver
4403	 */
4404
4405	/* Heartbeat timer */
4406	init_timer(&phba->hb_tmofunc);
4407	phba->hb_tmofunc.function = lpfc_hb_timeout;
4408	phba->hb_tmofunc.data = (unsigned long)phba;
4409	init_timer(&phba->rrq_tmr);
4410	phba->rrq_tmr.function = lpfc_rrq_timeout;
4411	phba->rrq_tmr.data = (unsigned long)phba;
4412
4413	psli = &phba->sli;
4414	/* MBOX heartbeat timer */
4415	init_timer(&psli->mbox_tmo);
4416	psli->mbox_tmo.function = lpfc_mbox_timeout;
4417	psli->mbox_tmo.data = (unsigned long) phba;
4418	/* Fabric block timer */
4419	init_timer(&phba->fabric_block_timer);
4420	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4421	phba->fabric_block_timer.data = (unsigned long) phba;
4422	/* EA polling mode timer */
4423	init_timer(&phba->eratt_poll);
4424	phba->eratt_poll.function = lpfc_poll_eratt;
4425	phba->eratt_poll.data = (unsigned long) phba;
4426	/* FCF rediscover timer */
4427	init_timer(&phba->fcf.redisc_wait);
4428	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4429	phba->fcf.redisc_wait.data = (unsigned long)phba;
4430
4431	/*
4432	 * Control structure for handling external multi-buffer mailbox
4433	 * command pass-through.
4434	 */
4435	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4436		sizeof(struct lpfc_mbox_ext_buf_ctx));
4437	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4438
4439	/*
4440	 * We need to do a READ_CONFIG mailbox command here before
4441	 * calling lpfc_get_cfgparam. For VFs this will report the
4442	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4443	 * All of the resources allocated
4444	 * for this Port are tied to these values.
4445	 */
4446	/* Get all the module params for configuring this host */
4447	lpfc_get_cfgparam(phba);
4448	phba->max_vpi = LPFC_MAX_VPI;
4449	/* This will be set to correct value after the read_config mbox */
4450	phba->max_vports = 0;
4451
4452	/* Program the default value of vlan_id and fc_map */
4453	phba->valid_vlan = 0;
4454	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4455	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4456	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4457
4458	/* With BlockGuard we can have multiple SGEs per Data Segment */
4459	sges_per_segment = 1;
4460	if (phba->cfg_enable_bg)
4461		sges_per_segment = 2;
4462
4463	/*
4464	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4465	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4466	 * 2 segments are added since the IOCB needs a command and response bde.
4467	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4468	 * sgl sizes that are a power of 2 are used.
4469	 */
4470	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4471		    (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
4472		    sizeof(struct sli4_sge)));
4473
4474	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4475	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4476	switch (sli_family) {
4477	case LPFC_SLI_INTF_FAMILY_BE2:
4478	case LPFC_SLI_INTF_FAMILY_BE3:
4479		/* There is a single hint for BE - 2 pages per BPL. */
4480		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4481		    LPFC_SLI_INTF_SLI_HINT1_1)
4482			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4483		break;
4484	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4485	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4486	default:
4487		break;
4488	}
4489
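	/*
	 * Round the required buffer size up to a power of two, starting at
	 * LPFC_SLI4_MIN_BUF_SIZE and capped at max_buf_size, so an sgl
	 * never straddles a 4k page boundary.  If the cap is hit, shrink
	 * cfg_sg_seg_cnt to the count that actually fits.
	 */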
4490	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4491	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4492	     dma_buf_size = dma_buf_size << 1)
4493		;
4494	if (dma_buf_size == max_buf_size)
4495		phba->cfg_sg_seg_cnt = (dma_buf_size -
4496			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4497			(2 * sizeof(struct sli4_sge))) /
4498				sizeof(struct sli4_sge);
4499	phba->cfg_sg_dma_buf_size = dma_buf_size;
4500
4501	/* Initialize buffer queue management fields */
4502	hbq_count = lpfc_sli_hbq_count();
4503	for (i = 0; i < hbq_count; ++i)
4504		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4505	INIT_LIST_HEAD(&phba->rb_pend_list);
4506	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4507	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4508
4509	/*
4510	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4511	 */
4512	/* Initialize the Abort scsi buffer list used by driver */
4513	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4514	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4515	/* This abort list is used by the worker thread */
4516	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4517
4518	/*
4519	 * Initialize driver internal slow-path work queues
4520	 */
4521
4522	/* Driver internal slow-path CQ Event pool */
4523	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4524	/* Response IOCB work queue list */
4525	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4526	/* Asynchronous event CQ Event work queue list */
4527	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4528	/* Fast-path XRI aborted CQ Event work queue list */
4529	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4530	/* Slow-path XRI aborted CQ Event work queue list */
4531	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4532	/* Receive queue CQ Event work queue list */
4533	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4534
4535	/* Initialize extent block lists. */
4536	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4537	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4538	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4539	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4540
4541	/* Initialize the driver internal SLI layer lists. */
4542	lpfc_sli_setup(phba);
4543	lpfc_sli_queue_setup(phba);
4544
4545	/* Allocate device driver memory */
4546	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4547	if (rc)
4548		return -ENOMEM;
4549
4550	/* IF Type 2 ports get initialized now. */
4551	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4552	    LPFC_SLI_INTF_IF_TYPE_2) {
4553		rc = lpfc_pci_function_reset(phba);
4554		if (unlikely(rc))
4555			return -ENODEV;
4556	}
4557
4558	/* Create the bootstrap mailbox command */
4559	rc = lpfc_create_bootstrap_mbox(phba);
4560	if (unlikely(rc))
4561		goto out_free_mem;
4562
4563	/* Set up the host's endian order with the device. */
4564	rc = lpfc_setup_endian_order(phba);
4565	if (unlikely(rc))
4566		goto out_free_bsmbx;
4567
4568	/* Set up the hba's configuration parameters. */
4569	rc = lpfc_sli4_read_config(phba);
4570	if (unlikely(rc))
4571		goto out_free_bsmbx;
4572
4573	/* IF Type 0 ports get initialized now. */
4574	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4575	    LPFC_SLI_INTF_IF_TYPE_0) {
4576		rc = lpfc_pci_function_reset(phba);
4577		if (unlikely(rc))
4578			goto out_free_bsmbx;
4579	}
4580
4581	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4582						       GFP_KERNEL);
4583	if (!mboxq) {
4584		rc = -ENOMEM;
4585		goto out_free_bsmbx;
4586	}
4587
4588	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4589	lpfc_supported_pages(mboxq);
4590	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4591	if (!rc) {
4592		mqe = &mboxq->u.mqe;
4593		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4594		       LPFC_MAX_SUPPORTED_PAGES);
4595		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4596			switch (pn_page[i]) {
4597			case LPFC_SLI4_PARAMETERS:
4598				phba->sli4_hba.pc_sli4_params.supported = 1;
4599				break;
4600			default:
4601				break;
4602			}
4603		}
4604		/* Read the port's SLI4 Parameters capabilities if supported. */
4605		if (phba->sli4_hba.pc_sli4_params.supported)
4606			rc = lpfc_pc_sli4_params_get(phba, mboxq);
4607		if (rc) {
4608			mempool_free(mboxq, phba->mbox_mem_pool);
4609			rc = -EIO;
4610			goto out_free_bsmbx;
4611		}
4612	}
4613	/*
4614	 * Get sli4 parameters that override parameters from Port capabilities.
4615	 * If this call fails, it isn't critical unless the SLI4 parameters come
4616	 * back in conflict.
4617	 */
4618	rc = lpfc_get_sli4_parameters(phba, mboxq);
4619	if (rc) {
4620		if (phba->sli4_hba.extents_in_use &&
4621		    phba->sli4_hba.rpi_hdrs_in_use) {
4622			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4623				"2999 Unsupported SLI4 Parameters "
4624				"Extents and RPI headers enabled.\n");
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
4625			goto out_free_bsmbx;
4626		}
4627	}
4628	mempool_free(mboxq, phba->mbox_mem_pool);
4629	/* Verify all the SLI4 queues */
4630	rc = lpfc_sli4_queue_verify(phba);
4631	if (rc)
4632		goto out_free_bsmbx;
4633
4634	/* Create driver internal CQE event pool */
4635	rc = lpfc_sli4_cq_event_pool_create(phba);
4636	if (rc)
4637		goto out_free_bsmbx;
4638
4639	/* Initialize and populate the sgl list per host */
4640	rc = lpfc_init_sgl_list(phba);
4641	if (rc) {
4642		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4643				"1400 Failed to initialize sgl list.\n");
4644		goto out_destroy_cq_event_pool;
4645	}
4646	rc = lpfc_init_active_sgl_array(phba);
4647	if (rc) {
4648		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4649				"1430 Failed to initialize active sgl array.\n");
4650		goto out_free_sgl_list;
4651	}
4652	rc = lpfc_sli4_init_rpi_hdrs(phba);
4653	if (rc) {
4654		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4655				"1432 Failed to initialize rpi headers.\n");
4656		goto out_free_active_sgl;
4657	}
4658
4659	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4660	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4661	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4662					 GFP_KERNEL);
4663	if (!phba->fcf.fcf_rr_bmask) {
4664		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4665				"2759 Failed allocate memory for FCF round "
4666				"robin failover bmask\n");
4667		rc = -ENOMEM;
4668		goto out_remove_rpi_hdrs;
4669	}
4670
4671	/*
4672	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
4673	 * interrupt vector.  This is not an error.
4674	 */
4675	if (phba->cfg_fcp_eq_count) {
4676		phba->sli4_hba.fcp_eq_hdl =
4677				kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4678				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4679		if (!phba->sli4_hba.fcp_eq_hdl) {
4680			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4681					"2572 Failed allocate memory for "
4682					"fast-path per-EQ handle array\n");
4683			rc = -ENOMEM;
4684			goto out_free_fcf_rr_bmask;
4685		}
4686	}
4687
4688	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4689				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4690	if (!phba->sli4_hba.msix_entries) {
4691		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4692				"2573 Failed allocate memory for msi-x "
4693				"interrupt vector entries\n");
4694		rc = -ENOMEM;
4695		goto out_free_fcp_eq_hdl;
4696	}
4697
4698	/*
4699	 * Enable sr-iov virtual functions if supported and configured
4700	 * through the module parameter.
4701	 */
4702	if (phba->cfg_sriov_nr_virtfn > 0) {
4703		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4704						 phba->cfg_sriov_nr_virtfn);
4705		if (rc) {
4706			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4707					"3020 Requested number of SR-IOV "
4708					"virtual functions (%d) is not "
4709					"supported\n",
4710					phba->cfg_sriov_nr_virtfn);
4711			phba->cfg_sriov_nr_virtfn = 0;
4712		}
4713	}
4714
4715	return 0;
4716
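/* Error exits: release resources in strict reverse order of the setup above */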
4717out_free_fcp_eq_hdl:
4718	kfree(phba->sli4_hba.fcp_eq_hdl);
4719out_free_fcf_rr_bmask:
4720	kfree(phba->fcf.fcf_rr_bmask);
4721out_remove_rpi_hdrs:
4722	lpfc_sli4_remove_rpi_hdrs(phba);
4723out_free_active_sgl:
4724	lpfc_free_active_sgl(phba);
4725out_free_sgl_list:
4726	lpfc_free_sgl_list(phba);
4727out_destroy_cq_event_pool:
4728	lpfc_sli4_cq_event_pool_destroy(phba);
4729out_free_bsmbx:
4730	lpfc_destroy_bootstrap_mbox(phba);
4731out_free_mem:
4732	lpfc_mem_free(phba);
4733	return rc;
4734}
4735
4736/**
4737 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4738 * @phba: pointer to lpfc hba data structure.
4739 *
4740 * This routine is invoked to unset the driver internal resources set up
4741 * specific for supporting the SLI-4 HBA device it attached to.
4742 **/
4743static void
4744lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4745{
4746	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4747
4748	/* Free memory allocated for msi-x interrupt vector entries */
4749	kfree(phba->sli4_hba.msix_entries);
4750
4751	/* Free memory allocated for fast-path work queue handles */
4752	kfree(phba->sli4_hba.fcp_eq_hdl);
4753
4754	/* Free the allocated rpi headers. */
4755	lpfc_sli4_remove_rpi_hdrs(phba);
4756	lpfc_sli4_remove_rpis(phba);
4757
4758	/* Free eligible FCF index bmask */
4759	kfree(phba->fcf.fcf_rr_bmask);
4760
4761	/* Free the ELS sgl list */
4762	lpfc_free_active_sgl(phba);
4763	lpfc_free_sgl_list(phba);
4764
4765	/* Free the SCSI sgl management array */
4766	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4767
4768	/* Free the completion queue event pool */
4769	lpfc_sli4_cq_event_release_all(phba);
4770	lpfc_sli4_cq_event_pool_destroy(phba);
4771
4772	/* Release resource identifiers. */
4773	lpfc_sli4_dealloc_resource_identifiers(phba);
4774
4775	/* Free the bsmbx region. */
4776	lpfc_destroy_bootstrap_mbox(phba);
4777
4778	/* Free the SLI Layer memory with SLI4 HBAs */
4779	lpfc_mem_free_all(phba);
4780
4781	/* Free the current connect table */
4782	list_for_each_entry_safe(conn_entry, next_conn_entry,
4783		&phba->fcf_conn_rec_list, list) {
4784		list_del_init(&conn_entry->list);
4785		kfree(conn_entry);
4786	}
4787
4788	return;
4789}
4790
4791/**
4792 * lpfc_init_api_table_setup - Set up init api function jump table
4793 * @phba: The hba struct for which this call is being executed.
4794 * @dev_grp: The HBA PCI-Device group number.
4795 *
4796 * This routine sets up the device INIT interface API function jump table
4797 * in @phba struct.
4798 *
4799 * Returns: 0 - success, -ENODEV - failure.
4800 **/
4801int
4802lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4803{
4804	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4805	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4806	phba->lpfc_selective_reset = lpfc_selective_reset;
4807	switch (dev_grp) {
4808	case LPFC_PCI_DEV_LP:
4809		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4810		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4811		phba->lpfc_stop_port = lpfc_stop_port_s3;
4812		break;
4813	case LPFC_PCI_DEV_OC:
4814		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4815		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4816		phba->lpfc_stop_port = lpfc_stop_port_s4;
4817		break;
4818	default:
4819		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4820				"1431 Invalid HBA PCI-device group: 0x%x\n",
4821				dev_grp);
4822		return -ENODEV;
4823		break;
4824	}
4825	return 0;
4826}
4827
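/*
 * With the table populated, callers stay SLI-revision agnostic and simply
 * dispatch through the hba struct, e.g. (illustrative):
 *
 *	phba->lpfc_stop_port(phba);	resolves to _s3 or _s4 above
 */
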
4828/**
4829 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4830 * @phba: pointer to lpfc hba data structure.
4831 *
4832 * This routine is invoked to set up the driver internal resources before the
4833 * device specific resource setup to support the HBA device it attached to.
4834 *
4835 * Return codes
4836 *	0 - successful
4837 *	other values - error
4838 **/
4839static int
4840lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4841{
4842	/*
4843	 * Driver resources common to all SLI revisions
4844	 */
4845	atomic_set(&phba->fast_event_count, 0);
4846	spin_lock_init(&phba->hbalock);
4847
4848	/* Initialize ndlp management spinlock */
4849	spin_lock_init(&phba->ndlp_lock);
4850
4851	INIT_LIST_HEAD(&phba->port_list);
4852	INIT_LIST_HEAD(&phba->work_list);
4853	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4854
4855	/* Initialize the wait queue head for the kernel thread */
4856	init_waitqueue_head(&phba->work_waitq);
4857
4858	/* Initialize the scsi buffer list used by driver for scsi IO */
4859	spin_lock_init(&phba->scsi_buf_list_lock);
4860	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4861
4862	/* Initialize the fabric iocb list */
4863	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4864
4865	/* Initialize list to save ELS buffers */
4866	INIT_LIST_HEAD(&phba->elsbuf);
4867
4868	/* Initialize FCF connection rec list */
4869	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4870
4871	return 0;
4872}
4873
4874/**
4875 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4876 * @phba: pointer to lpfc hba data structure.
4877 *
4878 * This routine is invoked to set up the driver internal resources after the
4879 * device specific resource setup to support the HBA device it attached to.
4880 *
4881 * Return codes
4882 * 	0 - successful
4883 * 	other values - error
4884 **/
4885static int
4886lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4887{
4888	int error;
4889
4890	/* Startup the kernel thread for this host adapter. */
4891	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4892					  "lpfc_worker_%d", phba->brd_no);
4893	if (IS_ERR(phba->worker_thread)) {
4894		error = PTR_ERR(phba->worker_thread);
4895		return error;
4896	}
4897
4898	return 0;
4899}
4900
4901/**
4902 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4903 * @phba: pointer to lpfc hba data structure.
4904 *
4905 * This routine is invoked to unset the driver internal resources set up after
4906 * the device specific resource setup for supporting the HBA device it
4907 * attached to.
4908 **/
4909static void
4910lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4911{
4912	/* Stop kernel worker thread */
4913	kthread_stop(phba->worker_thread);
4914}
4915
4916/**
4917 * lpfc_free_iocb_list - Free iocb list.
4918 * @phba: pointer to lpfc hba data structure.
4919 *
4920 * This routine is invoked to free the driver's IOCB list and memory.
4921 **/
4922static void
4923lpfc_free_iocb_list(struct lpfc_hba *phba)
4924{
4925	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4926
4927	spin_lock_irq(&phba->hbalock);
4928	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4929				 &phba->lpfc_iocb_list, list) {
4930		list_del(&iocbq_entry->list);
4931		kfree(iocbq_entry);
4932		phba->total_iocbq_bufs--;
4933	}
4934	spin_unlock_irq(&phba->hbalock);
4935
4936	return;
4937}
4938
4939/**
4940 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4941 * @phba: pointer to lpfc hba data structure.
4942 *
4943 * This routine is invoked to allocate and initialize the driver's IOCB
4944 * list and set up the IOCB tag array accordingly.
4945 *
4946 * Return codes
4947 *	0 - successful
4948 *	other values - error
4949 **/
4950static int
4951lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4952{
4953	struct lpfc_iocbq *iocbq_entry = NULL;
4954	uint16_t iotag;
4955	int i;
4956
4957	/* Initialize and populate the iocb list per host.  */
4958	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4959	for (i = 0; i < iocb_count; i++) {
4960		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4961		if (iocbq_entry == NULL) {
4962			printk(KERN_ERR "%s: only allocated %d iocbs of "
4963				"expected %d count. Unloading driver.\n",
4964				__func__, i, iocb_count);
4965			goto out_free_iocbq;
4966		}
4967
4968		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4969		if (iotag == 0) {
4970			kfree(iocbq_entry);
4971			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4972				"Unloading driver.\n", __func__);
4973			goto out_free_iocbq;
4974		}
4975		iocbq_entry->sli4_lxritag = NO_XRI;
4976		iocbq_entry->sli4_xritag = NO_XRI;
4977
4978		spin_lock_irq(&phba->hbalock);
4979		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4980		phba->total_iocbq_bufs++;
4981		spin_unlock_irq(&phba->hbalock);
4982	}
4983
4984	return 0;
4985
4986out_free_iocbq:
4987	lpfc_free_iocb_list(phba);
4988
4989	return -ENOMEM;
4990}
4991
4992/**
4993 * lpfc_free_sgl_list - Free sgl list.
4994 * @phba: pointer to lpfc hba data structure.
4995 *
4996 * This routine is invoked to free the driver's sgl list and memory.
4997 **/
4998static void
4999lpfc_free_sgl_list(struct lpfc_hba *phba)
5000{
5001	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5002	LIST_HEAD(sglq_list);
5003
5004	spin_lock_irq(&phba->hbalock);
5005	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5006	spin_unlock_irq(&phba->hbalock);
5007
5008	list_for_each_entry_safe(sglq_entry, sglq_next,
5009				 &sglq_list, list) {
5010		list_del(&sglq_entry->list);
5011		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5012		kfree(sglq_entry);
5013		phba->sli4_hba.total_sglq_bufs--;
5014	}
5015	kfree(phba->sli4_hba.lpfc_els_sgl_array);
5016}
5017
5018/**
5019 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5020 * @phba: pointer to lpfc hba data structure.
5021 *
5022 * This routine is invoked to allocate the driver's active sgl memory.
5023 * This array will hold the sglq_entry's for active IOs.
5024 **/
5025static int
5026lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5027{
5028	int size;
5029	size = sizeof(struct lpfc_sglq *);
5030	size *= phba->sli4_hba.max_cfg_param.max_xri;
5031
5032	phba->sli4_hba.lpfc_sglq_active_list =
5033		kzalloc(size, GFP_KERNEL);
5034	if (!phba->sli4_hba.lpfc_sglq_active_list)
5035		return -ENOMEM;
5036	return 0;
5037}
5038
5039/**
5040 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5041 * @phba: pointer to lpfc hba data structure.
5042 *
5043 * This routine is invoked to walk through the array of active sglq entries
5044 * and free all of the resources.
5045 * This is just a placeholder for now.
5046 **/
5047static void
5048lpfc_free_active_sgl(struct lpfc_hba *phba)
5049{
5050	kfree(phba->sli4_hba.lpfc_sglq_active_list);
5051}
5052
5053/**
5054 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5055 * @phba: pointer to lpfc hba data structure.
5056 *
5057 * This routine is invoked to allocate and initialize the driver's sgl
5058 * list and set up the sgl xritag tag array accordingly.
5059 *
5060 * Return codes
5061 *	0 - successful
5062 *	other values - error
5063 **/
5064static int
5065lpfc_init_sgl_list(struct lpfc_hba *phba)
5066{
5067	struct lpfc_sglq *sglq_entry = NULL;
5068	int i;
5069	int els_xri_cnt;
5070
5071	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5072	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5073				"2400 ELS XRI count %d.\n",
5074				els_xri_cnt);
5075	/* Initialize and populate the sglq list per host/VF. */
5076	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5077	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5078
5079	/* Sanity check on XRI management */
5080	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
5081		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5082				"2562 No room left for SCSI XRI allocation: "
5083				"max_xri=%d, els_xri=%d\n",
5084				phba->sli4_hba.max_cfg_param.max_xri,
5085				els_xri_cnt);
5086		return -ENOMEM;
5087	}
5088
5089	/* Allocate memory for the ELS XRI management array */
5090	phba->sli4_hba.lpfc_els_sgl_array =
5091			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
5092			GFP_KERNEL);
5093
5094	if (!phba->sli4_hba.lpfc_els_sgl_array) {
5095		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5096				"2401 Failed to allocate memory for ELS "
5097				"XRI management array of size %d.\n",
5098				els_xri_cnt);
5099		return -ENOMEM;
5100	}
5101
	/* The XRIs not reserved for ELS are available for SCSI I/O */
5103	phba->sli4_hba.scsi_xri_max =
5104			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
5105	phba->sli4_hba.scsi_xri_cnt = 0;
5106	phba->sli4_hba.lpfc_scsi_psb_array =
5107			kzalloc((sizeof(struct lpfc_scsi_buf *) *
5108			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
5109
5110	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
5111		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5112				"2563 Failed to allocate memory for SCSI "
5113				"XRI management array of size %d.\n",
5114				phba->sli4_hba.scsi_xri_max);
5115		kfree(phba->sli4_hba.lpfc_els_sgl_array);
5116		return -ENOMEM;
5117	}
5118
5119	for (i = 0; i < els_xri_cnt; i++) {
5120		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
5121		if (sglq_entry == NULL) {
5122			printk(KERN_ERR "%s: only allocated %d sgls of "
5123				"expected %d count. Unloading driver.\n",
5124				__func__, i, els_xri_cnt);
5125			goto out_free_mem;
5126		}
5127
5128		sglq_entry->buff_type = GEN_BUFF_TYPE;
5129		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
5130		if (sglq_entry->virt == NULL) {
5131			kfree(sglq_entry);
5132			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
5133				"Unloading driver.\n", __func__);
5134			goto out_free_mem;
5135		}
5136		sglq_entry->sgl = sglq_entry->virt;
5137		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5138
		/* The list order is used by the later block SGL registration */
5140		spin_lock_irq(&phba->hbalock);
5141		sglq_entry->state = SGL_FREED;
5142		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5143		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5144		phba->sli4_hba.total_sglq_bufs++;
5145		spin_unlock_irq(&phba->hbalock);
5146	}
5147	return 0;
5148
5149out_free_mem:
5150	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5151	lpfc_free_sgl_list(phba);
5152	return -ENOMEM;
5153}
5154
5155/**
5156 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5157 * @phba: pointer to lpfc hba data structure.
5158 *
5159 * This routine is invoked to post rpi header templates to the
5160 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers (each header is 64 bytes).  This is
 * an initialization routine and should be called only when interrupts
 * are disabled.
5164 *
5165 * Return codes
5166 * 	0 - successful
5167 *	-ERROR - otherwise.
5168 **/
5169int
5170lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5171{
5172	int rc = 0;
5173	struct lpfc_rpi_hdr *rpi_hdr;
5174
5175	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5176	if (!phba->sli4_hba.rpi_hdrs_in_use)
5177		return rc;
5178	if (phba->sli4_hba.extents_in_use)
5179		return -EIO;
5180
5181	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5182	if (!rpi_hdr) {
5183		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5184				"0391 Error during rpi post operation\n");
5185		lpfc_sli4_remove_rpis(phba);
5186		rc = -ENODEV;
5187	}
5188
5189	return rc;
5190}
5191
5192/**
5193 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5194 * @phba: pointer to lpfc hba data structure.
5195 *
5196 * This routine is invoked to allocate a single 4KB memory region to
5197 * support rpis and stores them in the phba.  This single region
5198 * provides support for up to 64 rpis.  The region is used globally
5199 * by the device.
5200 *
5201 * Returns:
5202 *   A valid rpi hdr on success.
5203 *   A NULL pointer on any failure.
5204 **/
5205struct lpfc_rpi_hdr *
5206lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5207{
5208	uint16_t rpi_limit, curr_rpi_range;
5209	struct lpfc_dmabuf *dmabuf;
5210	struct lpfc_rpi_hdr *rpi_hdr;
5211	uint32_t rpi_count;
5212
5213	/*
5214	 * If the SLI4 port supports extents, posting the rpi header isn't
5215	 * required.  Set the expected maximum count and let the actual value
5216	 * get set when extents are fully allocated.
5217	 */
5218	if (!phba->sli4_hba.rpi_hdrs_in_use)
5219		return NULL;
5220	if (phba->sli4_hba.extents_in_use)
5221		return NULL;
5222
5223	/* The limit on the logical index is just the max_rpi count. */
5224	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5225	phba->sli4_hba.max_cfg_param.max_rpi - 1;
5226
5227	spin_lock_irq(&phba->hbalock);
5228	/*
5229	 * Establish the starting RPI in this header block.  The starting
5230	 * rpi is normalized to a zero base because the physical rpi is
5231	 * port based.
5232	 */
5233	curr_rpi_range = phba->sli4_hba.next_rpi;
5234	spin_unlock_irq(&phba->hbalock);
5235
5236	/*
5237	 * The port has a limited number of rpis. The increment here
5238	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5239	 * and to allow the full max_rpi range per port.
5240	 */
5241	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5242		rpi_count = rpi_limit - curr_rpi_range;
5243	else
5244		rpi_count = LPFC_RPI_HDR_COUNT;
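	/*
	 * Worked example, assuming LPFC_RPI_HDR_COUNT is 64: with
	 * rpi_limit = 100, a curr_rpi_range of 0 yields a full block of
	 * 64 rpis, while a curr_rpi_range of 64 would run past the
	 * limit and is clamped to rpi_count = 100 - 64 = 36.
	 */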
5245
5246	if (!rpi_count)
5247		return NULL;
5248	/*
5249	 * First allocate the protocol header region for the port.  The
5250	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5251	 */
5252	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5253	if (!dmabuf)
5254		return NULL;
5255
5256	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5257					  LPFC_HDR_TEMPLATE_SIZE,
5258					  &dmabuf->phys,
5259					  GFP_KERNEL);
5260	if (!dmabuf->virt) {
5261		rpi_hdr = NULL;
5262		goto err_free_dmabuf;
5263	}
5264
5265	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5266	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5267		rpi_hdr = NULL;
5268		goto err_free_coherent;
5269	}
5270
5271	/* Save the rpi header data for cleanup later. */
5272	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5273	if (!rpi_hdr)
5274		goto err_free_coherent;
5275
5276	rpi_hdr->dmabuf = dmabuf;
5277	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5278	rpi_hdr->page_count = 1;
5279	spin_lock_irq(&phba->hbalock);
5280
5281	/* The rpi_hdr stores the logical index only. */
5282	rpi_hdr->start_rpi = curr_rpi_range;
5283	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5284
5285	/*
	 * The next_rpi stores the next logical modulo-64 rpi value used
5287	 * to post physical rpis in subsequent rpi postings.
5288	 */
5289	phba->sli4_hba.next_rpi += rpi_count;
5290	spin_unlock_irq(&phba->hbalock);
5291	return rpi_hdr;
5292
5293 err_free_coherent:
5294	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5295			  dmabuf->virt, dmabuf->phys);
5296 err_free_dmabuf:
5297	kfree(dmabuf);
5298	return NULL;
5299}
5300
5301/**
5302 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5303 * @phba: pointer to lpfc hba data structure.
5304 *
5305 * This routine is invoked to remove all memory resources allocated
5306 * to support rpis for SLI4 ports not supporting extents. This routine
5307 * presumes the caller has released all rpis consumed by fabric or port
5308 * logins and is prepared to have the header pages removed.
5309 **/
5310void
5311lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5312{
5313	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5314
5315	if (!phba->sli4_hba.rpi_hdrs_in_use)
5316		goto exit;
5317
5318	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5319				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5320		list_del(&rpi_hdr->list);
5321		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5322				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5323		kfree(rpi_hdr->dmabuf);
5324		kfree(rpi_hdr);
5325	}
5326 exit:
5327	/* There are no rpis available to the port now. */
5328	phba->sli4_hba.next_rpi = 0;
5329}
5330
5331/**
5332 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5333 * @pdev: pointer to pci device data structure.
5334 *
5335 * This routine is invoked to allocate the driver hba data structure for an
5336 * HBA device. If the allocation is successful, the phba reference to the
5337 * PCI device data structure is set.
5338 *
5339 * Return codes
5340 *      pointer to @phba - successful
5341 *      NULL - error
5342 **/
5343static struct lpfc_hba *
5344lpfc_hba_alloc(struct pci_dev *pdev)
5345{
5346	struct lpfc_hba *phba;
5347
5348	/* Allocate memory for HBA structure */
5349	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5350	if (!phba) {
5351		dev_err(&pdev->dev, "failed to allocate hba struct\n");
5352		return NULL;
5353	}
5354
5355	/* Set reference to PCI device in HBA structure */
5356	phba->pcidev = pdev;
5357
5358	/* Assign an unused board number */
5359	phba->brd_no = lpfc_get_instance();
5360	if (phba->brd_no < 0) {
5361		kfree(phba);
5362		return NULL;
5363	}
5364
5365	spin_lock_init(&phba->ct_ev_lock);
5366	INIT_LIST_HEAD(&phba->ct_ev_waiters);
5367
5368	return phba;
5369}
5370
5371/**
5372 * lpfc_hba_free - Free driver hba data structure with a device.
5373 * @phba: pointer to lpfc hba data structure.
5374 *
5375 * This routine is invoked to free the driver hba data structure with an
5376 * HBA device.
5377 **/
5378static void
5379lpfc_hba_free(struct lpfc_hba *phba)
5380{
5381	/* Release the driver assigned board number */
5382	idr_remove(&lpfc_hba_index, phba->brd_no);
5383
5384	kfree(phba);
5385	return;
5386}
5387
5388/**
5389 * lpfc_create_shost - Create hba physical port with associated scsi host.
5390 * @phba: pointer to lpfc hba data structure.
5391 *
5392 * This routine is invoked to create HBA physical port and associate a SCSI
5393 * host with it.
5394 *
5395 * Return codes
5396 *      0 - successful
5397 *      other values - error
5398 **/
5399static int
5400lpfc_create_shost(struct lpfc_hba *phba)
5401{
5402	struct lpfc_vport *vport;
5403	struct Scsi_Host  *shost;
5404
5405	/* Initialize HBA FC structure */
5406	phba->fc_edtov = FF_DEF_EDTOV;
5407	phba->fc_ratov = FF_DEF_RATOV;
5408	phba->fc_altov = FF_DEF_ALTOV;
5409	phba->fc_arbtov = FF_DEF_ARBTOV;
5410
5411	atomic_set(&phba->sdev_cnt, 0);
5412	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5413	if (!vport)
5414		return -ENODEV;
5415
5416	shost = lpfc_shost_from_vport(vport);
5417	phba->pport = vport;
5418	lpfc_debugfs_initialize(vport);
5419	/* Put reference to SCSI host to driver's device private data */
5420	pci_set_drvdata(phba->pcidev, shost);
5421
5422	return 0;
5423}
5424
5425/**
5426 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5427 * @phba: pointer to lpfc hba data structure.
5428 *
5429 * This routine is invoked to destroy HBA physical port and the associated
5430 * SCSI host.
5431 **/
5432static void
5433lpfc_destroy_shost(struct lpfc_hba *phba)
5434{
5435	struct lpfc_vport *vport = phba->pport;
5436
	/* Destroy the physical port associated with the SCSI host */
5438	destroy_port(vport);
5439
5440	return;
5441}
5442
5443/**
5444 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5445 * @phba: pointer to lpfc hba data structure.
5446 * @shost: the shost to be used to detect Block guard settings.
5447 *
5448 * This routine sets up the local Block guard protocol settings for @shost.
5449 * This routine also allocates memory for debugging bg buffers.
5450 **/
5451static void
5452lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5453{
5454	int pagecnt = 10;
5455	if (lpfc_prot_mask && lpfc_prot_guard) {
5456		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5457				"1478 Registering BlockGuard with the "
5458				"SCSI layer\n");
5459		scsi_host_set_prot(shost, lpfc_prot_mask);
5460		scsi_host_set_guard(shost, lpfc_prot_guard);
5461	}
5462	if (!_dump_buf_data) {
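		/*
		 * pagecnt is a page allocation order: each attempt asks
		 * __get_free_pages() for 2^pagecnt pages (order 10 is
		 * 1024 pages, i.e. 4 MB with 4 KB pages).  The order is
		 * stepped down after each failure until an allocation
		 * succeeds or the order reaches zero.
		 */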
5463		while (pagecnt) {
5464			spin_lock_init(&_dump_buf_lock);
5465			_dump_buf_data =
5466				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5467			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
					"_dump_buf_data at 0x%p\n",
					(1 << pagecnt), _dump_buf_data);
5472				_dump_buf_data_order = pagecnt;
5473				memset(_dump_buf_data, 0,
5474				       ((1 << PAGE_SHIFT) << pagecnt));
5475				break;
5476			} else
5477				--pagecnt;
5478		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated "
			"_dump_buf_data=0x%p\n", _dump_buf_data);
5487	if (!_dump_buf_dif) {
5488		while (pagecnt) {
5489			_dump_buf_dif =
5490				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5491			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
					"_dump_buf_dif at 0x%p\n",
					(1 << pagecnt), _dump_buf_dif);
5496				_dump_buf_dif_order = pagecnt;
5497				memset(_dump_buf_dif, 0,
5498				       ((1 << PAGE_SHIFT) << pagecnt));
5499				break;
5500			} else
5501				--pagecnt;
5502		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9047 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated "
			"_dump_buf_dif=0x%p\n", _dump_buf_dif);
5511}
5512
5513/**
5514 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5515 * @phba: pointer to lpfc hba data structure.
5516 *
5517 * This routine is invoked to perform all the necessary post initialization
5518 * setup for the device.
5519 **/
5520static void
5521lpfc_post_init_setup(struct lpfc_hba *phba)
5522{
5523	struct Scsi_Host  *shost;
5524	struct lpfc_adapter_event_header adapter_event;
5525
5526	/* Get the default values for Model Name and Description */
5527	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5528
5529	/*
5530	 * hba setup may have changed the hba_queue_depth so we need to
5531	 * adjust the value of can_queue.
5532	 */
5533	shost = pci_get_drvdata(phba->pcidev);
5534	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5535	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5536		lpfc_setup_bg(phba, shost);
5537
5538	lpfc_host_attrib_init(shost);
5539
5540	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5541		spin_lock_irq(shost->host_lock);
5542		lpfc_poll_start_timer(phba);
5543		spin_unlock_irq(shost->host_lock);
5544	}
5545
5546	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5547			"0428 Perform SCSI scan\n");
5548	/* Send board arrival event to upper layer */
5549	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5550	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5551	fc_host_post_vendor_event(shost, fc_get_event_number(),
5552				  sizeof(adapter_event),
5553				  (char *) &adapter_event,
5554				  LPFC_NL_VENDOR_ID);
5555	return;
5556}
5557
5558/**
5559 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5560 * @phba: pointer to lpfc hba data structure.
5561 *
5562 * This routine is invoked to set up the PCI device memory space for device
5563 * with SLI-3 interface spec.
5564 *
5565 * Return codes
5566 * 	0 - successful
5567 * 	other values - error
5568 **/
5569static int
5570lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5571{
5572	struct pci_dev *pdev;
5573	unsigned long bar0map_len, bar2map_len;
5574	int i, hbq_count;
5575	void *ptr;
5576	int error = -ENODEV;
5577
5578	/* Obtain PCI device reference */
5579	if (!phba->pcidev)
5580		return error;
5581	else
5582		pdev = phba->pcidev;
5583
5584	/* Set the device DMA mask size */
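	/*
	 * Prefer full 64-bit DMA addressing and fall back to a 32-bit
	 * mask if the platform cannot honor it; fail the probe when
	 * neither mask can be set.
	 */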
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5589			return error;
5590		}
5591	}
5592
5593	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5594	 * required by each mapping.
5595	 */
5596	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5597	bar0map_len = pci_resource_len(pdev, 0);
5598
5599	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5600	bar2map_len = pci_resource_len(pdev, 2);
5601
5602	/* Map HBA SLIM to a kernel virtual address. */
5603	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5604	if (!phba->slim_memmap_p) {
5605		dev_printk(KERN_ERR, &pdev->dev,
5606			   "ioremap failed for SLIM memory.\n");
5607		goto out;
5608	}
5609
5610	/* Map HBA Control Registers to a kernel virtual address. */
5611	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5612	if (!phba->ctrl_regs_memmap_p) {
5613		dev_printk(KERN_ERR, &pdev->dev,
5614			   "ioremap failed for HBA control registers.\n");
5615		goto out_iounmap_slim;
5616	}
5617
5618	/* Allocate memory for SLI-2 structures */
5619	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5620					       SLI2_SLIM_SIZE,
5621					       &phba->slim2p.phys,
5622					       GFP_KERNEL);
5623	if (!phba->slim2p.virt)
5624		goto out_iounmap;
5625
5626	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5627	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5628	phba->mbox_ext = (phba->slim2p.virt +
5629		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5630	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5631	phba->IOCBs = (phba->slim2p.virt +
5632		       offsetof(struct lpfc_sli2_slim, IOCBs));
5633
5634	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5635						 lpfc_sli_hbq_size(),
5636						 &phba->hbqslimp.phys,
5637						 GFP_KERNEL);
5638	if (!phba->hbqslimp.virt)
5639		goto out_free_slim;
5640
5641	hbq_count = lpfc_sli_hbq_count();
5642	ptr = phba->hbqslimp.virt;
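	/*
	 * Carve the single hbqslimp allocation into per-HBQ regions,
	 * each sized by that HBQ's configured entry count.
	 */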
5643	for (i = 0; i < hbq_count; ++i) {
5644		phba->hbqs[i].hbq_virt = ptr;
5645		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5646		ptr += (lpfc_hbq_defs[i]->entry_count *
5647			sizeof(struct lpfc_hbq_entry));
5648	}
5649	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5650	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5651
5652	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5653
5654	INIT_LIST_HEAD(&phba->rb_pend_list);
5655
5656	phba->MBslimaddr = phba->slim_memmap_p;
5657	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5658	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5659	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5660	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5661
5662	return 0;
5663
5664out_free_slim:
5665	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5666			  phba->slim2p.virt, phba->slim2p.phys);
5667out_iounmap:
5668	iounmap(phba->ctrl_regs_memmap_p);
5669out_iounmap_slim:
5670	iounmap(phba->slim_memmap_p);
5671out:
5672	return error;
5673}
5674
5675/**
5676 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5677 * @phba: pointer to lpfc hba data structure.
5678 *
5679 * This routine is invoked to unset the PCI device memory space for device
5680 * with SLI-3 interface spec.
5681 **/
5682static void
5683lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5684{
5685	struct pci_dev *pdev;
5686
5687	/* Obtain PCI device reference */
5688	if (!phba->pcidev)
5689		return;
5690	else
5691		pdev = phba->pcidev;
5692
5693	/* Free coherent DMA memory allocated */
5694	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5695			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5696	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5697			  phba->slim2p.virt, phba->slim2p.phys);
5698
5699	/* I/O memory unmap */
5700	iounmap(phba->ctrl_regs_memmap_p);
5701	iounmap(phba->slim_memmap_p);
5702
5703	return;
5704}
5705
5706/**
5707 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5708 * @phba: pointer to lpfc hba data structure.
5709 *
5710 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5711 * done and check status.
5712 *
5713 * Return 0 if successful, otherwise -ENODEV.
5714 **/
5715int
5716lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5717{
5718	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5719	struct lpfc_register reg_data;
5720	int i, port_error = 0;
5721	uint32_t if_type;
5722
5723	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5724	memset(&reg_data, 0, sizeof(reg_data));
5725	if (!phba->sli4_hba.PSMPHRregaddr)
5726		return -ENODEV;
5727
5728	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5729	for (i = 0; i < 3000; i++) {
5730		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5731			&portsmphr_reg.word0) ||
5732			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5733			/* Port has a fatal POST error, break out */
5734			port_error = -ENODEV;
5735			break;
5736		}
5737		if (LPFC_POST_STAGE_PORT_READY ==
5738		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5739			break;
5740		msleep(10);
5741	}
5742
5743	/*
5744	 * If there was a port error during POST, then don't proceed with
5745	 * other register reads as the data may not be valid.  Just exit.
5746	 */
5747	if (port_error) {
5748		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5749			"1408 Port Failed POST - portsmphr=0x%x, "
5750			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5751			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5752			portsmphr_reg.word0,
5753			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5754			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5755			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5756			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5757			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5758			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5759			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5760			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5761	} else {
5762		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5763				"2534 Device Info: SLIFamily=0x%x, "
5764				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5765				"SLIHint_2=0x%x, FT=0x%x\n",
5766				bf_get(lpfc_sli_intf_sli_family,
5767				       &phba->sli4_hba.sli_intf),
5768				bf_get(lpfc_sli_intf_slirev,
5769				       &phba->sli4_hba.sli_intf),
5770				bf_get(lpfc_sli_intf_if_type,
5771				       &phba->sli4_hba.sli_intf),
5772				bf_get(lpfc_sli_intf_sli_hint1,
5773				       &phba->sli4_hba.sli_intf),
5774				bf_get(lpfc_sli_intf_sli_hint2,
5775				       &phba->sli4_hba.sli_intf),
5776				bf_get(lpfc_sli_intf_func_type,
5777				       &phba->sli4_hba.sli_intf));
5778		/*
5779		 * Check for other Port errors during the initialization
5780		 * process.  Fail the load if the port did not come up
5781		 * correctly.
5782		 */
5783		if_type = bf_get(lpfc_sli_intf_if_type,
5784				 &phba->sli4_hba.sli_intf);
5785		switch (if_type) {
5786		case LPFC_SLI_INTF_IF_TYPE_0:
5787			phba->sli4_hba.ue_mask_lo =
5788			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5789			phba->sli4_hba.ue_mask_hi =
5790			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5791			uerrlo_reg.word0 =
5792			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5793			uerrhi_reg.word0 =
5794				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5795			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5796			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5797				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5798						"1422 Unrecoverable Error "
5799						"Detected during POST "
5800						"uerr_lo_reg=0x%x, "
5801						"uerr_hi_reg=0x%x, "
5802						"ue_mask_lo_reg=0x%x, "
5803						"ue_mask_hi_reg=0x%x\n",
5804						uerrlo_reg.word0,
5805						uerrhi_reg.word0,
5806						phba->sli4_hba.ue_mask_lo,
5807						phba->sli4_hba.ue_mask_hi);
5808				port_error = -ENODEV;
5809			}
5810			break;
5811		case LPFC_SLI_INTF_IF_TYPE_2:
5812			/* Final checks.  The port status should be clean. */
5813			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5814				&reg_data.word0) ||
5815				(bf_get(lpfc_sliport_status_err, &reg_data) &&
5816				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5817				phba->work_status[0] =
5818					readl(phba->sli4_hba.u.if_type2.
5819					      ERR1regaddr);
5820				phba->work_status[1] =
5821					readl(phba->sli4_hba.u.if_type2.
5822					      ERR2regaddr);
5823				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5824					"2888 Port Error Detected "
5825					"during POST: "
5826					"port status reg 0x%x, "
5827					"port_smphr reg 0x%x, "
5828					"error 1=0x%x, error 2=0x%x\n",
5829					reg_data.word0,
5830					portsmphr_reg.word0,
5831					phba->work_status[0],
5832					phba->work_status[1]);
5833				port_error = -ENODEV;
5834			}
5835			break;
5836		case LPFC_SLI_INTF_IF_TYPE_1:
5837		default:
5838			break;
5839		}
5840	}
5841	return port_error;
5842}
5843
5844/**
5845 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5846 * @phba: pointer to lpfc hba data structure.
5847 * @if_type:  The SLI4 interface type getting configured.
5848 *
5849 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5850 * memory map.
5851 **/
5852static void
5853lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5854{
5855	switch (if_type) {
5856	case LPFC_SLI_INTF_IF_TYPE_0:
5857		phba->sli4_hba.u.if_type0.UERRLOregaddr =
5858			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5859		phba->sli4_hba.u.if_type0.UERRHIregaddr =
5860			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5861		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5862			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5863		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5864			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5865		phba->sli4_hba.SLIINTFregaddr =
5866			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5867		break;
5868	case LPFC_SLI_INTF_IF_TYPE_2:
5869		phba->sli4_hba.u.if_type2.ERR1regaddr =
5870			phba->sli4_hba.conf_regs_memmap_p +
5871						LPFC_CTL_PORT_ER1_OFFSET;
5872		phba->sli4_hba.u.if_type2.ERR2regaddr =
5873			phba->sli4_hba.conf_regs_memmap_p +
5874						LPFC_CTL_PORT_ER2_OFFSET;
5875		phba->sli4_hba.u.if_type2.CTRLregaddr =
5876			phba->sli4_hba.conf_regs_memmap_p +
5877						LPFC_CTL_PORT_CTL_OFFSET;
5878		phba->sli4_hba.u.if_type2.STATUSregaddr =
5879			phba->sli4_hba.conf_regs_memmap_p +
5880						LPFC_CTL_PORT_STA_OFFSET;
5881		phba->sli4_hba.SLIINTFregaddr =
5882			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5883		phba->sli4_hba.PSMPHRregaddr =
5884			phba->sli4_hba.conf_regs_memmap_p +
5885						LPFC_CTL_PORT_SEM_OFFSET;
5886		phba->sli4_hba.RQDBregaddr =
5887			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5888		phba->sli4_hba.WQDBregaddr =
5889			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5890		phba->sli4_hba.EQCQDBregaddr =
5891			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5892		phba->sli4_hba.MQDBregaddr =
5893			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5894		phba->sli4_hba.BMBXregaddr =
5895			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5896		break;
5897	case LPFC_SLI_INTF_IF_TYPE_1:
5898	default:
5899		dev_printk(KERN_ERR, &phba->pcidev->dev,
5900			   "FATAL - unsupported SLI4 interface type - %d\n",
5901			   if_type);
5902		break;
5903	}
5904}
5905
5906/**
5907 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5908 * @phba: pointer to lpfc hba data structure.
5909 *
5910 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5911 * memory map.
5912 **/
5913static void
5914lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5915{
5916	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5917		LPFC_SLIPORT_IF0_SMPHR;
5918	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5919		LPFC_HST_ISR0;
5920	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5921		LPFC_HST_IMR0;
5922	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5923		LPFC_HST_ISCR0;
5924}
5925
5926/**
5927 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5928 * @phba: pointer to lpfc hba data structure.
5929 * @vf: virtual function number
5930 *
 * This routine is invoked to set up the SLI4 BAR2 doorbell register memory
 * map based on the given virtual function number, @vf.
5933 *
5934 * Return 0 if successful, otherwise -ENODEV.
5935 **/
5936static int
5937lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5938{
5939	if (vf > LPFC_VIR_FUNC_MAX)
5940		return -ENODEV;
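	/*
	 * Each virtual function owns its own page of doorbell registers
	 * in BAR2, so offset by the vf's page before applying the
	 * per-queue doorbell offsets.
	 */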
5941
5942	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5943				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5944	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5945				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5946	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5947				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5948	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5949				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5950	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5951				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5952	return 0;
5953}
5954
5955/**
5956 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5957 * @phba: pointer to lpfc hba data structure.
5958 *
5959 * This routine is invoked to create the bootstrap mailbox
5960 * region consistent with the SLI-4 interface spec.  This
5961 * routine allocates all memory necessary to communicate
5962 * mailbox commands to the port and sets up all alignment
5963 * needs.  No locks are expected to be held when calling
5964 * this routine.
5965 *
5966 * Return codes
5967 * 	0 - successful
5968 * 	-ENOMEM - could not allocated memory.
5969 **/
5970static int
5971lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5972{
5973	uint32_t bmbx_size;
5974	struct lpfc_dmabuf *dmabuf;
5975	struct dma_address *dma_address;
5976	uint32_t pa_addr;
5977	uint64_t phys_addr;
5978
5979	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5980	if (!dmabuf)
5981		return -ENOMEM;
5982
5983	/*
	 * The bootstrap mailbox region consists of two parts
	 * plus an alignment restriction of 16 bytes.
5986	 */
5987	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
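	/*
	 * The extra LPFC_ALIGN_16_BYTE - 1 bytes guarantee that a
	 * 16-byte-aligned window of the required size exists within the
	 * allocation regardless of where dma_alloc_coherent() places it.
	 */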
5988	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5989					  bmbx_size,
5990					  &dmabuf->phys,
5991					  GFP_KERNEL);
5992	if (!dmabuf->virt) {
5993		kfree(dmabuf);
5994		return -ENOMEM;
5995	}
5996	memset(dmabuf->virt, 0, bmbx_size);
5997
5998	/*
5999	 * Initialize the bootstrap mailbox pointers now so that the register
6000	 * operations are simple later.  The mailbox dma address is required
6001	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
6003	 * command to the port.
6004	 */
6005	phba->sli4_hba.bmbx.dmabuf = dmabuf;
6006	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6007
6008	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6009					      LPFC_ALIGN_16_BYTE);
6010	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6011					      LPFC_ALIGN_16_BYTE);
6012
6013	/*
6014	 * Set the high and low physical addresses now.  The SLI4 alignment
6015	 * requirement is 16 bytes and the mailbox is posted to the port
6016	 * as two 30-bit addresses.  The other data is a bit marking whether
6017	 * the 30-bit address is the high or low address.
	 * Upcast bmbx.aphys to 64 bits so the shift instruction compiles
	 * cleanly on 32-bit machines.
6020	 */
6021	dma_address = &phba->sli4_hba.bmbx.dma_address;
6022	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6023	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6024	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6025					   LPFC_BMBX_BIT1_ADDR_HI);
6026
6027	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6028	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6029					   LPFC_BMBX_BIT1_ADDR_LO);
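	/*
	 * Worked example of the split above: for a 16-byte-aligned
	 * aphys of 0x123456780, phys_addr >> 34 is 0, so addr_hi holds
	 * only its flag bit, while (aphys >> 4) & 0x3fffffff yields
	 * 0x12345678, which is shifted left by 2 and tagged with the
	 * low-address flag to form addr_lo.
	 */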
6030	return 0;
6031}
6032
6033/**
6034 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6035 * @phba: pointer to lpfc hba data structure.
6036 *
 * This routine is invoked to tear down the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure that all outstanding mailbox commands have
 * completed, that no additional mailbox commands are issued, and
 * that interrupts are disabled before calling this routine.
6042 *
6043 **/
6044static void
6045lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6046{
6047	dma_free_coherent(&phba->pcidev->dev,
6048			  phba->sli4_hba.bmbx.bmbx_size,
6049			  phba->sli4_hba.bmbx.dmabuf->virt,
6050			  phba->sli4_hba.bmbx.dmabuf->phys);
6051
6052	kfree(phba->sli4_hba.bmbx.dmabuf);
6053	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6054}
6055
6056/**
6057 * lpfc_sli4_read_config - Get the config parameters.
6058 * @phba: pointer to lpfc hba data structure.
6059 *
6060 * This routine is invoked to read the configuration parameters from the HBA.
6061 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6063 * allocation for the port.
6064 *
6065 * Return codes
6066 * 	0 - successful
6067 * 	-ENOMEM - No available memory
6068 *      -EIO - The mailbox failed to complete successfully.
6069 **/
6070int
6071lpfc_sli4_read_config(struct lpfc_hba *phba)
6072{
6073	LPFC_MBOXQ_t *pmb;
6074	struct lpfc_mbx_read_config *rd_config;
6075	union  lpfc_sli4_cfg_shdr *shdr;
6076	uint32_t shdr_status, shdr_add_status;
6077	struct lpfc_mbx_get_func_cfg *get_func_cfg;
6078	struct lpfc_rsrc_desc_fcfcoe *desc;
6079	uint32_t desc_count;
6080	int length, i, rc = 0;
6081
6082	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6083	if (!pmb) {
6084		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6085				"2011 Unable to allocate memory for issuing "
6086				"SLI_CONFIG_SPECIAL mailbox command\n");
6087		return -ENOMEM;
6088	}
6089
6090	lpfc_read_config(phba, pmb);
6091
6092	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6093	if (rc != MBX_SUCCESS) {
6094		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6095			"2012 Mailbox failed , mbxCmd x%x "
6096			"READ_CONFIG, mbxStatus x%x\n",
6097			bf_get(lpfc_mqe_command, &pmb->u.mqe),
6098			bf_get(lpfc_mqe_status, &pmb->u.mqe));
6099		rc = -EIO;
6100	} else {
6101		rd_config = &pmb->u.mqe.un.rd_config;
6102		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6103			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6104			phba->sli4_hba.lnk_info.lnk_tp =
6105				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6106			phba->sli4_hba.lnk_info.lnk_no =
6107				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6108			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6109					"3081 lnk_type:%d, lnk_numb:%d\n",
6110					phba->sli4_hba.lnk_info.lnk_tp,
6111					phba->sli4_hba.lnk_info.lnk_no);
6112		} else
6113			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6114					"3082 Mailbox (x%x) returned ldv:x0\n",
6115					bf_get(lpfc_mqe_command, &pmb->u.mqe));
6116		phba->sli4_hba.extents_in_use =
6117			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6118		phba->sli4_hba.max_cfg_param.max_xri =
6119			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6120		phba->sli4_hba.max_cfg_param.xri_base =
6121			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6122		phba->sli4_hba.max_cfg_param.max_vpi =
6123			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6124		phba->sli4_hba.max_cfg_param.vpi_base =
6125			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6126		phba->sli4_hba.max_cfg_param.max_rpi =
6127			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6128		phba->sli4_hba.max_cfg_param.rpi_base =
6129			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6130		phba->sli4_hba.max_cfg_param.max_vfi =
6131			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6132		phba->sli4_hba.max_cfg_param.vfi_base =
6133			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6134		phba->sli4_hba.max_cfg_param.max_fcfi =
6135			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6136		phba->sli4_hba.max_cfg_param.max_eq =
6137			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6138		phba->sli4_hba.max_cfg_param.max_rq =
6139			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6140		phba->sli4_hba.max_cfg_param.max_wq =
6141			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6142		phba->sli4_hba.max_cfg_param.max_cq =
6143			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6144		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6145		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6146		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6147		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
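		/*
		 * One VPI is consumed by the physical port itself, so the
		 * number of vports that can still be created is max_vpi - 1.
		 */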
6148		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6149				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6150		phba->max_vports = phba->max_vpi;
6151		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6152				"2003 cfg params Extents? %d "
6153				"XRI(B:%d M:%d), "
6154				"VPI(B:%d M:%d) "
6155				"VFI(B:%d M:%d) "
6156				"RPI(B:%d M:%d) "
6157				"FCFI(Count:%d)\n",
6158				phba->sli4_hba.extents_in_use,
6159				phba->sli4_hba.max_cfg_param.xri_base,
6160				phba->sli4_hba.max_cfg_param.max_xri,
6161				phba->sli4_hba.max_cfg_param.vpi_base,
6162				phba->sli4_hba.max_cfg_param.max_vpi,
6163				phba->sli4_hba.max_cfg_param.vfi_base,
6164				phba->sli4_hba.max_cfg_param.max_vfi,
6165				phba->sli4_hba.max_cfg_param.rpi_base,
6166				phba->sli4_hba.max_cfg_param.max_rpi,
6167				phba->sli4_hba.max_cfg_param.max_fcfi);
6168	}
6169
6170	if (rc)
6171		goto read_cfg_out;
6172
	/* Limit cfg_hba_queue_depth to the XRIs left after the ELS reservation */
6174	if (phba->cfg_hba_queue_depth >
6175		(phba->sli4_hba.max_cfg_param.max_xri -
6176			lpfc_sli4_get_els_iocb_cnt(phba)))
6177		phba->cfg_hba_queue_depth =
6178			phba->sli4_hba.max_cfg_param.max_xri -
6179				lpfc_sli4_get_els_iocb_cnt(phba);
6180
6181	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6182	    LPFC_SLI_INTF_IF_TYPE_2)
6183		goto read_cfg_out;
6184
6185	/* get the pf# and vf# for SLI4 if_type 2 port */
6186	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6187		  sizeof(struct lpfc_sli4_cfg_mhdr));
6188	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6189			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6190			 length, LPFC_SLI4_MBX_EMBED);
6191
6192	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6193	shdr = (union lpfc_sli4_cfg_shdr *)
6194				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6195	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6196	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6197	if (rc || shdr_status || shdr_add_status) {
6198		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6199				"3026 Mailbox failed , mbxCmd x%x "
6200				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6201				bf_get(lpfc_mqe_command, &pmb->u.mqe),
6202				bf_get(lpfc_mqe_status, &pmb->u.mqe));
6203		rc = -EIO;
6204		goto read_cfg_out;
6205	}
6206
	/* Search for the fc_fcoe resource descriptor */
6208	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6209	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6210
6211	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6212		desc = (struct lpfc_rsrc_desc_fcfcoe *)
6213			&get_func_cfg->func_cfg.desc[i];
6214		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6215		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6216			phba->sli4_hba.iov.pf_number =
6217				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6218			phba->sli4_hba.iov.vf_number =
6219				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6220			break;
6221		}
6222	}
6223
6224	if (i < LPFC_RSRC_DESC_MAX_NUM)
6225		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6226				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6227				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6228				phba->sli4_hba.iov.vf_number);
6229	else {
6230		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6231				"3028 GET_FUNCTION_CONFIG: failed to find "
6232				"Resrouce Descriptor:x%x\n",
6233				LPFC_RSRC_DESC_TYPE_FCFCOE);
6234		rc = -EIO;
6235	}
6236
6237read_cfg_out:
6238	mempool_free(pmb, phba->mbox_mem_pool);
6239	return rc;
6240}
6241
6242/**
6243 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6244 * @phba: pointer to lpfc hba data structure.
6245 *
6246 * This routine is invoked to setup the port-side endian order when
6247 * the port if_type is 0.  This routine has no function for other
6248 * if_types.
6249 *
6250 * Return codes
6251 * 	0 - successful
6252 * 	-ENOMEM - No available memory
6253 *      -EIO - The mailbox failed to complete successfully.
6254 **/
6255static int
6256lpfc_setup_endian_order(struct lpfc_hba *phba)
6257{
6258	LPFC_MBOXQ_t *mboxq;
6259	uint32_t if_type, rc = 0;
6260	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6261				      HOST_ENDIAN_HIGH_WORD1};
6262
6263	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6264	switch (if_type) {
6265	case LPFC_SLI_INTF_IF_TYPE_0:
6266		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6267						       GFP_KERNEL);
6268		if (!mboxq) {
6269			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6270					"0492 Unable to allocate memory for "
6271					"issuing SLI_CONFIG_SPECIAL mailbox "
6272					"command\n");
6273			return -ENOMEM;
6274		}
6275
6276		/*
6277		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6278		 * two words to contain special data values and no other data.
6279		 */
6280		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6281		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6282		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6283		if (rc != MBX_SUCCESS) {
6284			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6285					"0493 SLI_CONFIG_SPECIAL mailbox "
6286					"failed with status x%x\n",
6287					rc);
6288			rc = -EIO;
6289		}
6290		mempool_free(mboxq, phba->mbox_mem_pool);
6291		break;
6292	case LPFC_SLI_INTF_IF_TYPE_2:
6293	case LPFC_SLI_INTF_IF_TYPE_1:
6294	default:
6295		break;
6296	}
6297	return rc;
6298}
6299
6300/**
6301 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6302 * @phba: pointer to lpfc hba data structure.
6303 *
 * This routine is invoked to check the user-settable queue counts for EQs and
 * CQs.  After this routine is called, the counts will be set to valid values that
6306 * adhere to the constraints of the system's interrupt vectors and the port's
6307 * queue resources.
6308 *
6309 * Return codes
6310 *      0 - successful
6311 *      -ENOMEM - No available memory
6312 **/
6313static int
6314lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6315{
6316	int cfg_fcp_wq_count;
6317	int cfg_fcp_eq_count;
6318
6319	/*
	 * Sanity check the configured queue parameters against the run-time
6321	 * device parameters
6322	 */
6323
6324	/* Sanity check on FCP fast-path WQ parameters */
6325	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
6326	if (cfg_fcp_wq_count >
6327	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
6328		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
6329				   LPFC_SP_WQN_DEF;
6330		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
6331			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6332					"2581 Not enough WQs (%d) from "
6333					"the pci function for supporting "
6334					"FCP WQs (%d)\n",
6335					phba->sli4_hba.max_cfg_param.max_wq,
6336					phba->cfg_fcp_wq_count);
6337			goto out_error;
6338		}
6339		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6340				"2582 Not enough WQs (%d) from the pci "
6341				"function for supporting the requested "
6342				"FCP WQs (%d), the actual FCP WQs can "
6343				"be supported: %d\n",
6344				phba->sli4_hba.max_cfg_param.max_wq,
6345				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6346	}
6347	/* The actual number of FCP work queues adopted */
6348	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6349
6350	/* Sanity check on FCP fast-path EQ parameters */
6351	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6352	if (cfg_fcp_eq_count >
6353	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6354		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6355				   LPFC_SP_EQN_DEF;
6356		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6357			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6358					"2574 Not enough EQs (%d) from the "
6359					"pci function for supporting FCP "
6360					"EQs (%d)\n",
6361					phba->sli4_hba.max_cfg_param.max_eq,
6362					phba->cfg_fcp_eq_count);
6363			goto out_error;
6364		}
6365		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6366				"2575 Not enough EQs (%d) from the pci "
6367				"function for supporting the requested "
6368				"FCP EQs (%d), the actual FCP EQs can "
6369				"be supported: %d\n",
6370				phba->sli4_hba.max_cfg_param.max_eq,
6371				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
6372	}
6373	/* It does not make sense to have more EQs than WQs */
6374	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6375		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6376				"2593 The FCP EQ count(%d) cannot be greater "
6377				"than the FCP WQ count(%d), limiting the "
6378				"FCP EQ count to %d\n", cfg_fcp_eq_count,
6379				phba->cfg_fcp_wq_count,
6380				phba->cfg_fcp_wq_count);
6381		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6382	}
6383	/* The actual number of FCP event queues adopted */
6384	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
6385	/* The overall number of event queues used */
6386	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
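	/*
	 * For example, with 4 fast-path FCP EQs the overall EQ count is
	 * 4 + LPFC_SP_EQN_DEF (the single slow-path EQ), i.e. 5.
	 */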
6387
6388	/* Get EQ depth from module parameter, fake the default for now */
6389	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6390	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6391
6392	/* Get CQ depth from module parameter, fake the default for now */
6393	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6394	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6395
6396	return 0;
6397out_error:
6398	return -ENOMEM;
6399}
6400
6401/**
6402 * lpfc_sli4_queue_create - Create all the SLI4 queues
6403 * @phba: pointer to lpfc hba data structure.
6404 *
6405 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6406 * operation. For each SLI4 queue type, the parameters such as queue entry
6407 * count (queue depth) shall be taken from the module parameter. For now,
6408 * we just use some constant number as place holder.
6409 *
6410 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
6413 *      -EIO - The mailbox failed to complete successfully.
6414 **/
6415int
6416lpfc_sli4_queue_create(struct lpfc_hba *phba)
6417{
6418	struct lpfc_queue *qdesc;
6419	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6420
6421	/*
6422	 * Create Event Queues (EQs)
6423	 */
6424
6425	/* Create slow path event queue */
6426	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6427				      phba->sli4_hba.eq_ecount);
6428	if (!qdesc) {
6429		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6430				"0496 Failed allocate slow-path EQ\n");
6431		goto out_error;
6432	}
6433	phba->sli4_hba.sp_eq = qdesc;
6434
6435	/*
6436	 * Create fast-path FCP Event Queue(s).  The cfg_fcp_eq_count can be
6437	 * zero whenever there is exactly one interrupt vector.  This is not
6438	 * an error.
6439	 */
6440	if (phba->cfg_fcp_eq_count) {
6441		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6442				       phba->cfg_fcp_eq_count), GFP_KERNEL);
6443		if (!phba->sli4_hba.fp_eq) {
6444			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6445					"2576 Failed allocate memory for "
6446					"fast-path EQ record array\n");
6447			goto out_free_sp_eq;
6448		}
6449	}
6450	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6451		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6452					      phba->sli4_hba.eq_ecount);
6453		if (!qdesc) {
6454			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6455					"0497 Failed allocate fast-path EQ\n");
6456			goto out_free_fp_eq;
6457		}
6458		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6459	}
6460
6461	/*
6462	 * Create Complete Queues (CQs)
6463	 */
6464
6465	/* Create slow-path Mailbox Command Complete Queue */
6466	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6467				      phba->sli4_hba.cq_ecount);
6468	if (!qdesc) {
6469		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6470				"0500 Failed allocate slow-path mailbox CQ\n");
6471		goto out_free_fp_eq;
6472	}
6473	phba->sli4_hba.mbx_cq = qdesc;
6474
6475	/* Create slow-path ELS Complete Queue */
6476	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6477				      phba->sli4_hba.cq_ecount);
6478	if (!qdesc) {
6479		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6480				"0501 Failed allocate slow-path ELS CQ\n");
6481		goto out_free_mbx_cq;
6482	}
6483	phba->sli4_hba.els_cq = qdesc;
6484
6485
6486	/*
6487	 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
6488	 * If there are no FCP EQs then create exactly one FCP CQ.
6489	 */
6490	if (phba->cfg_fcp_eq_count)
6491		phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6492						 phba->cfg_fcp_eq_count),
6493						GFP_KERNEL);
6494	else
6495		phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6496						GFP_KERNEL);
6497	if (!phba->sli4_hba.fcp_cq) {
6498		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6499				"2577 Failed allocate memory for fast-path "
6500				"CQ record array\n");
6501		goto out_free_els_cq;
6502	}
6503	fcp_cqidx = 0;
6504	do {
6505		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6506					      phba->sli4_hba.cq_ecount);
6507		if (!qdesc) {
6508			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6509					"0499 Failed allocate fast-path FCP "
6510					"CQ (%d)\n", fcp_cqidx);
6511			goto out_free_fcp_cq;
6512		}
6513		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6514	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6515
6516	/* Create Mailbox Command Queue */
6517	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6518	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6519
6520	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6521				      phba->sli4_hba.mq_ecount);
6522	if (!qdesc) {
6523		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6524				"0505 Failed allocate slow-path MQ\n");
6525		goto out_free_fcp_cq;
6526	}
6527	phba->sli4_hba.mbx_wq = qdesc;
6528
6529	/*
6530	 * Create all the Work Queues (WQs)
6531	 */
6532	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6533	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6534
6535	/* Create slow-path ELS Work Queue */
6536	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6537				      phba->sli4_hba.wq_ecount);
6538	if (!qdesc) {
6539		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6540				"0504 Failed allocate slow-path ELS WQ\n");
6541		goto out_free_mbx_wq;
6542	}
6543	phba->sli4_hba.els_wq = qdesc;
6544
6545	/* Create fast-path FCP Work Queue(s) */
6546	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6547				phba->cfg_fcp_wq_count), GFP_KERNEL);
6548	if (!phba->sli4_hba.fcp_wq) {
6549		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6550				"2578 Failed allocate memory for fast-path "
6551				"WQ record array\n");
6552		goto out_free_els_wq;
6553	}
6554	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6555		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6556					      phba->sli4_hba.wq_ecount);
6557		if (!qdesc) {
6558			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6559					"0503 Failed allocate fast-path FCP "
6560					"WQ (%d)\n", fcp_wqidx);
6561			goto out_free_fcp_wq;
6562		}
6563		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6564	}
6565
6566	/*
6567	 * Create Receive Queue (RQ)
6568	 */
6569	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6570	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6571
6572	/* Create Receive Queue for header */
6573	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6574				      phba->sli4_hba.rq_ecount);
6575	if (!qdesc) {
6576		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6577				"0506 Failed allocate receive HRQ\n");
6578		goto out_free_fcp_wq;
6579	}
6580	phba->sli4_hba.hdr_rq = qdesc;
6581
6582	/* Create Receive Queue for data */
6583	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6584				      phba->sli4_hba.rq_ecount);
6585	if (!qdesc) {
6586		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6587				"0507 Failed allocate receive DRQ\n");
6588		goto out_free_hdr_rq;
6589	}
6590	phba->sli4_hba.dat_rq = qdesc;
6591
6592	return 0;
6593
6594out_free_hdr_rq:
6595	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6596	phba->sli4_hba.hdr_rq = NULL;
6597out_free_fcp_wq:
6598	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6599		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6600		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6601	}
6602	kfree(phba->sli4_hba.fcp_wq);
6603	phba->sli4_hba.fcp_wq = NULL;
6604out_free_els_wq:
6605	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6606	phba->sli4_hba.els_wq = NULL;
6607out_free_mbx_wq:
6608	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6609	phba->sli4_hba.mbx_wq = NULL;
6610out_free_fcp_cq:
6611	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6612		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6613		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6614	}
6615	kfree(phba->sli4_hba.fcp_cq);
6616	phba->sli4_hba.fcp_cq = NULL;
6617out_free_els_cq:
6618	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6619	phba->sli4_hba.els_cq = NULL;
6620out_free_mbx_cq:
6621	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6622	phba->sli4_hba.mbx_cq = NULL;
6623out_free_fp_eq:
6624	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6625		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6626		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6627	}
6628	kfree(phba->sli4_hba.fp_eq);
6629	phba->sli4_hba.fp_eq = NULL;
6630out_free_sp_eq:
6631	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6632	phba->sli4_hba.sp_eq = NULL;
6633out_error:
6634	return -ENOMEM;
6635}
6636
6637/**
6638 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6639 * @phba: pointer to lpfc hba data structure.
6640 *
 * This routine is invoked to release all the SLI4 queues allocated for the
 * FCoE HBA operation.
6648 **/
6649void
6650lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6651{
6652	int fcp_qidx;
6653
6654	/* Release mailbox command work queue */
6655	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6656	phba->sli4_hba.mbx_wq = NULL;
6657
6658	/* Release ELS work queue */
6659	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6660	phba->sli4_hba.els_wq = NULL;
6661
6662	/* Release FCP work queue */
6663	if (phba->sli4_hba.fcp_wq != NULL)
6664		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
6665		     fcp_qidx++)
6666			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6667	kfree(phba->sli4_hba.fcp_wq);
6668	phba->sli4_hba.fcp_wq = NULL;
6669
6670	/* Release unsolicited receive queue */
6671	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6672	phba->sli4_hba.hdr_rq = NULL;
6673	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6674	phba->sli4_hba.dat_rq = NULL;
6675
6676	/* Release ELS complete queue */
6677	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6678	phba->sli4_hba.els_cq = NULL;
6679
6680	/* Release mailbox command complete queue */
6681	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6682	phba->sli4_hba.mbx_cq = NULL;
6683
6684	/* Release FCP response complete queue */
6685	fcp_qidx = 0;
6686	if (phba->sli4_hba.fcp_cq != NULL)
6687		do
6688			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6689		while (++fcp_qidx < phba->cfg_fcp_eq_count);
6690	kfree(phba->sli4_hba.fcp_cq);
6691	phba->sli4_hba.fcp_cq = NULL;
6692
6693	/* Release fast-path event queue */
6694	if (phba->sli4_hba.fp_eq != NULL)
6695		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6696		     fcp_qidx++)
6697			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6698	kfree(phba->sli4_hba.fp_eq);
6699	phba->sli4_hba.fp_eq = NULL;
6700
6701	/* Release slow-path event queue */
6702	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6703	phba->sli4_hba.sp_eq = NULL;
6704
6705	return;
6706}
6707
6708/**
6709 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6710 * @phba: pointer to lpfc hba data structure.
6711 *
6712 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6713 * operation.
6714 *
6715 * Return codes
6716 *      0 - successful
6717 *      -ENOMEM - No available memory
6718 *      -EIO - The mailbox failed to complete successfully.
6719 **/
6720int
6721lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6722{
6723	int rc = -ENOMEM;
6724	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6725	int fcp_cq_index = 0;
6726
6727	/*
6728	 * Set up Event Queues (EQs)
6729	 */
6730
6731	/* Set up slow-path event queue */
6732	if (!phba->sli4_hba.sp_eq) {
6733		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6734				"0520 Slow-path EQ not allocated\n");
6735		goto out_error;
6736	}
6737	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6738			    LPFC_SP_DEF_IMAX);
6739	if (rc) {
6740		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6741				"0521 Failed setup of slow-path EQ: "
6742				"rc = 0x%x\n", rc);
6743		goto out_error;
6744	}
6745	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6746			"2583 Slow-path EQ setup: queue-id=%d\n",
6747			phba->sli4_hba.sp_eq->queue_id);
6748
6749	/* Set up fast-path event queue */
6750	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
6751		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6752				"3147 Fast-path EQs not allocated\n");
6753		rc = -ENOMEM;
6754		goto out_destroy_sp_eq;
6755	}
6756	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6757		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6758			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6759					"0522 Fast-path EQ (%d) not "
6760					"allocated\n", fcp_eqidx);
6761			rc = -ENOMEM;
6762			goto out_destroy_fp_eq;
6763		}
6764		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6765				    phba->cfg_fcp_imax);
6766		if (rc) {
6767			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6768					"0523 Failed setup of fast-path EQ "
6769					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6770			goto out_destroy_fp_eq;
6771		}
6772		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6773				"2584 Fast-path EQ setup: "
6774				"queue[%d]-id=%d\n", fcp_eqidx,
6775				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6776	}
6777
6778	/*
6779	 * Set up Complete Queues (CQs)
6780	 */
6781
6782	/* Set up slow-path MBOX Complete Queue as the first CQ */
6783	if (!phba->sli4_hba.mbx_cq) {
6784		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6785				"0528 Mailbox CQ not allocated\n");
6786		rc = -ENOMEM;
6787		goto out_destroy_fp_eq;
6788	}
6789	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6790			    LPFC_MCQ, LPFC_MBOX);
6791	if (rc) {
6792		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6793				"0529 Failed setup of slow-path mailbox CQ: "
6794				"rc = 0x%x\n", rc);
6795		goto out_destroy_fp_eq;
6796	}
6797	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6798			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6799			phba->sli4_hba.mbx_cq->queue_id,
6800			phba->sli4_hba.sp_eq->queue_id);
6801
6802	/* Set up slow-path ELS Complete Queue */
6803	if (!phba->sli4_hba.els_cq) {
6804		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6805				"0530 ELS CQ not allocated\n");
6806		rc = -ENOMEM;
6807		goto out_destroy_mbx_cq;
6808	}
6809	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6810			    LPFC_WCQ, LPFC_ELS);
6811	if (rc) {
6812		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6813				"0531 Failed setup of slow-path ELS CQ: "
6814				"rc = 0x%x\n", rc);
6815		goto out_destroy_mbx_cq;
6816	}
6817	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6818			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6819			phba->sli4_hba.els_cq->queue_id,
6820			phba->sli4_hba.sp_eq->queue_id);
6821
6822	/* Set up fast-path FCP Response Complete Queue */
6823	if (!phba->sli4_hba.fcp_cq) {
6824		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6825				"3148 Fast-path FCP CQ array not "
6826				"allocated\n");
6827		rc = -ENOMEM;
6828		goto out_destroy_els_cq;
6829	}
6830	fcp_cqidx = 0;
6831	do {
6832		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6833			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6834					"0526 Fast-path FCP CQ (%d) not "
6835					"allocated\n", fcp_cqidx);
6836			rc = -ENOMEM;
6837			goto out_destroy_fcp_cq;
6838		}
6839		if (phba->cfg_fcp_eq_count)
6840			rc = lpfc_cq_create(phba,
6841					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6842					    phba->sli4_hba.fp_eq[fcp_cqidx],
6843					    LPFC_WCQ, LPFC_FCP);
6844		else
6845			rc = lpfc_cq_create(phba,
6846					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6847					    phba->sli4_hba.sp_eq,
6848					    LPFC_WCQ, LPFC_FCP);
6849		if (rc) {
6850			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6851					"0527 Failed setup of fast-path FCP "
6852					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6853			goto out_destroy_fcp_cq;
6854		}
6855		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6856				"2588 FCP CQ setup: cq[%d]-id=%d, "
6857				"parent %seq[%d]-id=%d\n",
6858				fcp_cqidx,
6859				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6860				(phba->cfg_fcp_eq_count) ? "" : "sp_",
6861				fcp_cqidx,
6862				(phba->cfg_fcp_eq_count) ?
6863				   phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6864				   phba->sli4_hba.sp_eq->queue_id);
6865	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6866
6867	/*
6868	 * Set up all the Work Queues (WQs)
6869	 */
6870
6871	/* Set up Mailbox Command Queue */
6872	if (!phba->sli4_hba.mbx_wq) {
6873		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6874				"0538 Slow-path MQ not allocated\n");
6875		rc = -ENOMEM;
6876		goto out_destroy_fcp_cq;
6877	}
6878	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6879			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6880	if (rc) {
6881		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6882				"0539 Failed setup of slow-path MQ: "
6883				"rc = 0x%x\n", rc);
6884		goto out_destroy_fcp_cq;
6885	}
6886	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6887			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6888			phba->sli4_hba.mbx_wq->queue_id,
6889			phba->sli4_hba.mbx_cq->queue_id);
6890
6891	/* Set up slow-path ELS Work Queue */
6892	if (!phba->sli4_hba.els_wq) {
6893		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6894				"0536 Slow-path ELS WQ not allocated\n");
6895		rc = -ENOMEM;
6896		goto out_destroy_mbx_wq;
6897	}
6898	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6899			    phba->sli4_hba.els_cq, LPFC_ELS);
6900	if (rc) {
6901		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6902				"0537 Failed setup of slow-path ELS WQ: "
6903				"rc = 0x%x\n", rc);
6904		goto out_destroy_mbx_wq;
6905	}
6906	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6907			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6908			phba->sli4_hba.els_wq->queue_id,
6909			phba->sli4_hba.els_cq->queue_id);
6910
6911	/* Set up fast-path FCP Work Queue */
6912	if (!phba->sli4_hba.fcp_wq) {
6913		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6914				"3149 Fast-path FCP WQ array not "
6915				"allocated\n");
6916		rc = -ENOMEM;
6917		goto out_destroy_els_wq;
6918	}
6919	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6920		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6921			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6922					"0534 Fast-path FCP WQ (%d) not "
6923					"allocated\n", fcp_wqidx);
6924			rc = -ENOMEM;
6925			goto out_destroy_fcp_wq;
6926		}
6927		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6928				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6929				    LPFC_FCP);
6930		if (rc) {
6931			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6932					"0535 Failed setup of fast-path FCP "
6933					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6934			goto out_destroy_fcp_wq;
6935		}
6936		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6937				"2591 FCP WQ setup: wq[%d]-id=%d, "
6938				"parent cq[%d]-id=%d\n",
6939				fcp_wqidx,
6940				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6941				fcp_cq_index,
6942				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6943		/* Round robin FCP Work Queue's Completion Queue assignment */
6944		if (phba->cfg_fcp_eq_count)
6945			fcp_cq_index = ((fcp_cq_index + 1) %
6946					phba->cfg_fcp_eq_count);
6947	}
6948
6949	/*
6950	 * Create Receive Queue (RQ)
6951	 */
6952	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6953		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6954				"0540 Receive Queue not allocated\n");
6955		rc = -ENOMEM;
6956		goto out_destroy_fcp_wq;
6957	}
6958
6959	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
6960	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
6961
6962	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6963			    phba->sli4_hba.els_cq, LPFC_USOL);
6964	if (rc) {
6965		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6966				"0541 Failed setup of Receive Queue: "
6967				"rc = 0x%x\n", rc);
6968		goto out_destroy_fcp_wq;
6969	}
6970
6971	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6972			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6973			"parent cq-id=%d\n",
6974			phba->sli4_hba.hdr_rq->queue_id,
6975			phba->sli4_hba.dat_rq->queue_id,
6976			phba->sli4_hba.els_cq->queue_id);
6977	return 0;
6978
6979out_destroy_fcp_wq:
6980	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6981		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6982out_destroy_els_wq:
6983	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6984out_destroy_mbx_wq:
6985	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6986out_destroy_fcp_cq:
6987	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6988		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6989out_destroy_els_cq:
6990	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6991out_destroy_mbx_cq:
6992	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6993out_destroy_fp_eq:
6994	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6995		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6996out_destroy_sp_eq:
6997	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6998out_error:
6999	return rc;
7000}
7001
7002/**
7003 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
7004 * @phba: pointer to lpfc hba data structure.
7005 *
 * This routine is invoked to tear down (unset) all the SLI4 queues used by
 * the FCoE HBA operation.
7013 **/
7014void
7015lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7016{
7017	int fcp_qidx;
7018
7019	/* Unset mailbox command work queue */
7020	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7021	/* Unset ELS work queue */
7022	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7023	/* Unset unsolicited receive queue */
7024	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7025	/* Unset FCP work queue */
7026	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
7027		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7028	/* Unset mailbox command complete queue */
7029	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7030	/* Unset ELS complete queue */
7031	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7032	/* Unset FCP response complete queue */
7033	if (phba->sli4_hba.fcp_cq) {
7034		fcp_qidx = 0;
7035		do {
7036			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7037		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
7038	}
7039	/* Unset fast-path event queue */
7040	if (phba->sli4_hba.fp_eq) {
7041		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
7042		     fcp_qidx++)
7043			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
7044	}
7045	/* Unset slow-path event queue */
7046	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7047}
7048
7049/**
7050 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
7051 * @phba: pointer to lpfc hba data structure.
7052 *
7053 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of a completion queue event is a completion queue entry
 * (CQE). For now, this pool is used by the interrupt service routine to queue
7056 * the following HBA completion queue events for the worker thread to process:
7057 *   - Mailbox asynchronous events
7058 *   - Receive queue completion unsolicited events
7059 * Later, this can be used for all the slow-path events.
7060 *
7061 * Return codes
7062 *      0 - successful
7063 *      -ENOMEM - No available memory
7064 **/
7065static int
7066lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
7067{
7068	struct lpfc_cq_event *cq_event;
7069	int i;
7070
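	/*
	 * Pool sizing note: four events are pre-allocated per CQ entry
	 * (4 * cq_ecount) so the interrupt service routine can queue
	 * events for the worker thread without allocating in atomic
	 * context.
	 */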
7071	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7072		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7073		if (!cq_event)
7074			goto out_pool_create_fail;
7075		list_add_tail(&cq_event->list,
7076			      &phba->sli4_hba.sp_cqe_event_pool);
7077	}
7078	return 0;
7079
7080out_pool_create_fail:
7081	lpfc_sli4_cq_event_pool_destroy(phba);
7082	return -ENOMEM;
7083}
7084
7085/**
7086 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
7087 * @phba: pointer to lpfc hba data structure.
7088 *
7089 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
7091 * cleanup routine to free all the outstanding completion-queue events
7092 * allocated from this pool back into the pool before invoking this routine
7093 * to destroy the pool.
7094 **/
7095static void
7096lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7097{
7098	struct lpfc_cq_event *cq_event, *next_cq_event;
7099
7100	list_for_each_entry_safe(cq_event, next_cq_event,
7101				 &phba->sli4_hba.sp_cqe_event_pool, list) {
7102		list_del(&cq_event->list);
7103		kfree(cq_event);
7104	}
7105}
7106
7107/**
7108 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7109 * @phba: pointer to lpfc hba data structure.
7110 *
 * This routine is the lock-free version of the API invoked to allocate a
7112 * completion-queue event from the free pool.
7113 *
7114 * Return: Pointer to the newly allocated completion-queue event if successful
7115 *         NULL otherwise.
7116 **/
7117struct lpfc_cq_event *
7118__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7119{
7120	struct lpfc_cq_event *cq_event = NULL;
7121
7122	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7123			 struct lpfc_cq_event, list);
7124	return cq_event;
7125}
7126
7127/**
7128 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7129 * @phba: pointer to lpfc hba data structure.
7130 *
 * This routine is the locking version of the API invoked to allocate a
7132 * completion-queue event from the free pool.
7133 *
7134 * Return: Pointer to the newly allocated completion-queue event if successful
7135 *         NULL otherwise.
7136 **/
7137struct lpfc_cq_event *
7138lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7139{
7140	struct lpfc_cq_event *cq_event;
7141	unsigned long iflags;
7142
7143	spin_lock_irqsave(&phba->hbalock, iflags);
7144	cq_event = __lpfc_sli4_cq_event_alloc(phba);
7145	spin_unlock_irqrestore(&phba->hbalock, iflags);
7146	return cq_event;
7147}
7148
7149/**
7150 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7151 * @phba: pointer to lpfc hba data structure.
7152 * @cq_event: pointer to the completion queue event to be freed.
7153 *
 * This routine is the lock-free version of the API invoked to release a
7155 * completion-queue event back into the free pool.
7156 **/
7157void
7158__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7159			     struct lpfc_cq_event *cq_event)
7160{
7161	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7162}
7163
7164/**
7165 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7166 * @phba: pointer to lpfc hba data structure.
7167 * @cq_event: pointer to the completion queue event to be freed.
7168 *
 * This routine is the locking version of the API invoked to release a
7170 * completion-queue event back into the free pool.
7171 **/
7172void
7173lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7174			   struct lpfc_cq_event *cq_event)
7175{
7176	unsigned long iflags;
7177	spin_lock_irqsave(&phba->hbalock, iflags);
7178	__lpfc_sli4_cq_event_release(phba, cq_event);
7179	spin_unlock_irqrestore(&phba->hbalock, iflags);
7180}
7181
7182/**
7183 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
7184 * @phba: pointer to lpfc hba data structure.
7185 *
 * This routine frees all the pending completion-queue events back into the
 * free pool for a device reset.
7188 **/
7189static void
7190lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7191{
7192	LIST_HEAD(cqelist);
7193	struct lpfc_cq_event *cqe;
7194	unsigned long iflags;
7195
7196	/* Retrieve all the pending WCQEs from pending WCQE lists */
7197	spin_lock_irqsave(&phba->hbalock, iflags);
7198	/* Pending FCP XRI abort events */
7199	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7200			 &cqelist);
7201	/* Pending ELS XRI abort events */
7202	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7203			 &cqelist);
	/* Pending async events */
7205	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7206			 &cqelist);
7207	spin_unlock_irqrestore(&phba->hbalock, iflags);
7208
7209	while (!list_empty(&cqelist)) {
7210		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7211		lpfc_sli4_cq_event_release(phba, cqe);
7212	}
7213}
7214
7215/**
7216 * lpfc_pci_function_reset - Reset pci function.
7217 * @phba: pointer to lpfc hba data structure.
7218 *
 * This routine is invoked to request a PCI function reset. It destroys all
 * resources assigned to the PCI function that originated this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -ENXIO - The mailbox failed to complete successfully.
 *      -ENODEV - The port failed to become ready after the reset.
7226 **/
7227int
7228lpfc_pci_function_reset(struct lpfc_hba *phba)
7229{
7230	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	uint32_t if_type;
7232	uint32_t shdr_status, shdr_add_status;
7233	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7234	union lpfc_sli4_cfg_shdr *shdr;
7235	struct lpfc_register reg_data;
7236
7237	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7238	switch (if_type) {
7239	case LPFC_SLI_INTF_IF_TYPE_0:
7240		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7241						       GFP_KERNEL);
7242		if (!mboxq) {
7243			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7244					"0494 Unable to allocate memory for "
7245					"issuing SLI_FUNCTION_RESET mailbox "
7246					"command\n");
7247			return -ENOMEM;
7248		}
7249
7250		/* Setup PCI function reset mailbox-ioctl command */
7251		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7252				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7253				 LPFC_SLI4_MBX_EMBED);
7254		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7255		shdr = (union lpfc_sli4_cfg_shdr *)
7256			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7257		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7258		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7259					 &shdr->response);
7260		if (rc != MBX_TIMEOUT)
7261			mempool_free(mboxq, phba->mbox_mem_pool);
7262		if (shdr_status || shdr_add_status || rc) {
7263			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264					"0495 SLI_FUNCTION_RESET mailbox "
7265					"failed with status x%x add_status x%x,"
7266					" mbx status x%x\n",
7267					shdr_status, shdr_add_status, rc);
7268			rc = -ENXIO;
7269		}
7270		break;
7271	case LPFC_SLI_INTF_IF_TYPE_2:
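		/*
		 * If_type 2 ports are reset by writing INIT_PORT to the
		 * SLIPORT control register and polling the status register
		 * for RDY; the whole sequence is retried up to
		 * MAX_IF_TYPE_2_RESETS times.
		 */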
7272		for (num_resets = 0;
7273		     num_resets < MAX_IF_TYPE_2_RESETS;
7274		     num_resets++) {
7275			reg_data.word0 = 0;
7276			bf_set(lpfc_sliport_ctrl_end, &reg_data,
7277			       LPFC_SLIPORT_LITTLE_ENDIAN);
7278			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7279			       LPFC_SLIPORT_INIT_PORT);
7280			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7281			       CTRLregaddr);
7282
7283			/*
7284			 * Poll the Port Status Register and wait for RDY for
7285			 * up to 10 seconds.  If the port doesn't respond, treat
7286			 * it as an error.  If the port responds with RN, start
7287			 * the loop again.
7288			 */
7289			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7290				msleep(10);
7291				if (lpfc_readl(phba->sli4_hba.u.if_type2.
7292					      STATUSregaddr, &reg_data.word0)) {
7293					rc = -ENODEV;
7294					goto out;
7295				}
7296				if (bf_get(lpfc_sliport_status_rn, &reg_data))
7297					reset_again++;
7298				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7299					break;
7300			}
7301
7302			/*
7303			 * If the port responds to the init request with
7304			 * reset needed, delay for a bit and restart the loop.
7305			 */
7306			if (reset_again && (rdy_chk < 1000)) {
7307				msleep(10);
7308				reset_again = 0;
7309				continue;
7310			}
7311
7312			/* Detect any port errors. */
7313			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7314			    (rdy_chk >= 1000)) {
7315				phba->work_status[0] = readl(
7316					phba->sli4_hba.u.if_type2.ERR1regaddr);
7317				phba->work_status[1] = readl(
7318					phba->sli4_hba.u.if_type2.ERR2regaddr);
7319				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7320					"2890 Port Error Detected "
7321					"during Port Reset: "
7322					"port status reg 0x%x, "
7323					"error 1=0x%x, error 2=0x%x\n",
7324					reg_data.word0,
7325					phba->work_status[0],
7326					phba->work_status[1]);
7327				rc = -ENODEV;
7328			}
7329
7330			/*
7331			 * Terminate the outer loop provided the Port indicated
7332			 * ready within 10 seconds.
7333			 */
7334			if (rdy_chk < 1000)
7335				break;
7336		}
7337		/* delay driver action following IF_TYPE_2 function reset */
7338		msleep(100);
7339		break;
7340	case LPFC_SLI_INTF_IF_TYPE_1:
7341	default:
7342		break;
7343	}
7344
7345out:
7346	/* Catch the not-ready port failure after a port reset. */
7347	if (num_resets >= MAX_IF_TYPE_2_RESETS)
7348		rc = -ENODEV;
7349
7350	return rc;
7351}
7352
7353/**
7354 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7355 * @phba: pointer to lpfc hba data structure.
7356 * @cnt: number of nop mailbox commands to send.
7357 *
 * This routine is invoked to send @cnt NOP mailbox commands and wait for
 * each command to complete.
 *
 * Return: the number of NOP mailbox commands completed.
7362 **/
7363static int
7364lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7365{
7366	LPFC_MBOXQ_t *mboxq;
7367	int length, cmdsent;
7368	uint32_t mbox_tmo;
7369	uint32_t rc = 0;
7370	uint32_t shdr_status, shdr_add_status;
7371	union lpfc_sli4_cfg_shdr *shdr;
7372
7373	if (cnt == 0) {
7374		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7375				"2518 Requested to send 0 NOP mailbox cmd\n");
7376		return cnt;
7377	}
7378
7379	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7380	if (!mboxq) {
7381		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7382				"2519 Unable to allocate memory for issuing "
7383				"NOP mailbox command\n");
7384		return 0;
7385	}
7386
7387	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7388	length = (sizeof(struct lpfc_mbx_nop) -
7389		  sizeof(struct lpfc_sli4_cfg_mhdr));
7390	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7391			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7392
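	/*
	 * Issue each NOP in turn: poll when device interrupts are not yet
	 * enabled, otherwise block-wait with the standard mailbox timeout
	 * for this command.
	 */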
7393	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7394		if (!phba->sli4_hba.intr_enable)
7395			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7396		else {
7397			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7398			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7399		}
7400		if (rc == MBX_TIMEOUT)
7401			break;
7402		/* Check return status */
7403		shdr = (union lpfc_sli4_cfg_shdr *)
7404			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7405		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7406		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7407					 &shdr->response);
7408		if (shdr_status || shdr_add_status || rc) {
7409			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7410					"2520 NOP mailbox command failed "
7411					"status x%x add_status x%x mbx "
7412					"status x%x\n", shdr_status,
7413					shdr_add_status, rc);
7414			break;
7415		}
7416	}
7417
7418	if (rc != MBX_TIMEOUT)
7419		mempool_free(mboxq, phba->mbox_mem_pool);
7420
7421	return cmdsent;
7422}
7423
7424/**
7425 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7426 * @phba: pointer to lpfc hba data structure.
7427 *
7428 * This routine is invoked to set up the PCI device memory space for device
7429 * with SLI-4 interface spec.
7430 *
7431 * Return codes
7432 * 	0 - successful
7433 * 	other values - error
7434 **/
7435static int
7436lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7437{
7438	struct pci_dev *pdev;
7439	unsigned long bar0map_len, bar1map_len, bar2map_len;
7440	int error = -ENODEV;
7441	uint32_t if_type;
7442
7443	/* Obtain PCI device reference */
7444	if (!phba->pcidev)
7445		return error;
7446	else
7447		pdev = phba->pcidev;
7448
	/* Set the device DMA mask: prefer 64-bit, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}
7457
7458	/*
7459	 * The BARs and register set definitions and offset locations are
7460	 * dependent on the if_type.
7461	 */
7462	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7463				  &phba->sli4_hba.sli_intf.word0)) {
7464		return error;
7465	}
7466
7467	/* There is no SLI3 failback for SLI4 devices. */
7468	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7469	    LPFC_SLI_INTF_VALID) {
7470		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7471				"2894 SLI_INTF reg contents invalid "
7472				"sli_intf reg 0x%x\n",
7473				phba->sli4_hba.sli_intf.word0);
7474		return error;
7475	}
7476
7477	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7478	/*
7479	 * Get the bus address of SLI4 device Bar regions and the
7480	 * number of bytes required by each mapping. The mapping of the
7481	 * particular PCI BARs regions is dependent on the type of
7482	 * SLI4 device.
7483	 */
7484	if (pci_resource_start(pdev, 0)) {
7485		phba->pci_bar0_map = pci_resource_start(pdev, 0);
7486		bar0map_len = pci_resource_len(pdev, 0);
7487
7488		/*
7489		 * Map SLI4 PCI Config Space Register base to a kernel virtual
7490		 * addr
7491		 */
7492		phba->sli4_hba.conf_regs_memmap_p =
7493			ioremap(phba->pci_bar0_map, bar0map_len);
7494		if (!phba->sli4_hba.conf_regs_memmap_p) {
7495			dev_printk(KERN_ERR, &pdev->dev,
7496				   "ioremap failed for SLI4 PCI config "
7497				   "registers.\n");
7498			goto out;
7499		}
7500		/* Set up BAR0 PCI config space register memory map */
7501		lpfc_sli4_bar0_register_memmap(phba, if_type);
7502	} else {
7503		phba->pci_bar0_map = pci_resource_start(pdev, 1);
7504		bar0map_len = pci_resource_len(pdev, 1);
7505		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7506			dev_printk(KERN_ERR, &pdev->dev,
7507			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7508			goto out;
7509		}
7510		phba->sli4_hba.conf_regs_memmap_p =
7511				ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
7518		lpfc_sli4_bar0_register_memmap(phba, if_type);
7519	}
7520
7521	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7522	    (pci_resource_start(pdev, 2))) {
7523		/*
7524		 * Map SLI4 if type 0 HBA Control Register base to a kernel
7525		 * virtual address and setup the registers.
7526		 */
7527		phba->pci_bar1_map = pci_resource_start(pdev, 2);
7528		bar1map_len = pci_resource_len(pdev, 2);
7529		phba->sli4_hba.ctrl_regs_memmap_p =
7530				ioremap(phba->pci_bar1_map, bar1map_len);
7531		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7532			dev_printk(KERN_ERR, &pdev->dev,
7533			   "ioremap failed for SLI4 HBA control registers.\n");
7534			goto out_iounmap_conf;
7535		}
7536		lpfc_sli4_bar1_register_memmap(phba);
7537	}
7538
7539	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7540	    (pci_resource_start(pdev, 4))) {
7541		/*
7542		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7543		 * virtual address and setup the registers.
7544		 */
7545		phba->pci_bar2_map = pci_resource_start(pdev, 4);
7546		bar2map_len = pci_resource_len(pdev, 4);
7547		phba->sli4_hba.drbl_regs_memmap_p =
7548				ioremap(phba->pci_bar2_map, bar2map_len);
7549		if (!phba->sli4_hba.drbl_regs_memmap_p) {
7550			dev_printk(KERN_ERR, &pdev->dev,
7551			   "ioremap failed for SLI4 HBA doorbell registers.\n");
7552			goto out_iounmap_ctrl;
7553		}
7554		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7555		if (error)
7556			goto out_iounmap_all;
7557	}
7558
7559	return 0;
7560
7561out_iounmap_all:
7562	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7563out_iounmap_ctrl:
7564	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7565out_iounmap_conf:
7566	iounmap(phba->sli4_hba.conf_regs_memmap_p);
7567out:
7568	return error;
7569}
7570
7571/**
7572 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7573 * @phba: pointer to lpfc hba data structure.
7574 *
7575 * This routine is invoked to unset the PCI device memory space for device
7576 * with SLI-4 interface spec.
7577 **/
7578static void
7579lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7580{
7581	uint32_t if_type;
7582	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7583
7584	switch (if_type) {
7585	case LPFC_SLI_INTF_IF_TYPE_0:
7586		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7587		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7588		iounmap(phba->sli4_hba.conf_regs_memmap_p);
7589		break;
7590	case LPFC_SLI_INTF_IF_TYPE_2:
7591		iounmap(phba->sli4_hba.conf_regs_memmap_p);
7592		break;
7593	case LPFC_SLI_INTF_IF_TYPE_1:
7594	default:
7595		dev_printk(KERN_ERR, &phba->pcidev->dev,
7596			   "FATAL - unsupported SLI4 interface type - %d\n",
7597			   if_type);
7598		break;
7599	}
7600}
7601
7602/**
7603 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7604 * @phba: pointer to lpfc hba data structure.
7605 *
 * This routine is invoked to enable the MSI-X interrupt vectors on a device
 * with the SLI-3 interface spec. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled, leaking its vectors.
7617 *
7618 * Return codes
7619 *   0 - successful
7620 *   other values - error
7621 **/
7622static int
7623lpfc_sli_enable_msix(struct lpfc_hba *phba)
7624{
7625	int rc, i;
7626	LPFC_MBOXQ_t *pmb;
7627
7628	/* Set up MSI-X multi-message vectors */
7629	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7630		phba->msix_entries[i].entry = i;
7631
7632	/* Configure MSI-X capability structure */
7633	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7634				ARRAY_SIZE(phba->msix_entries));
7635	if (rc) {
7636		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7637				"0420 PCI enable MSI-X failed (%d)\n", rc);
7638		goto msi_fail_out;
7639	}
7640	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7641		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7642				"0477 MSI-X entry[%d]: vector=x%x "
7643				"message=%d\n", i,
7644				phba->msix_entries[i].vector,
7645				phba->msix_entries[i].entry);
7646	/*
7647	 * Assign MSI-X vectors to interrupt handlers
7648	 */
7649
7650	/* vector-0 is associated to slow-path handler */
7651	rc = request_irq(phba->msix_entries[0].vector,
7652			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7653			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7654	if (rc) {
7655		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7656				"0421 MSI-X slow-path request_irq failed "
7657				"(%d)\n", rc);
7658		goto msi_fail_out;
7659	}
7660
7661	/* vector-1 is associated to fast-path handler */
7662	rc = request_irq(phba->msix_entries[1].vector,
7663			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7664			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7665
7666	if (rc) {
7667		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7668				"0429 MSI-X fast-path request_irq failed "
7669				"(%d)\n", rc);
7670		goto irq_fail_out;
7671	}
7672
7673	/*
7674	 * Configure HBA MSI-X attention conditions to messages
7675	 */
7676	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7677
7678	if (!pmb) {
7679		rc = -ENOMEM;
7680		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7681				"0474 Unable to allocate memory for issuing "
7682				"MBOX_CONFIG_MSI command\n");
7683		goto mem_fail_out;
7684	}
7685	rc = lpfc_config_msi(phba, pmb);
7686	if (rc)
7687		goto mbx_fail_out;
7688	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7689	if (rc != MBX_SUCCESS) {
7690		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7691				"0351 Config MSI mailbox command failed, "
7692				"mbxCmd x%x, mbxStatus x%x\n",
7693				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7694		goto mbx_fail_out;
7695	}
7696
7697	/* Free memory allocated for mailbox command */
7698	mempool_free(pmb, phba->mbox_mem_pool);
7699	return rc;
7700
7701mbx_fail_out:
7702	/* Free memory allocated for mailbox command */
7703	mempool_free(pmb, phba->mbox_mem_pool);
7704
7705mem_fail_out:
7706	/* free the irq already requested */
7707	free_irq(phba->msix_entries[1].vector, phba);
7708
7709irq_fail_out:
7710	/* free the irq already requested */
7711	free_irq(phba->msix_entries[0].vector, phba);
7712
7713msi_fail_out:
7714	/* Unconfigure MSI-X capability structure */
7715	pci_disable_msix(phba->pcidev);
7716	return rc;
7717}
7718
7719/**
7720 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7721 * @phba: pointer to lpfc hba data structure.
7722 *
7723 * This routine is invoked to release the MSI-X vectors and then disable the
7724 * MSI-X interrupt mode to device with SLI-3 interface spec.
7725 **/
7726static void
7727lpfc_sli_disable_msix(struct lpfc_hba *phba)
7728{
7729	int i;
7730
7731	/* Free up MSI-X multi-message vectors */
7732	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7733		free_irq(phba->msix_entries[i].vector, phba);
7734	/* Disable MSI-X */
7735	pci_disable_msix(phba->pcidev);
7736
7737	return;
7738}
7739
7740/**
7741 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7742 * @phba: pointer to lpfc hba data structure.
7743 *
 * This routine is invoked to enable the MSI interrupt mode on a device with
 * the SLI-3 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
7754static int
7755lpfc_sli_enable_msi(struct lpfc_hba *phba)
7756{
7757	int rc;
7758
7759	rc = pci_enable_msi(phba->pcidev);
7760	if (!rc)
7761		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7762				"0462 PCI enable MSI mode success.\n");
7763	else {
7764		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7765				"0471 PCI enable MSI mode failed (%d)\n", rc);
7766		return rc;
7767	}
7768
7769	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7770			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7771	if (rc) {
7772		pci_disable_msi(phba->pcidev);
7773		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7774				"0478 MSI request_irq failed (%d)\n", rc);
7775	}
7776	return rc;
7777}
7778
7779/**
7780 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7781 * @phba: pointer to lpfc hba data structure.
7782 *
 * This routine is invoked to disable the MSI interrupt mode on a device with
 * the SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to do
 * so results in a BUG_ON() and the device will be left with MSI enabled,
 * leaking its vector.
 **/
7789static void
7790lpfc_sli_disable_msi(struct lpfc_hba *phba)
7791{
7792	free_irq(phba->pcidev->irq, phba);
7793	pci_disable_msi(phba->pcidev);
7794	return;
7795}
7796
7797/**
7798 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode: 2 for MSI-X, 1 for MSI, 0 for INTx.
 *
 * This routine is invoked to enable the device interrupt and associate the
 * driver's interrupt handler(s) with the interrupt vector(s) of a device
 * with the SLI-3 interface spec. Depending on the interrupt mode configured,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode supported by the platform, kernel, and device, in the order:
 * MSI-X -> MSI -> IRQ.
7808 *
7809 * Return codes
7810 *   0 - successful
7811 *   other values - error
7812 **/
7813static uint32_t
7814lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7815{
7816	uint32_t intr_mode = LPFC_INTR_ERROR;
7817	int retval;
7818
7819	if (cfg_mode == 2) {
7820		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7821		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7822		if (!retval) {
7823			/* Now, try to enable MSI-X interrupt mode */
7824			retval = lpfc_sli_enable_msix(phba);
7825			if (!retval) {
7826				/* Indicate initialization to MSI-X mode */
7827				phba->intr_type = MSIX;
7828				intr_mode = 2;
7829			}
7830		}
7831	}
7832
7833	/* Fallback to MSI if MSI-X initialization failed */
7834	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7835		retval = lpfc_sli_enable_msi(phba);
7836		if (!retval) {
7837			/* Indicate initialization to MSI mode */
7838			phba->intr_type = MSI;
7839			intr_mode = 1;
7840		}
7841	}
7842
	/* Fall back to INTx if both MSI-X and MSI initialization failed */
7844	if (phba->intr_type == NONE) {
7845		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7846				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7847		if (!retval) {
7848			/* Indicate initialization to INTx mode */
7849			phba->intr_type = INTx;
7850			intr_mode = 0;
7851		}
7852	}
7853	return intr_mode;
7854}
7855
7856/**
7857 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7858 * @phba: pointer to lpfc hba data structure.
7859 *
7860 * This routine is invoked to disable device interrupt and disassociate the
7861 * driver's interrupt handler(s) from interrupt vector(s) to device with
7862 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7863 * release the interrupt vector(s) for the message signaled interrupt.
7864 **/
7865static void
7866lpfc_sli_disable_intr(struct lpfc_hba *phba)
7867{
7868	/* Disable the currently initialized interrupt mode */
7869	if (phba->intr_type == MSIX)
7870		lpfc_sli_disable_msix(phba);
7871	else if (phba->intr_type == MSI)
7872		lpfc_sli_disable_msi(phba);
7873	else if (phba->intr_type == INTx)
7874		free_irq(phba->pcidev->irq, phba);
7875
7876	/* Reset interrupt management states */
7877	phba->intr_type = NONE;
7878	phba->sli.slistat.sli_intr = 0;
7879
7880	return;
7881}
7882
7883/**
7884 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7885 * @phba: pointer to lpfc hba data structure.
7886 *
 * This routine is invoked to enable the MSI-X interrupt vectors on a device
 * with the SLI-4 interface spec. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled, leaking its vectors.
7898 *
7899 * Return codes
7900 * 0 - successful
7901 * other values - error
7902 **/
7903static int
7904lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7905{
7906	int vectors, rc, index;
7907
7908	/* Set up MSI-X multi-message vectors */
7909	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7910		phba->sli4_hba.msix_entries[index].entry = index;
7911
7912	/* Configure MSI-X capability structure */
7913	vectors = phba->sli4_hba.cfg_eqn;
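	/*
	 * pci_enable_msix() returns a positive value when fewer vectors
	 * are available than requested; retry the allocation with the
	 * reduced count it reports until it succeeds or fails outright.
	 */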
7914enable_msix_vectors:
7915	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7916			     vectors);
7917	if (rc > 1) {
7918		vectors = rc;
7919		goto enable_msix_vectors;
7920	} else if (rc) {
7921		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7922				"0484 PCI enable MSI-X failed (%d)\n", rc);
7923		goto msi_fail_out;
7924	}
7925
7926	/* Log MSI-X vector assignment */
7927	for (index = 0; index < vectors; index++)
7928		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7929				"0489 MSI-X entry[%d]: vector=x%x "
7930				"message=%d\n", index,
7931				phba->sli4_hba.msix_entries[index].vector,
7932				phba->sli4_hba.msix_entries[index].entry);
7933	/*
7934	 * Assign MSI-X vectors to interrupt handlers
7935	 */
7936	if (vectors > 1)
7937		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7938				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7939				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7940	else
7941		/* All Interrupts need to be handled by one EQ */
7942		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7943				 &lpfc_sli4_intr_handler, IRQF_SHARED,
7944				 LPFC_DRIVER_NAME, phba);
7945	if (rc) {
7946		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7947				"0485 MSI-X slow-path request_irq failed "
7948				"(%d)\n", rc);
7949		goto msi_fail_out;
7950	}
7951
7952	/* The rest of the vector(s) are associated to fast-path handler(s) */
7953	for (index = 1; index < vectors; index++) {
7954		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7955		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7956		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7957				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7958				 LPFC_FP_DRIVER_HANDLER_NAME,
7959				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7960		if (rc) {
7961			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7962					"0486 MSI-X fast-path (%d) "
7963					"request_irq failed (%d)\n", index, rc);
7964			goto cfg_fail_out;
7965		}
7966	}
7967	phba->sli4_hba.msix_vec_nr = vectors;
7968
7969	return rc;
7970
7971cfg_fail_out:
7972	/* free the irq already requested */
7973	for (--index; index >= 1; index--)
7974		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7975			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7976
7977	/* free the irq already requested */
7978	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7979
7980msi_fail_out:
7981	/* Unconfigure MSI-X capability structure */
7982	pci_disable_msix(phba->pcidev);
7983	return rc;
7984}
7985
7986/**
7987 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7988 * @phba: pointer to lpfc hba data structure.
7989 *
7990 * This routine is invoked to release the MSI-X vectors and then disable the
7991 * MSI-X interrupt mode to device with SLI-4 interface spec.
7992 **/
7993static void
7994lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7995{
7996	int index;
7997
7998	/* Free up MSI-X multi-message vectors */
7999	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
8000
8001	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
8002		free_irq(phba->sli4_hba.msix_entries[index].vector,
8003			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
8004
8005	/* Disable MSI-X */
8006	pci_disable_msix(phba->pcidev);
8007
8008	return;
8009}
8010
8011/**
8012 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
8013 * @phba: pointer to lpfc hba data structure.
8014 *
 * This routine is invoked to enable the MSI interrupt mode on a device with
 * the SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
8020 *
8021 * Return codes
8022 * 	0 - successful
8023 * 	other values - error
8024 **/
8025static int
8026lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8027{
8028	int rc, index;
8029
8030	rc = pci_enable_msi(phba->pcidev);
8031	if (!rc)
8032		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8033				"0487 PCI enable MSI mode success.\n");
8034	else {
8035		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8036				"0488 PCI enable MSI mode failed (%d)\n", rc);
8037		return rc;
8038	}
8039
8040	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8041			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8042	if (rc) {
8043		pci_disable_msi(phba->pcidev);
8044		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8045				"0490 MSI request_irq failed (%d)\n", rc);
8046		return rc;
8047	}
8048
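	/*
	 * Single-vector MSI mode: point every fast-path EQ handle at this
	 * HBA so the one interrupt handler can service all the EQs.
	 */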
8049	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
8050		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8051		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8052	}
8053
8054	return 0;
8055}
8056
8057/**
8058 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
8059 * @phba: pointer to lpfc hba data structure.
8060 *
 * This routine is invoked to disable the MSI interrupt mode on a device with
 * the SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to do
 * so results in a BUG_ON() and the device will be left with MSI enabled,
 * leaking its vector.
8066 **/
8067static void
8068lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8069{
8070	free_irq(phba->pcidev->irq, phba);
8071	pci_disable_msi(phba->pcidev);
8072	return;
8073}
8074
8075/**
8076 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode: 2 for MSI-X, 1 for MSI, 0 for INTx.
 *
 * This routine is invoked to enable the device interrupt and associate the
 * driver's interrupt handler(s) with the interrupt vector(s) of a device
 * with the SLI-4 interface spec. Depending on the interrupt mode configured,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode supported by the platform, kernel, and device, in the order:
 * MSI-X -> MSI -> IRQ.
8086 *
8087 * Return codes
8088 * 	0 - successful
8089 * 	other values - error
8090 **/
8091static uint32_t
8092lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8093{
8094	uint32_t intr_mode = LPFC_INTR_ERROR;
8095	int retval, index;
8096
	if (cfg_mode == 2) {
		/*
		 * Unlike the SLI-3 path, no conf_port mailbox command is
		 * needed here; try to enable MSI-X interrupt mode directly.
		 */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}
8110
8111	/* Fallback to MSI if MSI-X initialization failed */
8112	if (cfg_mode >= 1 && phba->intr_type == NONE) {
8113		retval = lpfc_sli4_enable_msi(phba);
8114		if (!retval) {
8115			/* Indicate initialization to MSI mode */
8116			phba->intr_type = MSI;
8117			intr_mode = 1;
8118		}
8119	}
8120
	/* Fall back to INTx if both MSI-X and MSI initialization failed */
8122	if (phba->intr_type == NONE) {
8123		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8124				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8125		if (!retval) {
8126			/* Indicate initialization to INTx mode */
8127			phba->intr_type = INTx;
8128			intr_mode = 0;
8129			for (index = 0; index < phba->cfg_fcp_eq_count;
8130			     index++) {
8131				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8132				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8133			}
8134		}
8135	}
8136	return intr_mode;
8137}
8138
8139/**
8140 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
8141 * @phba: pointer to lpfc hba data structure.
8142 *
8143 * This routine is invoked to disable device interrupt and disassociate
8144 * the driver's interrupt handler(s) from interrupt vector(s) to device
8145 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
8146 * will release the interrupt vector(s) for the message signaled interrupt.
8147 **/
8148static void
8149lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8150{
8151	/* Disable the currently initialized interrupt mode */
8152	if (phba->intr_type == MSIX)
8153		lpfc_sli4_disable_msix(phba);
8154	else if (phba->intr_type == MSI)
8155		lpfc_sli4_disable_msi(phba);
8156	else if (phba->intr_type == INTx)
8157		free_irq(phba->pcidev->irq, phba);
8158
8159	/* Reset interrupt management states */
8160	phba->intr_type = NONE;
8161	phba->sli.slistat.sli_intr = 0;
8162
8163	return;
8164}
8165
8166/**
8167 * lpfc_unset_hba - Unset SLI3 hba device initialization
8168 * @phba: pointer to lpfc hba data structure.
8169 *
8170 * This routine is invoked to unset the HBA device initialization steps to
8171 * a device with SLI-3 interface spec.
8172 **/
8173static void
8174lpfc_unset_hba(struct lpfc_hba *phba)
8175{
8176	struct lpfc_vport *vport = phba->pport;
8177	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8178
8179	spin_lock_irq(shost->host_lock);
8180	vport->load_flag |= FC_UNLOADING;
8181	spin_unlock_irq(shost->host_lock);
8182
8183	kfree(phba->vpi_bmask);
8184	kfree(phba->vpi_ids);
8185
8186	lpfc_stop_hba_timers(phba);
8187
8188	phba->pport->work_port_events = 0;
8189
8190	lpfc_sli_hba_down(phba);
8191
8192	lpfc_sli_brdrestart(phba);
8193
8194	lpfc_sli_disable_intr(phba);
8195
8196	return;
8197}
8198
8199/**
8200 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
8201 * @phba: pointer to lpfc hba data structure.
8202 *
8203 * This routine is invoked to unset the HBA device initialization steps to
8204 * a device with SLI-4 interface spec.
8205 **/
8206static void
8207lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8208{
8209	struct lpfc_vport *vport = phba->pport;
8210	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8211
8212	spin_lock_irq(shost->host_lock);
8213	vport->load_flag |= FC_UNLOADING;
8214	spin_unlock_irq(shost->host_lock);
8215
8216	phba->pport->work_port_events = 0;
8217
8218	/* Stop the SLI4 device port */
8219	lpfc_stop_port(phba);
8220
8221	lpfc_sli4_disable_intr(phba);
8222
8223	/* Reset SLI4 HBA FCoE function */
8224	lpfc_pci_function_reset(phba);
8225	lpfc_sli4_queue_destroy(phba);
8226
8227	return;
8228}
8229
8230/**
8231 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8232 * @phba: Pointer to HBA context object.
8233 *
 * This function is called in the SLI4 code path to wait for the device's
 * XRI exchange-busy condition to complete. It checks for exchange-busy on
 * outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it checks every 30 seconds, logging an error message each time,
 * and waits indefinitely. Only when all XRI exchange-busy conditions have
 * completed does the driver unload proceed with the function reset ioctl
 * mailbox command to the CNA and the rest of the driver unload resource
 * release.
8242 **/
8243static void
8244lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8245{
8246	int wait_time = 0;
8247	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8248	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8249
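	/*
	 * Fast-poll (T1 interval) until the overall timeout elapses, then
	 * slow-poll (T2 interval) indefinitely, logging which XRI class is
	 * still busy on each pass.
	 */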
8250	while (!fcp_xri_cmpl || !els_xri_cmpl) {
8251		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8252			if (!fcp_xri_cmpl)
8253				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8254						"2877 FCP XRI exchange busy "
8255						"wait time: %d seconds.\n",
8256						wait_time/1000);
8257			if (!els_xri_cmpl)
8258				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8259						"2878 ELS XRI exchange busy "
8260						"wait time: %d seconds.\n",
8261						wait_time/1000);
8262			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8263			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8264		} else {
8265			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8266			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8267		}
8268		fcp_xri_cmpl =
8269			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8270		els_xri_cmpl =
8271			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8272	}
8273}
8274
8275/**
8276 * lpfc_sli4_hba_unset - Unset the fcoe hba
8277 * @phba: Pointer to HBA context object.
8278 *
8279 * This function is called in the SLI4 code path to reset the HBA's FCoE
8280 * function. The caller is not required to hold any lock. This routine
8281 * issues PCI function reset mailbox command to reset the FCoE function.
8282 * At the end of the function, it calls lpfc_hba_down_post function to
8283 * free any pending commands.
8284 **/
8285static void
8286lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8287{
8288	int wait_cnt = 0;
8289	LPFC_MBOXQ_t *mboxq;
8290	struct pci_dev *pdev = phba->pcidev;
8291
8292	lpfc_stop_hba_timers(phba);
8293	phba->sli4_hba.intr_enable = 0;
8294
8295	/*
	 * Gracefully wait out any currently outstanding asynchronous
8297	 * mailbox command.
8298	 */
8299
	/* First, block any pending async mailbox command from being posted */
8301	spin_lock_irq(&phba->hbalock);
8302	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8303	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
8305	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8306		msleep(10);
8307		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8308			break;
8309	}
8310	/* Forcefully release the outstanding mailbox command if timed out */
8311	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8312		spin_lock_irq(&phba->hbalock);
8313		mboxq = phba->sli.mbox_active;
8314		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8315		__lpfc_mbox_cmpl_put(phba, mboxq);
8316		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8317		phba->sli.mbox_active = NULL;
8318		spin_unlock_irq(&phba->hbalock);
8319	}
8320
8321	/* Abort all iocbs associated with the hba */
8322	lpfc_sli_hba_iocb_abort(phba);
8323
8324	/* Wait for completion of device XRI exchange busy */
8325	lpfc_sli4_xri_exchange_busy_wait(phba);
8326
8327	/* Disable PCI subsystem interrupt */
8328	lpfc_sli4_disable_intr(phba);
8329
8330	/* Disable SR-IOV if enabled */
8331	if (phba->cfg_sriov_nr_virtfn)
8332		pci_disable_sriov(pdev);
8333
	/* The kthread stop signal will trigger work_done one more time */
8335	kthread_stop(phba->worker_thread);
8336
8337	/* Reset SLI4 HBA FCoE function */
8338	lpfc_pci_function_reset(phba);
8339	lpfc_sli4_queue_destroy(phba);
8340
8341	/* Stop the SLI4 device port */
8342	phba->pport->work_port_events = 0;
8343}
8344
8345 /**
8346 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8347 * @phba: Pointer to HBA context object.
8348 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8349 *
8350 * This function is called in the SLI4 code path to read the port's
8351 * sli4 capabilities.
8352 *
 * This function may be called from any context that can block-wait
8354 * for the completion.  The expectation is that this routine is called
8355 * typically from probe_one or from the online routine.
8356 **/
8357int
8358lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8359{
8360	int rc;
8361	struct lpfc_mqe *mqe;
8362	struct lpfc_pc_sli4_params *sli4_params;
8363	uint32_t mbox_tmo;
8364
8365	rc = 0;
8366	mqe = &mboxq->u.mqe;
8367
8368	/* Read the port's SLI4 Parameters port capabilities */
8369	lpfc_pc_sli4_params(mboxq);
8370	if (!phba->sli4_hba.intr_enable)
8371		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8372	else {
8373		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8374		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8375	}
8376
8377	if (unlikely(rc))
8378		return 1;
8379
8380	sli4_params = &phba->sli4_hba.pc_sli4_params;
8381	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8382	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8383	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8384	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8385					     &mqe->un.sli4_params);
8386	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8387					     &mqe->un.sli4_params);
8388	sli4_params->proto_types = mqe->un.sli4_params.word3;
8389	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8390	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8391	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8392	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8393	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8394	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8395	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8396	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8397	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8398	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8399	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8400	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8401	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8402	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8403	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8404	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8405	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8406	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8407	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8408	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8409
8410	/* Make sure that sge_supp_len can be handled by the driver */
8411	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8412		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8413
8414	return rc;
8415}
8416
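/*
 * Illustrative sketch (not part of the upstream driver): both SLI4
 * parameter routines in this file issue their mailbox command with the
 * same idiom -- poll for completion while interrupts are still disabled
 * (early probe), otherwise block-wait with a command-specific timeout.
 * A minimal helper capturing the pattern (name hypothetical):
 *
 *	static int example_issue_mbox(struct lpfc_hba *phba,
 *				      LPFC_MBOXQ_t *mboxq)
 *	{
 *		uint32_t mbox_tmo;
 *
 *		if (!phba->sli4_hba.intr_enable)
 *			return lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
 *		return lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 *	}
 */
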
8417/**
8418 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8419 * @phba: Pointer to HBA context object.
8420 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8421 *
8422 * This function is called in the SLI4 code path to read the port's
8423 * sli4 capabilities.
8424 *
8425 * This function may be called from any context that can block-wait
8426 * for the completion.  The expectation is that this routine is called
8427 * typically from probe_one or from the online routine.
8428 **/
8429int
8430lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8431{
8432	int rc;
8433	struct lpfc_mqe *mqe = &mboxq->u.mqe;
8434	struct lpfc_pc_sli4_params *sli4_params;
8435	uint32_t mbox_tmo;
8436	int length;
8437	struct lpfc_sli4_parameters *mbx_sli4_parameters;
8438
8439	/*
8440	 * By default, the driver assumes the SLI4 port requires RPI
8441	 * header postings.  The SLI4_PARAM response will correct this
8442	 * assumption.
8443	 */
8444	phba->sli4_hba.rpi_hdrs_in_use = 1;
8445
8446	/* Read the port's SLI4 Config Parameters */
8447	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8448		  sizeof(struct lpfc_sli4_cfg_mhdr));
8449	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8450			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8451			 length, LPFC_SLI4_MBX_EMBED);
8452	if (!phba->sli4_hba.intr_enable)
8453		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8454	else {
8455		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8456		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8457	}
8458	if (unlikely(rc))
8459		return rc;
8460	sli4_params = &phba->sli4_hba.pc_sli4_params;
8461	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8462	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8463	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8464	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8465	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8466					     mbx_sli4_parameters);
8467	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8468					     mbx_sli4_parameters);
8469	if (bf_get(cfg_phwq, mbx_sli4_parameters))
8470		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8471	else
8472		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8473	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8474	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8475	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8476	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8477	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8478	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8479	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8480					    mbx_sli4_parameters);
8481	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8482					   mbx_sli4_parameters);
8483	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8484	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8485
8486	/* Make sure that sge_supp_len can be handled by the driver */
8487	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8488		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8489
8490	return 0;
8491}
8492
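/*
 * Illustrative sketch (not part of the upstream driver): note the
 * conservative-default pattern above for RPI header postings -- assume the
 * feature is required, and let the mailbox response relax the assumption
 * only after the command succeeds:
 *
 *	phba->sli4_hba.rpi_hdrs_in_use = 1;	   (assume required)
 *	rc = issue GET_SLI4_PARAMETERS mailbox command;
 *	if (rc)
 *		return rc;			   (default stands)
 *	phba->sli4_hba.rpi_hdrs_in_use =
 *			bf_get(cfg_hdrr, mbx_sli4_parameters);
 */
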
8493/**
8494 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8495 * @pdev: pointer to PCI device
8496 * @pid: pointer to PCI device identifier
8497 *
8498 * This routine is to be called to attach a device with SLI-3 interface spec
8499 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8500 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8501 * information of the device and driver to see if the driver states that it
8502 * can support this kind of device. If the match is successful, the driver core
8503 * invokes this routine. If this routine determines it can claim the HBA, it
8504 * does all the initialization that it needs to do to handle the HBA properly.
8505 *
8506 * Return code
8507 * 	0 - driver can claim the device
8508 * 	negative value - driver can not claim the device
8509 **/
8510static int __devinit
8511lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8512{
8513	struct lpfc_hba   *phba;
8514	struct lpfc_vport *vport = NULL;
8515	struct Scsi_Host  *shost = NULL;
8516	int error;
8517	uint32_t cfg_mode, intr_mode;
8518
8519	/* Allocate memory for HBA structure */
8520	phba = lpfc_hba_alloc(pdev);
8521	if (!phba)
8522		return -ENOMEM;
8523
8524	/* Perform generic PCI device enabling operation */
8525	error = lpfc_enable_pci_dev(phba);
8526	if (error)
8527		goto out_free_phba;
8528
8529	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
8530	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8531	if (error)
8532		goto out_disable_pci_dev;
8533
8534	/* Set up SLI-3 specific device PCI memory space */
8535	error = lpfc_sli_pci_mem_setup(phba);
8536	if (error) {
8537		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8538				"1402 Failed to set up pci memory space.\n");
8539		goto out_disable_pci_dev;
8540	}
8541
8542	/* Set up phase-1 common device driver resources */
8543	error = lpfc_setup_driver_resource_phase1(phba);
8544	if (error) {
8545		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8546				"1403 Failed to set up driver resource.\n");
8547		goto out_unset_pci_mem_s3;
8548	}
8549
8550	/* Set up SLI-3 specific device driver resources */
8551	error = lpfc_sli_driver_resource_setup(phba);
8552	if (error) {
8553		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8554				"1404 Failed to set up driver resource.\n");
8555		goto out_unset_pci_mem_s3;
8556	}
8557
8558	/* Initialize and populate the iocb list per host */
8559	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8560	if (error) {
8561		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8562				"1405 Failed to initialize iocb list.\n");
8563		goto out_unset_driver_resource_s3;
8564	}
8565
8566	/* Set up common device driver resources */
8567	error = lpfc_setup_driver_resource_phase2(phba);
8568	if (error) {
8569		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8570				"1406 Failed to set up driver resource.\n");
8571		goto out_free_iocb_list;
8572	}
8573
8574	/* Get the default values for Model Name and Description */
8575	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8576
8577	/* Create SCSI host to the physical port */
8578	error = lpfc_create_shost(phba);
8579	if (error) {
8580		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8581				"1407 Failed to create scsi host.\n");
8582		goto out_unset_driver_resource;
8583	}
8584
8585	/* Configure sysfs attributes */
8586	vport = phba->pport;
8587	error = lpfc_alloc_sysfs_attr(vport);
8588	if (error) {
8589		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8590				"1476 Failed to allocate sysfs attr\n");
8591		goto out_destroy_shost;
8592	}
8593
8594	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8595	/* Now, trying to enable interrupt and bring up the device */
8596	cfg_mode = phba->cfg_use_msi;
8597	while (true) {
8598		/* Put device to a known state before enabling interrupt */
8599		lpfc_stop_port(phba);
8600		/* Configure and enable interrupt */
8601		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8602		if (intr_mode == LPFC_INTR_ERROR) {
8603			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8604					"0431 Failed to enable interrupt.\n");
8605			error = -ENODEV;
8606			goto out_free_sysfs_attr;
8607		}
8608		/* SLI-3 HBA setup */
8609		if (lpfc_sli_hba_setup(phba)) {
8610			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8611					"1477 Failed to set up hba\n");
8612			error = -ENODEV;
8613			goto out_remove_device;
8614		}
8615
8616		/* Wait 50ms for the interrupts of previous mailbox commands */
8617		msleep(50);
8618		/* Check active interrupts on message signaled interrupts */
8619		if (intr_mode == 0 ||
8620		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8621			/* Log the current active interrupt mode */
8622			phba->intr_mode = intr_mode;
8623			lpfc_log_intr_mode(phba, intr_mode);
8624			break;
8625		} else {
8626			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8627					"0447 Configure interrupt mode (%d) "
8628					"failed active interrupt test.\n",
8629					intr_mode);
8630			/* Disable the current interrupt mode */
8631			lpfc_sli_disable_intr(phba);
8632			/* Try next level of interrupt mode */
8633			cfg_mode = --intr_mode;
8634		}
8635	}
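	/*
	 * Note: each failed pass above disables the current interrupt mode
	 * and retries with cfg_mode = --intr_mode, so the loop walks down
	 * from MSI-X (2) through MSI (1) to INTx (0); INTx mode passes the
	 * active-interrupt test unconditionally, so the loop terminates.
	 */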
8636
8637	/* Perform post initialization setup */
8638	lpfc_post_init_setup(phba);
8639
8640	/* Check if there are static vports to be created. */
8641	lpfc_create_static_vport(phba);
8642
8643	return 0;
8644
8645out_remove_device:
8646	lpfc_unset_hba(phba);
8647out_free_sysfs_attr:
8648	lpfc_free_sysfs_attr(vport);
8649out_destroy_shost:
8650	lpfc_destroy_shost(phba);
8651out_unset_driver_resource:
8652	lpfc_unset_driver_resource_phase2(phba);
8653out_free_iocb_list:
8654	lpfc_free_iocb_list(phba);
8655out_unset_driver_resource_s3:
8656	lpfc_sli_driver_resource_unset(phba);
8657out_unset_pci_mem_s3:
8658	lpfc_sli_pci_mem_unset(phba);
8659out_disable_pci_dev:
8660	lpfc_disable_pci_dev(phba);
8661	if (shost)
8662		scsi_host_put(shost);
8663out_free_phba:
8664	lpfc_hba_free(phba);
8665	return error;
8666}
8667
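/*
 * Illustrative sketch (not part of the upstream driver): the error path
 * above is the classic kernel goto-unwind pattern -- each setup step gets
 * a label that tears down exactly the steps completed before it, so the
 * labels run in reverse (LIFO) order of setup.  Skeleton with hypothetical
 * names:
 *
 *	err = step_a();  if (err) goto out;
 *	err = step_b();  if (err) goto undo_a;
 *	err = step_c();  if (err) goto undo_b;
 *	return 0;
 * undo_b:
 *	undo_step_b();
 * undo_a:
 *	undo_step_a();
 * out:
 *	return err;
 */
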
8668/**
8669 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8670 * @pdev: pointer to PCI device
8671 *
8672 * This routine is to be called to detach a device with SLI-3 interface
8673 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8674 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8675 * device to be removed from the PCI subsystem properly.
8676 **/
8677static void __devexit
8678lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8679{
8680	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
8681	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8682	struct lpfc_vport **vports;
8683	struct lpfc_hba   *phba = vport->phba;
8684	int i;
8685	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8686
8687	spin_lock_irq(&phba->hbalock);
8688	vport->load_flag |= FC_UNLOADING;
8689	spin_unlock_irq(&phba->hbalock);
8690
8691	lpfc_free_sysfs_attr(vport);
8692
8693	/* Release all the vports against this physical port */
8694	vports = lpfc_create_vport_work_array(phba);
8695	if (vports != NULL)
8696		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8697			fc_vport_terminate(vports[i]->fc_vport);
8698	lpfc_destroy_vport_work_array(phba, vports);
8699
8700	/* Remove FC host and then SCSI host with the physical port */
8701	fc_remove_host(shost);
8702	scsi_remove_host(shost);
8703	lpfc_cleanup(vport);
8704
8705	/*
8706	 * Bring down the SLI Layer. This step disables all interrupts,
8707	 * clears the rings, discards all mailbox commands, and resets
8708	 * the HBA.
8709	 */
8710
8711	/* HBA interrupt will be disabled after this call */
8712	lpfc_sli_hba_down(phba);
8713	/* Stopping the kthread will trigger work_done one more time */
8714	kthread_stop(phba->worker_thread);
8715	/* Final cleanup of txcmplq and reset the HBA */
8716	lpfc_sli_brdrestart(phba);
8717
8718	kfree(phba->vpi_bmask);
8719	kfree(phba->vpi_ids);
8720
8721	lpfc_stop_hba_timers(phba);
8722	spin_lock_irq(&phba->hbalock);
8723	list_del_init(&vport->listentry);
8724	spin_unlock_irq(&phba->hbalock);
8725
8726	lpfc_debugfs_terminate(vport);
8727
8728	/* Disable SR-IOV if enabled */
8729	if (phba->cfg_sriov_nr_virtfn)
8730		pci_disable_sriov(pdev);
8731
8732	/* Disable interrupt */
8733	lpfc_sli_disable_intr(phba);
8734
8735	pci_set_drvdata(pdev, NULL);
8736	scsi_host_put(shost);
8737
8738	/*
8739	 * Call scsi_free before mem_free since scsi bufs are released to their
8740	 * corresponding pools here.
8741	 */
8742	lpfc_scsi_free(phba);
8743	lpfc_mem_free_all(phba);
8744
8745	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8746			  phba->hbqslimp.virt, phba->hbqslimp.phys);
8747
8748	/* Free resources associated with SLI2 interface */
8749	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8750			  phba->slim2p.virt, phba->slim2p.phys);
8751
8752	/* unmap adapter SLIM and Control Registers */
8753	iounmap(phba->ctrl_regs_memmap_p);
8754	iounmap(phba->slim_memmap_p);
8755
8756	lpfc_hba_free(phba);
8757
8758	pci_release_selected_regions(pdev, bars);
8759	pci_disable_device(pdev);
8760}
8761
8762/**
8763 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8764 * @pdev: pointer to PCI device
8765 * @msg: power management message
8766 *
8767 * This routine is to be called from the kernel's PCI subsystem to support
8768 * system Power Management (PM) for a device with SLI-3 interface spec. When
8769 * PM invokes this method, it quiesces the device by stopping the driver's
8770 * worker thread for the device, turning off the device's interrupt and DMA,
8771 * and bringing the device offline. Note that because the driver implements
8772 * only the minimum PM requirements for a power-aware driver's suspend/resume
8773 * support -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
8774 * suspend() method call are treated as SUSPEND and the driver fully
8775 * reinitializes its device during the resume() method call -- the driver sets
8776 * the device to PCI_D3hot state in PCI config space instead of setting it
8777 * according to the @msg provided by the PM.
8778 *
8779 * Return code
8780 * 	0 - driver suspended the device
8781 * 	Error otherwise
8782 **/
8783static int
8784lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8785{
8786	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8787	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8788
8789	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8790			"0473 PCI device Power Management suspend.\n");
8791
8792	/* Bring down the device */
8793	lpfc_offline_prep(phba);
8794	lpfc_offline(phba);
8795	kthread_stop(phba->worker_thread);
8796
8797	/* Disable interrupt from device */
8798	lpfc_sli_disable_intr(phba);
8799
8800	/* Save device state to PCI config space */
8801	pci_save_state(pdev);
8802	pci_set_power_state(pdev, PCI_D3hot);
8803
8804	return 0;
8805}
8806
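/*
 * Illustrative sketch (not part of the upstream driver): stripped of the
 * lpfc specifics, the suspend/resume pair here follows the standard PCI
 * PM sequence:
 *
 *	suspend: quiesce the driver -> free the IRQ ->
 *		 pci_save_state(pdev) ->
 *		 pci_set_power_state(pdev, PCI_D3hot);
 *	resume:  pci_set_power_state(pdev, PCI_D0) ->
 *		 pci_restore_state(pdev) ->
 *		 pci_save_state(pdev)	(restore clears saved_state) ->
 *		 re-enable the IRQ -> restart and online the device.
 */
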
8807/**
8808 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8809 * @pdev: pointer to PCI device
8810 *
8811 * This routine is to be called from the kernel's PCI subsystem to support
8812 * system Power Management (PM) for a device with SLI-3 interface spec. When PM
8813 * invokes this method, it restores the device's PCI config space state and
8814 * fully reinitializes the device and brings it online. Note that because the
8815 * driver implements only the minimum PM requirements for a power-aware
8816 * driver's suspend/resume support -- all the possible PM messages (SUSPEND,
8817 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND and
8818 * the driver fully reinitializes its device during the resume() method call --
8819 * the device is set to PCI_D0 directly in PCI config space before restoring
8820 * the state.
8821 *
8822 * Return code
8823 * 	0 - driver resumed the device
8824 * 	Error otherwise
8825 **/
8826static int
8827lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8828{
8829	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8830	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8831	uint32_t intr_mode;
8832	int error;
8833
8834	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8835			"0452 PCI device Power Management resume.\n");
8836
8837	/* Restore device state from PCI config space */
8838	pci_set_power_state(pdev, PCI_D0);
8839	pci_restore_state(pdev);
8840
8841	/*
8842	 * As the new kernel behavior of pci_restore_state() API call clears
8843	 * device saved_state flag, need to save the restored state again.
8844	 */
8845	pci_save_state(pdev);
8846
8847	if (pdev->is_busmaster)
8848		pci_set_master(pdev);
8849
8850	/* Startup the kernel thread for this host adapter. */
8851	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8852					"lpfc_worker_%d", phba->brd_no);
8853	if (IS_ERR(phba->worker_thread)) {
8854		error = PTR_ERR(phba->worker_thread);
8855		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8856				"0434 PM resume failed to start worker "
8857				"thread: error=x%x.\n", error);
8858		return error;
8859	}
8860
8861	/* Configure and enable interrupt */
8862	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8863	if (intr_mode == LPFC_INTR_ERROR) {
8864		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8865				"0430 PM resume Failed to enable interrupt\n");
8866		return -EIO;
8867	} else
8868		phba->intr_mode = intr_mode;
8869
8870	/* Restart HBA and bring it online */
8871	lpfc_sli_brdrestart(phba);
8872	lpfc_online(phba);
8873
8874	/* Log the current active interrupt mode */
8875	lpfc_log_intr_mode(phba, phba->intr_mode);
8876
8877	return 0;
8878}
8879
8880/**
8881 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8882 * @phba: pointer to lpfc hba data structure.
8883 *
8884 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8885 * aborts all the outstanding SCSI I/Os to the pci device.
8886 **/
8887static void
8888lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8889{
8890	struct lpfc_sli *psli = &phba->sli;
8891	struct lpfc_sli_ring  *pring;
8892
8893	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8894			"2723 PCI channel I/O abort preparing for recovery\n");
8895
8896	/*
8897	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
8898	 * and let the SCSI mid-layer retry them to recover.
8899	 */
8900	pring = &psli->ring[psli->fcp_ring];
8901	lpfc_sli_abort_iocb_ring(phba, pring);
8902}
8903
8904/**
8905 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8906 * @phba: pointer to lpfc hba data structure.
8907 *
8908 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8909 * disables the device interrupt and pci device, and aborts the internal FCP
8910 * pending I/Os.
8911 **/
8912static void
8913lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8914{
8915	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8916			"2710 PCI channel disable preparing for reset\n");
8917
8918	/* Block any management I/Os to the device */
8919	lpfc_block_mgmt_io(phba);
8920
8921	/* Block all SCSI devices' I/Os on the host */
8922	lpfc_scsi_dev_block(phba);
8923
8924	/* stop all timers */
8925	lpfc_stop_hba_timers(phba);
8926
8927	/* Disable interrupt and pci device */
8928	lpfc_sli_disable_intr(phba);
8929	pci_disable_device(phba->pcidev);
8930
8931	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
8932	lpfc_sli_flush_fcp_rings(phba);
8933}
8934
8935/**
8936 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8937 * @phba: pointer to lpfc hba data structure.
8938 *
8939 * This routine is called to prepare the SLI3 device for PCI slot permanently
8940 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8941 * pending I/Os.
8942 **/
8943static void
8944lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8945{
8946	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8947			"2711 PCI channel permanent disable for failure\n");
8948	/* Block all SCSI devices' I/Os on the host */
8949	lpfc_scsi_dev_block(phba);
8950
8951	/* stop all timers */
8952	lpfc_stop_hba_timers(phba);
8953
8954	/* Clean up all driver's outstanding SCSI I/Os */
8955	lpfc_sli_flush_fcp_rings(phba);
8956}
8957
8958/**
8959 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8960 * @pdev: pointer to PCI device.
8961 * @state: the current PCI connection state.
8962 *
8963 * This routine is called from the PCI subsystem for I/O error handling to
8964 * a device with SLI-3 interface spec. This function is called by the PCI
8965 * subsystem after a PCI bus error affecting this device has been detected.
8966 * When this function is invoked, it will need to stop all the I/Os and
8967 * interrupt(s) to the device. Once that is done, it will return
8968 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8969 * as desired.
8970 *
8971 * Return codes
8972 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8973 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8974 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8975 **/
8976static pci_ers_result_t
8977lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8978{
8979	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8980	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8981
8982	switch (state) {
8983	case pci_channel_io_normal:
8984		/* Non-fatal error, prepare for recovery */
8985		lpfc_sli_prep_dev_for_recover(phba);
8986		return PCI_ERS_RESULT_CAN_RECOVER;
8987	case pci_channel_io_frozen:
8988		/* Fatal error, prepare for slot reset */
8989		lpfc_sli_prep_dev_for_reset(phba);
8990		return PCI_ERS_RESULT_NEED_RESET;
8991	case pci_channel_io_perm_failure:
8992		/* Permanent failure, prepare for device down */
8993		lpfc_sli_prep_dev_for_perm_failure(phba);
8994		return PCI_ERS_RESULT_DISCONNECT;
8995	default:
8996		/* Unknown state, prepare and request slot reset */
8997		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8998				"0472 Unknown PCI error state: x%x\n", state);
8999		lpfc_sli_prep_dev_for_reset(phba);
9000		return PCI_ERS_RESULT_NEED_RESET;
9001	}
9002}
9003
9004/**
9005 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
9006 * @pdev: pointer to PCI device.
9007 *
9008 * This routine is called from the PCI subsystem for error handling to
9009 * a device with SLI-3 interface spec. This is called after PCI bus has been
9010 * reset to restart the PCI card from scratch, as if from a cold-boot.
9011 * During the PCI subsystem error recovery, after driver returns
9012 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9013 * recovery and then call this routine before calling the .resume method
9014 * to recover the device. This function will initialize the HBA device,
9015 * enable the interrupt, but it will just put the HBA to offline state
9016 * without passing any I/O traffic.
9017 *
9018 * Return codes
9019 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9020 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9021 */
9022static pci_ers_result_t
9023lpfc_io_slot_reset_s3(struct pci_dev *pdev)
9024{
9025	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9026	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9027	struct lpfc_sli *psli = &phba->sli;
9028	uint32_t intr_mode;
9029
9030	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9031	if (pci_enable_device_mem(pdev)) {
9032		printk(KERN_ERR "lpfc: Cannot re-enable "
9033			"PCI device after reset.\n");
9034		return PCI_ERS_RESULT_DISCONNECT;
9035	}
9036
9037	pci_restore_state(pdev);
9038
9039	/*
9040	 * As the new kernel behavior of pci_restore_state() API call clears
9041	 * device saved_state flag, need to save the restored state again.
9042	 */
9043	pci_save_state(pdev);
9044
9045	if (pdev->is_busmaster)
9046		pci_set_master(pdev);
9047
9048	spin_lock_irq(&phba->hbalock);
9049	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9050	spin_unlock_irq(&phba->hbalock);
9051
9052	/* Configure and enable interrupt */
9053	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9054	if (intr_mode == LPFC_INTR_ERROR) {
9055		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9056				"0427 Cannot re-enable interrupt after "
9057				"slot reset.\n");
9058		return PCI_ERS_RESULT_DISCONNECT;
9059	} else
9060		phba->intr_mode = intr_mode;
9061
9062	/* Take device offline, it will perform cleanup */
9063	lpfc_offline_prep(phba);
9064	lpfc_offline(phba);
9065	lpfc_sli_brdrestart(phba);
9066
9067	/* Log the current active interrupt mode */
9068	lpfc_log_intr_mode(phba, phba->intr_mode);
9069
9070	return PCI_ERS_RESULT_RECOVERED;
9071}
9072
9073/**
9074 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
9075 * @pdev: pointer to PCI device
9076 *
9077 * This routine is called from the PCI subsystem for error handling to a device
9078 * with SLI-3 interface spec. It is called when kernel error recovery tells
9079 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9080 * error recovery. After this call, traffic can start to flow from this device
9081 * again.
9082 */
9083static void
9084lpfc_io_resume_s3(struct pci_dev *pdev)
9085{
9086	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9087	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9088
9089	/* Bring device online, it will be no-op for non-fatal error resume */
9090	lpfc_online(phba);
9091
9092	/* Clean up Advanced Error Reporting (AER) if needed */
9093	if (phba->hba_flag & HBA_AER_ENABLED)
9094		pci_cleanup_aer_uncorrect_error_status(pdev);
9095}
9096
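/*
 * Illustrative note (not part of the upstream driver): the three error
 * handlers above are invoked by the PCI error recovery core in a fixed
 * order,
 *
 *	error_detected() -> [slot_reset()] -> resume(),
 *
 * where returning PCI_ERS_RESULT_NEED_RESET from error_detected() is what
 * asks the core to reset the bus and then call slot_reset().
 */
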
9097/**
9098 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
9099 * @phba: pointer to lpfc hba data structure.
9100 *
9101 * returns the number of ELS/CT IOCBs to reserve
9102 **/
9103int
9104lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9105{
9106	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
9107
9108	if (phba->sli_rev == LPFC_SLI_REV4) {
9109		if (max_xri <= 100)
9110			return 10;
9111		else if (max_xri <= 256)
9112			return 25;
9113		else if (max_xri <= 512)
9114			return 50;
9115		else if (max_xri <= 1024)
9116			return 100;
9117		else
9118			return 150;
9119	} else
9120		return 0;
9121}
9122
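/*
 * Illustrative sketch (not part of the upstream driver): the tiered
 * mapping above could equally be written table-driven; the table below is
 * hypothetical but encodes the same max_xri thresholds:
 *
 *	static const struct { int max_xri, els_cnt; } els_iocb_tbl[] = {
 *		{  100, 10 }, {  256, 25 }, {  512, 50 }, { 1024, 100 },
 *	};
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(els_iocb_tbl); i++)
 *		if (max_xri <= els_iocb_tbl[i].max_xri)
 *			return els_iocb_tbl[i].els_cnt;
 *	return 150;
 */
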
9123/**
9124 * lpfc_write_firmware - attempt to write a firmware image to the port
9125 * @phba: pointer to lpfc hba data structure.
9126 * @fw: pointer to firmware image returned from request_firmware.
9127 *
9128 * returns the number of bytes written if write is successful.
9129 * returns a negative error value if there were errors.
9130 * returns 0 if firmware matches currently active firmware on port.
9131 **/
9132int
9133lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9134{
9135	char fwrev[FW_REV_STR_SIZE];
9136	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
9137	struct list_head dma_buffer_list;
9138	int i, rc = 0;
9139	struct lpfc_dmabuf *dmabuf, *next;
9140	uint32_t offset = 0, temp_offset = 0;
9141
9142	INIT_LIST_HEAD(&dma_buffer_list);
9143	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9144	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
9145	     LPFC_FILE_TYPE_GROUP) ||
9146	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
9147	    (be32_to_cpu(image->size) != fw->size)) {
9148		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9149				"3022 Invalid FW image found. "
9150				"Magic:%x Type:%x ID:%x\n",
9151				be32_to_cpu(image->magic_number),
9152				bf_get_be32(lpfc_grp_hdr_file_type, image),
9153				bf_get_be32(lpfc_grp_hdr_id, image));
9154		return -EINVAL;
9155	}
9156	lpfc_decode_firmware_rev(phba, fwrev, 1);
9157	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
9158		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9159				"3023 Updating Firmware. Current Version:%s "
9160				"New Version:%s\n",
9161				fwrev, image->revision);
9162		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
9163			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
9164					 GFP_KERNEL);
9165			if (!dmabuf) {
9166				rc = -ENOMEM;
9167				goto out;
9168			}
9169			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9170							  SLI4_PAGE_SIZE,
9171							  &dmabuf->phys,
9172							  GFP_KERNEL);
9173			if (!dmabuf->virt) {
9174				kfree(dmabuf);
9175				rc = -ENOMEM;
9176				goto out;
9177			}
9178			list_add_tail(&dmabuf->list, &dma_buffer_list);
9179		}
9180		while (offset < fw->size) {
9181			temp_offset = offset;
9182			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
9183				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
9184					memcpy(dmabuf->virt,
9185					       fw->data + temp_offset,
9186					       fw->size - temp_offset);
9187					temp_offset = fw->size;
9188					break;
9189				}
9190				memcpy(dmabuf->virt, fw->data + temp_offset,
9191				       SLI4_PAGE_SIZE);
9192				temp_offset += SLI4_PAGE_SIZE;
9193			}
9194			rc = lpfc_wr_object(phba, &dma_buffer_list,
9195				    (fw->size - offset), &offset);
9196			if (rc) {
9197				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9198						"3024 Firmware update failed. "
9199						"%d\n", rc);
9200				goto out;
9201			}
9202		}
9203		rc = offset;
9204	}
9205out:
9206	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9207		list_del(&dmabuf->list);
9208		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9209				  dmabuf->virt, dmabuf->phys);
9210		kfree(dmabuf);
9211	}
9212	return rc;
9213}
9214
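/*
 * Illustrative sketch (not part of the upstream driver): the download loop
 * above streams the image through a small fixed pool of SLI4_PAGE_SIZE DMA
 * buffers instead of mapping the whole file at once.  The chunking logic,
 * reduced to its core (names hypothetical):
 *
 *	size_t pos, n;
 *
 *	while (offset < fw->size) {
 *		pos = offset;
 *		list_for_each_entry(buf, &pool, list) {
 *			n = min_t(size_t, SLI4_PAGE_SIZE, fw->size - pos);
 *			memcpy(buf->virt, fw->data + pos, n);
 *			pos += n;
 *			if (pos == fw->size)
 *				break;
 *		}
 *		rc = lpfc_wr_object(phba, &pool, fw->size - offset, &offset);
 *	}
 */
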
9215/**
9216 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
9217 * @pdev: pointer to PCI device
9218 * @pid: pointer to PCI device identifier
9219 *
9220 * This routine is called from the kernel's PCI subsystem to attach a device
9221 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9222 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9223 * information of the device and driver to see if the driver states that it
9224 * can support this kind of device. If the match is successful, the driver
9225 * core invokes this routine. If this routine determines it can claim the HBA,
9226 * it does all the initialization that it needs to do to handle the HBA
9227 * properly.
9228 *
9229 * Return code
9230 * 	0 - driver can claim the device
9231 * 	negative value - driver can not claim the device
9232 **/
9233static int __devinit
9234lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9235{
9236	struct lpfc_hba   *phba;
9237	struct lpfc_vport *vport = NULL;
9238	struct Scsi_Host  *shost = NULL;
9239	int error;
9240	uint32_t cfg_mode, intr_mode;
9241	int mcnt;
9242	int adjusted_fcp_eq_count;
9243	const struct firmware *fw;
9244	uint8_t file_name[16];
9245
9246	/* Allocate memory for HBA structure */
9247	phba = lpfc_hba_alloc(pdev);
9248	if (!phba)
9249		return -ENOMEM;
9250
9251	/* Perform generic PCI device enabling operation */
9252	error = lpfc_enable_pci_dev(phba);
9253	if (error)
9254		goto out_free_phba;
9255
9256	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
9257	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
9258	if (error)
9259		goto out_disable_pci_dev;
9260
9261	/* Set up SLI-4 specific device PCI memory space */
9262	error = lpfc_sli4_pci_mem_setup(phba);
9263	if (error) {
9264		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9265				"1410 Failed to set up pci memory space.\n");
9266		goto out_disable_pci_dev;
9267	}
9268
9269	/* Set up phase-1 common device driver resources */
9270	error = lpfc_setup_driver_resource_phase1(phba);
9271	if (error) {
9272		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9273				"1411 Failed to set up driver resource.\n");
9274		goto out_unset_pci_mem_s4;
9275	}
9276
9277	/* Set up SLI-4 Specific device driver resources */
9278	error = lpfc_sli4_driver_resource_setup(phba);
9279	if (error) {
9280		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9281				"1412 Failed to set up driver resource.\n");
9282		goto out_unset_pci_mem_s4;
9283	}
9284
9285	/* Initialize and populate the iocb list per host */
9286
9287	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9288			"2821 initialize iocb list %d.\n",
9289			phba->cfg_iocb_cnt*1024);
9290	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9291
9292	if (error) {
9293		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9294				"1413 Failed to initialize iocb list.\n");
9295		goto out_unset_driver_resource_s4;
9296	}
9297
9298	INIT_LIST_HEAD(&phba->active_rrq_list);
9299	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9300
9301	/* Set up common device driver resources */
9302	error = lpfc_setup_driver_resource_phase2(phba);
9303	if (error) {
9304		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9305				"1414 Failed to set up driver resource.\n");
9306		goto out_free_iocb_list;
9307	}
9308
9309	/* Get the default values for Model Name and Description */
9310	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9311
9312	/* Create SCSI host to the physical port */
9313	error = lpfc_create_shost(phba);
9314	if (error) {
9315		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9316				"1415 Failed to create scsi host.\n");
9317		goto out_unset_driver_resource;
9318	}
9319
9320	/* Configure sysfs attributes */
9321	vport = phba->pport;
9322	error = lpfc_alloc_sysfs_attr(vport);
9323	if (error) {
9324		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9325				"1416 Failed to allocate sysfs attr\n");
9326		goto out_destroy_shost;
9327	}
9328
9329	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9330	/* Now, trying to enable interrupt and bring up the device */
9331	cfg_mode = phba->cfg_use_msi;
9332	while (true) {
9333		/* Put device to a known state before enabling interrupt */
9334		lpfc_stop_port(phba);
9335		/* Configure and enable interrupt */
9336		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9337		if (intr_mode == LPFC_INTR_ERROR) {
9338			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9339					"0426 Failed to enable interrupt.\n");
9340			error = -ENODEV;
9341			goto out_free_sysfs_attr;
9342		}
9343		/* Default to single EQ for non-MSI-X */
9344		if (phba->intr_type != MSIX)
9345			adjusted_fcp_eq_count = 0;
9346		else if (phba->sli4_hba.msix_vec_nr <
9347					phba->cfg_fcp_eq_count + 1)
9348			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9349		else
9350			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9351		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
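		/*
		 * Note: the adjustment above reserves one MSI-X vector for
		 * the slow-path EQ, leaving msix_vec_nr - 1 vectors for
		 * fast-path FCP EQs; non-MSI-X modes fall back to a single
		 * shared EQ (adjusted_fcp_eq_count = 0).
		 */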
9352		/* Set up SLI-4 HBA */
9353		if (lpfc_sli4_hba_setup(phba)) {
9354			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9355					"1421 Failed to set up hba\n");
9356			error = -ENODEV;
9357			goto out_disable_intr;
9358		}
9359
9360		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
9361		if (intr_mode != 0)
9362			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9363							    LPFC_ACT_INTR_CNT);
9364
9365		/* Check active interrupts received only for MSI/MSI-X */
9366		if (intr_mode == 0 ||
9367		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9368			/* Log the current active interrupt mode */
9369			phba->intr_mode = intr_mode;
9370			lpfc_log_intr_mode(phba, intr_mode);
9371			break;
9372		}
9373		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9374				"0451 Configure interrupt mode (%d) "
9375				"failed active interrupt test.\n",
9376				intr_mode);
9377		/* Unset the previous SLI-4 HBA setup. */
9378		/*
9379		 * TODO:  Is this operation compatible with IF TYPE 2
9380		 * devices?  All port state is deleted and cleared.
9381		 */
9382		lpfc_sli4_unset_hba(phba);
9383		/* Try next level of interrupt mode */
9384		cfg_mode = --intr_mode;
9385	}
9386
9387	/* Perform post initialization setup */
9388	lpfc_post_init_setup(phba);
9389
9390	/* check for firmware upgrade or downgrade (if_type 2 only) */
9391	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9392	    LPFC_SLI_INTF_IF_TYPE_2) {
9393		snprintf(file_name, 16, "%s.grp", phba->ModelName);
9394		error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9395		if (!error) {
9396			lpfc_write_firmware(phba, fw);
9397			release_firmware(fw);
9398		}
9399	}
9400
9401	/* Check if there are static vports to be created. */
9402	lpfc_create_static_vport(phba);
9403	return 0;
9404
9405out_disable_intr:
9406	lpfc_sli4_disable_intr(phba);
9407out_free_sysfs_attr:
9408	lpfc_free_sysfs_attr(vport);
9409out_destroy_shost:
9410	lpfc_destroy_shost(phba);
9411out_unset_driver_resource:
9412	lpfc_unset_driver_resource_phase2(phba);
9413out_free_iocb_list:
9414	lpfc_free_iocb_list(phba);
9415out_unset_driver_resource_s4:
9416	lpfc_sli4_driver_resource_unset(phba);
9417out_unset_pci_mem_s4:
9418	lpfc_sli4_pci_mem_unset(phba);
9419out_disable_pci_dev:
9420	lpfc_disable_pci_dev(phba);
9421	if (shost)
9422		scsi_host_put(shost);
9423out_free_phba:
9424	lpfc_hba_free(phba);
9425	return error;
9426}
9427
9428/**
9429 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9430 * @pdev: pointer to PCI device
9431 *
9432 * This routine is called from the kernel's PCI subsystem to detach a device with
9433 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9434 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9435 * device to be removed from the PCI subsystem properly.
9436 **/
9437static void __devexit
9438lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9439{
9440	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9441	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9442	struct lpfc_vport **vports;
9443	struct lpfc_hba *phba = vport->phba;
9444	int i;
9445
9446	/* Mark the device unloading flag */
9447	spin_lock_irq(&phba->hbalock);
9448	vport->load_flag |= FC_UNLOADING;
9449	spin_unlock_irq(&phba->hbalock);
9450
9451	/* Free the HBA sysfs attributes */
9452	lpfc_free_sysfs_attr(vport);
9453
9454	/* Release all the vports against this physical port */
9455	vports = lpfc_create_vport_work_array(phba);
9456	if (vports != NULL)
9457		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
9458			fc_vport_terminate(vports[i]->fc_vport);
9459	lpfc_destroy_vport_work_array(phba, vports);
9460
9461	/* Remove FC host and then SCSI host with the physical port */
9462	fc_remove_host(shost);
9463	scsi_remove_host(shost);
9464
9465	/* Perform cleanup on the physical port */
9466	lpfc_cleanup(vport);
9467
9468	/*
9469	 * Bring down the SLI Layer. This step disables all interrupts,
9470	 * clears the rings, discards all mailbox commands, and resets
9471	 * the HBA FCoE function.
9472	 */
9473	lpfc_debugfs_terminate(vport);
9474	lpfc_sli4_hba_unset(phba);
9475
9476	spin_lock_irq(&phba->hbalock);
9477	list_del_init(&vport->listentry);
9478	spin_unlock_irq(&phba->hbalock);
9479
9480	/* Perform scsi free before driver resource_unset since scsi
9481	 * buffers are released to their corresponding pools here.
9482	 */
9483	lpfc_scsi_free(phba);
9484	lpfc_sli4_driver_resource_unset(phba);
9485
9486	/* Unmap adapter Control and Doorbell registers */
9487	lpfc_sli4_pci_mem_unset(phba);
9488
9489	/* Release PCI resources and disable device's PCI function */
9490	scsi_host_put(shost);
9491	lpfc_disable_pci_dev(phba);
9492
9493	/* Finally, free the driver's device data structure */
9494	lpfc_hba_free(phba);
9495
9496	return;
9497}
9498
9499/**
9500 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9501 * @pdev: pointer to PCI device
9502 * @msg: power management message
9503 *
9504 * This routine is called from the kernel's PCI subsystem to support system
9505 * Power Management (PM) for a device with SLI-4 interface spec. When PM
9506 * invokes this method, it quiesces the device by stopping the driver's worker
9507 * thread for the device, turning off the device's interrupt and DMA, and
9508 * bringing the device offline. Note that because the driver implements only
9509 * the minimum PM requirements for a power-aware driver's suspend/resume
9510 * support -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
9511 * suspend() method call are treated as SUSPEND and the driver fully
9512 * reinitializes its device during the resume() method call -- the driver sets
9513 * the device to PCI_D3hot state in PCI config space instead of setting it
9514 * according to the @msg provided by the PM.
9515 *
9516 * Return code
9517 * 	0 - driver suspended the device
9518 * 	Error otherwise
9519 **/
9520static int
9521lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9522{
9523	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9524	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9525
9526	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9527			"2843 PCI device Power Management suspend.\n");
9528
9529	/* Bring down the device */
9530	lpfc_offline_prep(phba);
9531	lpfc_offline(phba);
9532	kthread_stop(phba->worker_thread);
9533
9534	/* Disable interrupt from device */
9535	lpfc_sli4_disable_intr(phba);
9536	lpfc_sli4_queue_destroy(phba);
9537
9538	/* Save device state to PCI config space */
9539	pci_save_state(pdev);
9540	pci_set_power_state(pdev, PCI_D3hot);
9541
9542	return 0;
9543}
9544
9545/**
9546 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9547 * @pdev: pointer to PCI device
9548 *
9549 * This routine is called from the kernel's PCI subsystem to support system
9550 * Power Management (PM) for a device with SLI-4 interface spec. When PM
9551 * invokes this method, it restores the device's PCI config space state and
9552 * fully reinitializes the device and brings it online. Note that because the
9553 * driver implements only the minimum PM requirements for a power-aware
9554 * driver's suspend/resume support -- all the possible PM messages (SUSPEND,
9555 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND and
9556 * the driver fully reinitializes its device during the resume() method call --
9557 * the device is set to PCI_D0 directly in PCI config space before restoring
9558 * the state.
9559 *
9560 * Return code
9561 * 	0 - driver resumed the device
9562 * 	Error otherwise
9563 **/
9564static int
9565lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9566{
9567	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9568	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9569	uint32_t intr_mode;
9570	int error;
9571
9572	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9573			"0292 PCI device Power Management resume.\n");
9574
9575	/* Restore device state from PCI config space */
9576	pci_set_power_state(pdev, PCI_D0);
9577	pci_restore_state(pdev);
9578
9579	/*
9580	 * As the new kernel behavior of pci_restore_state() API call clears
9581	 * device saved_state flag, need to save the restored state again.
9582	 */
9583	pci_save_state(pdev);
9584
9585	if (pdev->is_busmaster)
9586		pci_set_master(pdev);
9587
9588	/* Startup the kernel thread for this host adapter. */
9589	phba->worker_thread = kthread_run(lpfc_do_work, phba,
9590					"lpfc_worker_%d", phba->brd_no);
9591	if (IS_ERR(phba->worker_thread)) {
9592		error = PTR_ERR(phba->worker_thread);
9593		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9594				"0293 PM resume failed to start worker "
9595				"thread: error=x%x.\n", error);
9596		return error;
9597	}
9598
9599	/* Configure and enable interrupt */
9600	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9601	if (intr_mode == LPFC_INTR_ERROR) {
9602		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9603				"0294 PM resume Failed to enable interrupt\n");
9604		return -EIO;
9605	} else
9606		phba->intr_mode = intr_mode;
9607
9608	/* Restart HBA and bring it online */
9609	lpfc_sli_brdrestart(phba);
9610	lpfc_online(phba);
9611
9612	/* Log the current active interrupt mode */
9613	lpfc_log_intr_mode(phba, phba->intr_mode);
9614
9615	return 0;
9616}
9617
9618/**
9619 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
9620 * @phba: pointer to lpfc hba data structure.
9621 *
9622 * This routine is called to prepare the SLI4 device for PCI slot recover. It
9623 * aborts all the outstanding SCSI I/Os to the pci device.
9624 **/
9625static void
9626lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9627{
9628	struct lpfc_sli *psli = &phba->sli;
9629	struct lpfc_sli_ring  *pring;
9630
9631	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9632			"2828 PCI channel I/O abort preparing for recovery\n");
9633	/*
9634	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9635	 * and let the SCSI mid-layer retry them to recover.
9636	 */
9637	pring = &psli->ring[psli->fcp_ring];
9638	lpfc_sli_abort_iocb_ring(phba, pring);
9639}
9640
9641/**
9642 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9643 * @phba: pointer to lpfc hba data structure.
9644 *
9645 * This routine is called to prepare the SLI4 device for PCI slot reset. It
9646 * disables the device interrupt and pci device, and aborts the internal FCP
9647 * pending I/Os.
9648 **/
9649static void
9650lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9651{
9652	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9653			"2826 PCI channel disable preparing for reset\n");
9654
9655	/* Block any management I/Os to the device */
9656	lpfc_block_mgmt_io(phba);
9657
9658	/* Block all SCSI devices' I/Os on the host */
9659	lpfc_scsi_dev_block(phba);
9660
9661	/* stop all timers */
9662	lpfc_stop_hba_timers(phba);
9663
9664	/* Disable interrupt and pci device */
9665	lpfc_sli4_disable_intr(phba);
9666	lpfc_sli4_queue_destroy(phba);
9667	pci_disable_device(phba->pcidev);
9668
9669	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
9670	lpfc_sli_flush_fcp_rings(phba);
9671}
9672
9673/**
9674 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9675 * @phba: pointer to lpfc hba data structure.
9676 *
9677 * This routine is called to prepare the SLI4 device for PCI slot permanently
9678 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9679 * pending I/Os.
9680 **/
9681static void
9682lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9683{
9684	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9685			"2827 PCI channel permanent disable for failure\n");
9686
9687	/* Block all SCSI devices' I/Os on the host */
9688	lpfc_scsi_dev_block(phba);
9689
9690	/* stop all timers */
9691	lpfc_stop_hba_timers(phba);
9692
9693	/* Clean up all driver's outstanding SCSI I/Os */
9694	lpfc_sli_flush_fcp_rings(phba);
9695}
9696
9697/**
9698 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9699 * @pdev: pointer to PCI device.
9700 * @state: the current PCI connection state.
9701 *
9702 * This routine is called from the PCI subsystem for error handling to a device
9703 * with SLI-4 interface spec. This function is called by the PCI subsystem
9704 * after a PCI bus error affecting this device has been detected. When this
9705 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9706 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9707 * for the PCI subsystem to perform proper recovery as desired.
9708 *
9709 * Return codes
9710 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9711 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9712 **/
9713static pci_ers_result_t
9714lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9715{
9716	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9717	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9718
9719	switch (state) {
9720	case pci_channel_io_normal:
9721		/* Non-fatal error, prepare for recovery */
9722		lpfc_sli4_prep_dev_for_recover(phba);
9723		return PCI_ERS_RESULT_CAN_RECOVER;
9724	case pci_channel_io_frozen:
9725		/* Fatal error, prepare for slot reset */
9726		lpfc_sli4_prep_dev_for_reset(phba);
9727		return PCI_ERS_RESULT_NEED_RESET;
9728	case pci_channel_io_perm_failure:
9729		/* Permanent failure, prepare for device down */
9730		lpfc_sli4_prep_dev_for_perm_failure(phba);
9731		return PCI_ERS_RESULT_DISCONNECT;
9732	default:
9733		/* Unknown state, prepare and request slot reset */
9734		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9735				"2825 Unknown PCI error state: x%x\n", state);
9736		lpfc_sli4_prep_dev_for_reset(phba);
9737		return PCI_ERS_RESULT_NEED_RESET;
9738	}
9739}
9740
9741/**
9742 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
9743 * @pdev: pointer to PCI device.
9744 *
9745 * This routine is called from the PCI subsystem for error handling to a device
9746 * with SLI-4 interface spec. It is called after PCI bus has been reset to
9747 * restart the PCI card from scratch, as if from a cold-boot. During the
9748 * PCI subsystem error recovery, after the driver returns
9749 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9750 * recovery and then call this routine before calling the .resume method to
9751 * recover the device. This function will initialize the HBA device, enable
9752 * the interrupt, but it will just put the HBA to offline state without
9753 * passing any I/O traffic.
9754 *
9755 * Return codes
9756 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9757 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9758 */
9759static pci_ers_result_t
9760lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9761{
9762	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9763	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9764	struct lpfc_sli *psli = &phba->sli;
9765	uint32_t intr_mode;
9766
9767	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9768	if (pci_enable_device_mem(pdev)) {
9769		printk(KERN_ERR "lpfc: Cannot re-enable "
9770			"PCI device after reset.\n");
9771		return PCI_ERS_RESULT_DISCONNECT;
9772	}
9773
9774	pci_restore_state(pdev);
9775
9776	/*
9777	 * As the new kernel behavior of pci_restore_state() API call clears
9778	 * device saved_state flag, need to save the restored state again.
9779	 */
9780	pci_save_state(pdev);
9781
9782	if (pdev->is_busmaster)
9783		pci_set_master(pdev);
9784
9785	spin_lock_irq(&phba->hbalock);
9786	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9787	spin_unlock_irq(&phba->hbalock);
9788
9789	/* Configure and enable interrupt */
9790	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9791	if (intr_mode == LPFC_INTR_ERROR) {
9792		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9793				"2824 Cannot re-enable interrupt after "
9794				"slot reset.\n");
9795		return PCI_ERS_RESULT_DISCONNECT;
9796	} else
9797		phba->intr_mode = intr_mode;
9798
9799	/* Log the current active interrupt mode */
9800	lpfc_log_intr_mode(phba, phba->intr_mode);
9801
9802	return PCI_ERS_RESULT_RECOVERED;
9803}
9804
9805/**
9806 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9807 * @pdev: pointer to PCI device
9808 *
9809 * This routine is called from the PCI subsystem for error handling to a device
9810 * with SLI-4 interface spec. It is called when kernel error recovery tells
9811 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9812 * error recovery. After this call, traffic can start to flow from this device
9813 * again.
9814 **/
9815static void
9816lpfc_io_resume_s4(struct pci_dev *pdev)
9817{
9818	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9819	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9820
9821	/*
9822	 * In case of slot reset, as function reset is performed through
9823	 * mailbox command which needs DMA to be enabled, this operation
9824	 * has to be moved to the io resume phase. Taking device offline
9825	 * will perform the necessary cleanup.
9826	 */
9827	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9828		/* Perform device reset */
9829		lpfc_offline_prep(phba);
9830		lpfc_offline(phba);
9831		lpfc_sli_brdrestart(phba);
9832		/* Bring the device back online */
9833		lpfc_online(phba);
9834	}
9835
9836	/* Clean up Advanced Error Reporting (AER) if needed */
9837	if (phba->hba_flag & HBA_AER_ENABLED)
9838		pci_cleanup_aer_uncorrect_error_status(pdev);
9839}
9840
9841/**
9842 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9843 * @pdev: pointer to PCI device
9844 * @pid: pointer to PCI device identifier
9845 *
9846 * This routine is to be registered to the kernel's PCI subsystem. When an
9847 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9848 * at PCI device-specific information of the device and driver to see if the
9849 * driver states that it can support this kind of device. If the match is
9850 * successful, the driver core invokes this routine. This routine dispatches
9851 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9852 * do all the initialization that it needs to do to handle the HBA device
9853 * properly.
9854 *
9855 * Return code
9856 * 	0 - driver can claim the device
9857 * 	negative value - driver can not claim the device
9858 **/
9859static int __devinit
9860lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9861{
9862	int rc;
9863	struct lpfc_sli_intf intf;
9864
9865	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9866		return -ENODEV;
9867
9868	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9869	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9870		rc = lpfc_pci_probe_one_s4(pdev, pid);
9871	else
9872		rc = lpfc_pci_probe_one_s3(pdev, pid);
9873
9874	return rc;
9875}
9876
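/*
 * Illustrative sketch (not part of the upstream driver): probe-time
 * dispatch keys off the SLI_INTF register read from PCI config space;
 * every later entry point then keys off phba->pci_dev_grp instead, so
 * config space is read only once.  Condensed form of the test above:
 *
 *	bool sli4;
 *
 *	sli4 = bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID &&
 *	       bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4;
 *	rc = sli4 ? lpfc_pci_probe_one_s4(pdev, pid)
 *		  : lpfc_pci_probe_one_s3(pdev, pid);
 */
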
9877/**
9878 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9879 * @pdev: pointer to PCI device
9880 *
9881 * This routine is to be registered to the kernel's PCI subsystem. When an
9882 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9883 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9884 * remove routine, which will perform all the necessary cleanup for the
9885 * device to be removed from the PCI subsystem properly.
9886 **/
9887static void __devexit
9888lpfc_pci_remove_one(struct pci_dev *pdev)
9889{
9890	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9891	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9892
9893	switch (phba->pci_dev_grp) {
9894	case LPFC_PCI_DEV_LP:
9895		lpfc_pci_remove_one_s3(pdev);
9896		break;
9897	case LPFC_PCI_DEV_OC:
9898		lpfc_pci_remove_one_s4(pdev);
9899		break;
9900	default:
9901		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9902				"1424 Invalid PCI device group: 0x%x\n",
9903				phba->pci_dev_grp);
9904		break;
9905	}
9906	return;
9907}
9908
9909/**
9910 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9911 * @pdev: pointer to PCI device
9912 * @msg: power management message
9913 *
9914 * This routine is to be registered to the kernel's PCI subsystem to support
9915 * system Power Management (PM). When PM invokes this method, it dispatches
9916 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9917 * suspend the device.
9918 *
9919 * Return code
9920 * 	0 - driver suspended the device
9921 * 	Error otherwise
9922 **/
9923static int
9924lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9925{
9926	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9927	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9928	int rc = -ENODEV;
9929
9930	switch (phba->pci_dev_grp) {
9931	case LPFC_PCI_DEV_LP:
9932		rc = lpfc_pci_suspend_one_s3(pdev, msg);
9933		break;
9934	case LPFC_PCI_DEV_OC:
9935		rc = lpfc_pci_suspend_one_s4(pdev, msg);
9936		break;
9937	default:
9938		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9939				"1425 Invalid PCI device group: 0x%x\n",
9940				phba->pci_dev_grp);
9941		break;
9942	}
9943	return rc;
9944}
9945
9946/**
9947 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9948 * @pdev: pointer to PCI device
9949 *
9950 * This routine is to be registered to the kernel's PCI subsystem to support
9951 * system Power Management (PM). When PM invokes this method, it dispatches
9952 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9953 * resume the device.
9954 *
9955 * Return code
9956 * 	0 - driver resumed the device
9957 * 	Error otherwise
9958 **/
9959static int
9960lpfc_pci_resume_one(struct pci_dev *pdev)
9961{
9962	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9963	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9964	int rc = -ENODEV;
9965
9966	switch (phba->pci_dev_grp) {
9967	case LPFC_PCI_DEV_LP:
9968		rc = lpfc_pci_resume_one_s3(pdev);
9969		break;
9970	case LPFC_PCI_DEV_OC:
9971		rc = lpfc_pci_resume_one_s4(pdev);
9972		break;
9973	default:
9974		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9975				"1426 Invalid PCI device group: 0x%x\n",
9976				phba->pci_dev_grp);
9977		break;
9978	}
9979	return rc;
9980}
9981
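/*
 * lpfc_pci_suspend_one() and lpfc_pci_resume_one() are the legacy PCI
 * power-management callbacks wired into struct pci_driver further below
 * (.suspend/.resume).  The PCI core drives them around a system sleep
 * transition, roughly (illustrative only; the real calls are made by the
 * PCI core, not by driver code):
 *
 *	lpfc_driver.suspend(pdev, PMSG_SUSPEND);	// going to sleep
 *	...
 *	lpfc_driver.resume(pdev);			// on wakeup
 */
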
9982/**
9983 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
9984 * @pdev: pointer to PCI device.
9985 * @state: the current PCI connection state.
9986 *
9987 * This routine is registered to the PCI subsystem for error handling. This
9988 * function is called by the PCI subsystem after a PCI bus error affecting
9989 * this device has been detected. When this routine is invoked, it dispatches
9990 * the action to the proper SLI-3 or SLI-4 device error detected handling
9991 * routine, which will perform the proper error detected operation.
9992 *
9993 * Return codes
9994 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9995 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9996 **/
9997static pci_ers_result_t
9998lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9999{
10000	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10001	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10002	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10003
10004	switch (phba->pci_dev_grp) {
10005	case LPFC_PCI_DEV_LP:
10006		rc = lpfc_io_error_detected_s3(pdev, state);
10007		break;
10008	case LPFC_PCI_DEV_OC:
10009		rc = lpfc_io_error_detected_s4(pdev, state);
10010		break;
10011	default:
10012		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10013				"1427 Invalid PCI device group: 0x%x\n",
10014				phba->pci_dev_grp);
10015		break;
10016	}
10017	return rc;
10018}
10019
10020/**
10021 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
10022 * @pdev: pointer to PCI device.
10023 *
10024 * This routine is registered to the PCI subsystem for error handling. This
10025 * function is called after the PCI bus has been reset to restart the PCI card
10026 * from scratch, as if from a cold-boot. When this routine is invoked, it
10027 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
10028 * routine, which will perform the proper device reset.
10029 *
10030 * Return codes
10031 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
10032 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10033 **/
10034static pci_ers_result_t
10035lpfc_io_slot_reset(struct pci_dev *pdev)
10036{
10037	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10038	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10039	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10040
10041	switch (phba->pci_dev_grp) {
10042	case LPFC_PCI_DEV_LP:
10043		rc = lpfc_io_slot_reset_s3(pdev);
10044		break;
10045	case LPFC_PCI_DEV_OC:
10046		rc = lpfc_io_slot_reset_s4(pdev);
10047		break;
10048	default:
10049		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10050				"1428 Invalid PCI device group: 0x%x\n",
10051				phba->pci_dev_grp);
10052		break;
10053	}
10054	return rc;
10055}
10056
10057/**
10058 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
10059 * @pdev: pointer to PCI device
10060 *
10061 * This routine is registered to the PCI subsystem for error handling. It
10062 * is called when the kernel's error recovery tells the lpfc driver that it is
10063 * OK to resume normal PCI operation after PCI bus error recovery. When
10064 * this routine is invoked, it dispatches the action to the proper SLI-3
10065 * or SLI-4 device io_resume routine, which will resume the device operation.
10066 **/
10067static void
10068lpfc_io_resume(struct pci_dev *pdev)
10069{
10070	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10071	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10072
10073	switch (phba->pci_dev_grp) {
10074	case LPFC_PCI_DEV_LP:
10075		lpfc_io_resume_s3(pdev);
10076		break;
10077	case LPFC_PCI_DEV_OC:
10078		lpfc_io_resume_s4(pdev);
10079		break;
10080	default:
10081		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10082				"1429 Invalid PCI device group: 0x%x\n",
10083				phba->pci_dev_grp);
10084		break;
10085	}
10086	return;
10087}
10088
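/*
 * Together, lpfc_io_error_detected(), lpfc_io_slot_reset() and
 * lpfc_io_resume() implement the kernel's PCI error recovery contract
 * (see Documentation/PCI/pci-error-recovery.txt).  The AER core drives
 * them in order, approximately (a simplified sketch of the core's
 * sequencing, not driver code):
 *
 *	rc = err_handler->error_detected(pdev, state);
 *	if (rc == PCI_ERS_RESULT_NEED_RESET)
 *		rc = err_handler->slot_reset(pdev);	// after link reset
 *	if (rc == PCI_ERS_RESULT_RECOVERED)
 *		err_handler->resume(pdev);		// resume normal I/O
 */
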
10089/**
10090 * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
10091 * @inode: pointer to the inode representing the lpfcmgmt device
10092 * @filep: pointer to the file representing the open lpfcmgmt device
10093 *
10094 * This routine takes a reference on the lpfc module whenever the
10095 * character device is opened.
10096 **/
10097static int
10098lpfc_mgmt_open(struct inode *inode, struct file *filep)
10099{
10100	try_module_get(THIS_MODULE);
10101	return 0;
10102}
10103
10104/**
10105 * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
10106 * @inode: pointer to the inode representing the lpfcmgmt device
10107 * @filep: pointer to the file representing the open lpfcmgmt device
10108 *
10109 * This routine drops the reference on the lpfc module when the
10110 * character device is closed.
10111 **/
10112static int
10113lpfc_mgmt_release(struct inode *inode, struct file *filep)
10114{
10115	module_put(THIS_MODULE);
10116	return 0;
10117}
10118
10119static const struct pci_device_id lpfc_id_table[] = {
10120	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
10121		PCI_ANY_ID, PCI_ANY_ID, },
10122	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
10123		PCI_ANY_ID, PCI_ANY_ID, },
10124	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
10125		PCI_ANY_ID, PCI_ANY_ID, },
10126	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
10127		PCI_ANY_ID, PCI_ANY_ID, },
10128	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
10129		PCI_ANY_ID, PCI_ANY_ID, },
10130	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
10131		PCI_ANY_ID, PCI_ANY_ID, },
10132	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
10133		PCI_ANY_ID, PCI_ANY_ID, },
10134	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
10135		PCI_ANY_ID, PCI_ANY_ID, },
10136	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
10137		PCI_ANY_ID, PCI_ANY_ID, },
10138	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
10139		PCI_ANY_ID, PCI_ANY_ID, },
10140	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
10141		PCI_ANY_ID, PCI_ANY_ID, },
10142	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
10143		PCI_ANY_ID, PCI_ANY_ID, },
10144	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
10145		PCI_ANY_ID, PCI_ANY_ID, },
10146	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
10147		PCI_ANY_ID, PCI_ANY_ID, },
10148	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
10149		PCI_ANY_ID, PCI_ANY_ID, },
10150	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
10151		PCI_ANY_ID, PCI_ANY_ID, },
10152	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
10153		PCI_ANY_ID, PCI_ANY_ID, },
10154	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
10155		PCI_ANY_ID, PCI_ANY_ID, },
10156	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
10157		PCI_ANY_ID, PCI_ANY_ID, },
10158	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
10159		PCI_ANY_ID, PCI_ANY_ID, },
10160	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
10161		PCI_ANY_ID, PCI_ANY_ID, },
10162	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
10163		PCI_ANY_ID, PCI_ANY_ID, },
10164	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
10165		PCI_ANY_ID, PCI_ANY_ID, },
10166	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
10167		PCI_ANY_ID, PCI_ANY_ID, },
10168	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
10169		PCI_ANY_ID, PCI_ANY_ID, },
10170	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
10171		PCI_ANY_ID, PCI_ANY_ID, },
10172	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
10173		PCI_ANY_ID, PCI_ANY_ID, },
10174	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
10175		PCI_ANY_ID, PCI_ANY_ID, },
10176	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
10177		PCI_ANY_ID, PCI_ANY_ID, },
10178	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
10179		PCI_ANY_ID, PCI_ANY_ID, },
10180	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
10181		PCI_ANY_ID, PCI_ANY_ID, },
10182	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
10183		PCI_ANY_ID, PCI_ANY_ID, },
10184	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
10185		PCI_ANY_ID, PCI_ANY_ID, },
10186	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
10187		PCI_ANY_ID, PCI_ANY_ID, },
10188	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
10189		PCI_ANY_ID, PCI_ANY_ID, },
10190	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
10191		PCI_ANY_ID, PCI_ANY_ID, },
10192	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
10193		PCI_ANY_ID, PCI_ANY_ID, },
10194	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
10195		PCI_ANY_ID, PCI_ANY_ID, },
10196	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
10197		PCI_ANY_ID, PCI_ANY_ID, },
10198	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
10199		PCI_ANY_ID, PCI_ANY_ID, },
10200	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
10201		PCI_ANY_ID, PCI_ANY_ID, },
10202	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
10203		PCI_ANY_ID, PCI_ANY_ID, },
10204	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
10205		PCI_ANY_ID, PCI_ANY_ID, },
10206	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
10207		PCI_ANY_ID, PCI_ANY_ID, },
10208	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
10209		PCI_ANY_ID, PCI_ANY_ID, },
10210	{ 0 }
10211};
10212
10213MODULE_DEVICE_TABLE(pci, lpfc_id_table);
10214
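/*
 * MODULE_DEVICE_TABLE() exports lpfc_id_table to userspace so that
 * depmod can emit one modalias per entry; udev then autoloads lpfc when
 * a matching function appears.  Illustrative form of such an alias
 * (vendor 0x10df is Emulex; the device id shown is a placeholder):
 *
 *	$ modinfo lpfc | grep alias
 *	alias: pci:v000010DFd0000XXXXsv*sd*bc*sc*i*
 */
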
10215static struct pci_error_handlers lpfc_err_handler = {
10216	.error_detected = lpfc_io_error_detected,
10217	.slot_reset = lpfc_io_slot_reset,
10218	.resume = lpfc_io_resume,
10219};
10220
10221static struct pci_driver lpfc_driver = {
10222	.name		= LPFC_DRIVER_NAME,
10223	.id_table	= lpfc_id_table,
10224	.probe		= lpfc_pci_probe_one,
10225	.remove		= __devexit_p(lpfc_pci_remove_one),
10226	.suspend        = lpfc_pci_suspend_one,
10227	.resume		= lpfc_pci_resume_one,
10228	.err_handler    = &lpfc_err_handler,
10229};
10230
10231static const struct file_operations lpfc_mgmt_fop = {
10232	.open = lpfc_mgmt_open,
10233	.release = lpfc_mgmt_release,
10234};
10235
10236static struct miscdevice lpfc_mgmt_dev = {
10237	.minor = MISC_DYNAMIC_MINOR,
10238	.name = "lpfcmgmt",
10239	.fops = &lpfc_mgmt_fop,
10240};
10241
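/*
 * Registering lpfc_mgmt_dev creates /dev/lpfcmgmt with a dynamically
 * assigned misc minor.  Note that lpfc_mgmt_fop does not set .owner; the
 * module is pinned manually by the open/release pair above instead.
 * Hedged userspace sketch of the effect:
 *
 *	int fd = open("/dev/lpfcmgmt", O_RDONLY);	// refcount++
 *	// "rmmod lpfc" now reports the module as in use
 *	close(fd);					// refcount--
 */
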
10242/**
10243 * lpfc_init - lpfc module initialization routine
10244 *
10245 * This routine is to be invoked when the lpfc module is loaded into the
10246 * kernel. The special kernel macro module_init() is used to indicate the
10247 * role of this routine to the kernel as the lpfc module entry point.
10248 *
10249 * Return codes
10250 *   0 - successful
10251 *   -ENOMEM - FC attach transport failed
10252 *   all others - failed
10253 */
10254static int __init
10255lpfc_init(void)
10256{
10257	int error = 0;
10258
10259	printk(LPFC_MODULE_DESC "\n");
10260	printk(LPFC_COPYRIGHT "\n");
10261
10262	error = misc_register(&lpfc_mgmt_dev);
10263	if (error)
10264		printk(KERN_ERR "Could not register lpfcmgmt device, "
10265			"misc_register returned with status %d\n", error);
10266
10267	if (lpfc_enable_npiv) {
10268		lpfc_transport_functions.vport_create = lpfc_vport_create;
10269		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
10270	}
10271	lpfc_transport_template =
10272				fc_attach_transport(&lpfc_transport_functions);
10273	if (lpfc_transport_template == NULL)
10274		return -ENOMEM;
10275	if (lpfc_enable_npiv) {
10276		lpfc_vport_transport_template =
10277			fc_attach_transport(&lpfc_vport_transport_functions);
10278		if (lpfc_vport_transport_template == NULL) {
10279			fc_release_transport(lpfc_transport_template);
10280			return -ENOMEM;
10281		}
10282	}
10283	error = pci_register_driver(&lpfc_driver);
10284	if (error) {
10285		fc_release_transport(lpfc_transport_template);
10286		if (lpfc_enable_npiv)
10287			fc_release_transport(lpfc_vport_transport_template);
10288	}
10289
10290	return error;
10291}
10292
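/*
 * Note on the error paths above: a misc_register() failure is treated as
 * non-fatal (only the /dev/lpfcmgmt node is lost), but when a later step
 * fails the management device stays registered even though the module
 * load is aborted.  One possible unwind, sketched under the assumption
 * that misc_register() succeeded:
 *
 *	if (lpfc_transport_template == NULL) {
 *		error = -ENOMEM;
 *		goto unregister;
 *	}
 *	...
 * unregister:
 *	misc_deregister(&lpfc_mgmt_dev);
 *	return error;
 */
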
10293/**
10294 * lpfc_exit - lpfc module removal routine
10295 *
10296 * This routine is invoked when the lpfc module is removed from the kernel.
10297 * The special kernel macro module_exit() is used to indicate the role of
10298 * this routine to the kernel as the lpfc module exit point.
10299 */
10300static void __exit
10301lpfc_exit(void)
10302{
10303	misc_deregister(&lpfc_mgmt_dev);
10304	pci_unregister_driver(&lpfc_driver);
10305	fc_release_transport(lpfc_transport_template);
10306	if (lpfc_enable_npiv)
10307		fc_release_transport(lpfc_vport_transport_template);
10308	if (_dump_buf_data) {
10309		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
10310				"_dump_buf_data at 0x%p\n",
10311				(1L << _dump_buf_data_order), _dump_buf_data);
10312		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
10313	}
10314
10315	if (_dump_buf_dif) {
10316		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
10317				"_dump_buf_dif at 0x%p\n",
10318				(1L << _dump_buf_dif_order), _dump_buf_dif);
10319		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
10320	}
10321}
10322
10323module_init(lpfc_init);
10324module_exit(lpfc_exit);
10325MODULE_LICENSE("GPL");
10326MODULE_DESCRIPTION(LPFC_MODULE_DESC);
10327MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
10328MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
10329