lpfc_init.c revision eff4a01b6e9f8cee3c541ab7f2ad18b5bbffd124
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

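	/* For LightPulse (LC) HBAs, hand the GPL license key to the
	 * firmware through READ_NVPARM; the key text is byte-swapped to
	 * big-endian exactly once.
	 */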
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

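	/* If SLI-3 was requested, the firmware must acknowledge it in the
	 * v3rsp bit of the READ_REV response.
	 */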
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
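		/* Clamp the returned count so the copy below cannot run past
		 * the end of the lpfc_vpd_data buffer.
		 */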
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the config port completed correctly the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
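		/* Expand the 6 IEEE address bytes into 12 characters, one
		 * nibble at a time ('0'-'9', 'a'-'f').
		 */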
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
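		/* Free the mailbox here only if the completion handler will
		 * not do it: lpfc_sli_def_mbox_cmpl releases a queued
		 * (MBX_BUSY) MBX_NOWAIT command.
		 */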
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

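	/* Detach the aborted commands and mark the buffers clean before
	 * returning them to the SCSI buffer free list.
	 */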
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back with no error conditions detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expires
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset the heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer has fired and an HBA-timeout event has been
 * posted. This handler performs any periodic operations needed for the
 * device. If such a periodic event has already been attended to either in
 * the interrupt handler or by processing slow-ring or fast-ring events
 * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
 * simply resets the timer for the next timeout period. If the lpfc
 * heart-beat mailbox command is configured and there is no heart-beat
 * mailbox command outstanding, a heart-beat mailbox is issued and the timer
 * set properly. Otherwise, if there has been a heart-beat mailbox command
 * outstanding, the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

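	/* If the ELS buffer count has not changed since the last heartbeat
	 * interval, release the accumulated preposted buffers.
	 */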
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it has triggered erratt. That could cause
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/O) on
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
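/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor event to the FC transport
 * so that a management application is notified of a port internal error.
 **/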
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/O) on txcmplq and let the SCSI
		 * layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
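	/* The location and layout of the error reporting registers depend
	 * on the SLI interface type of the port.
	 */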
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");
		/*
		 * On an error status condition, the driver needs to wait for
		 * port ready before performing the reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				/* don't report event on forced debug dump */
				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
					return;
				else
					break;
			}
			/* fall through for not able to recover */
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
1657
1658/**
1659 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1660 * @phba: pointer to lpfc hba data structure.
1661 * @vpd: pointer to the vital product data.
1662 * @len: length of the vital product data in bytes.
1663 *
1664 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1665 * an array of characters. In this routine, the ModelName, ProgramType,
1666 * ModelDesc, and related fields of the phba data structure are populated.
1667 *
1668 * Return codes
1669 *   0 - pointer to the VPD passed in is NULL
1670 *   1 - success
1671 **/
1672int
1673lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1674{
1675	uint8_t lenlo, lenhi;
1676	int Length;
1677	int i, j;
1678	int finished = 0;
1679	int index = 0;
1680
1681	if (!vpd)
1682		return 0;
1683
1684	/* Vital Product */
1685	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1686			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1687			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1688			(uint32_t) vpd[3]);
1689	while (!finished && (index < (len - 4))) {
1690		switch (vpd[index]) {
1691		case 0x82:
1692		case 0x91:
1693			index += 1;
1694			lenlo = vpd[index];
1695			index += 1;
1696			lenhi = vpd[index];
1697			index += 1;
1698			i = ((((unsigned short)lenhi) << 8) + lenlo);
1699			index += i;
1700			break;
1701		case 0x90:
1702			index += 1;
1703			lenlo = vpd[index];
1704			index += 1;
1705			lenhi = vpd[index];
1706			index += 1;
1707			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1708			if (Length > len - index)
1709				Length = len - index;
1710			while (Length > 0) {
1711			/* Look for Serial Number */
1712			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1713				index += 2;
1714				i = vpd[index];
1715				index += 1;
1716				j = 0;
1717				Length -= (3+i);
1718				while(i--) {
1719					phba->SerialNumber[j++] = vpd[index++];
1720					if (j == 31)
1721						break;
1722				}
1723				phba->SerialNumber[j] = 0;
1724				continue;
1725			}
1726			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1727				phba->vpd_flag |= VPD_MODEL_DESC;
1728				index += 2;
1729				i = vpd[index];
1730				index += 1;
1731				j = 0;
1732				Length -= (3+i);
1733				while(i--) {
1734					phba->ModelDesc[j++] = vpd[index++];
1735					if (j == 255)
1736						break;
1737				}
1738				phba->ModelDesc[j] = 0;
1739				continue;
1740			}
1741			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1742				phba->vpd_flag |= VPD_MODEL_NAME;
1743				index += 2;
1744				i = vpd[index];
1745				index += 1;
1746				j = 0;
1747				Length -= (3+i);
1748				while(i--) {
1749					phba->ModelName[j++] = vpd[index++];
1750					if (j == 79)
1751						break;
1752				}
1753				phba->ModelName[j] = 0;
1754				continue;
1755			}
1756			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1757				phba->vpd_flag |= VPD_PROGRAM_TYPE;
1758				index += 2;
1759				i = vpd[index];
1760				index += 1;
1761				j = 0;
1762				Length -= (3+i);
1763				while(i--) {
1764					phba->ProgramType[j++] = vpd[index++];
1765					if (j == 255)
1766						break;
1767				}
1768				phba->ProgramType[j] = 0;
1769				continue;
1770			}
1771			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1772				phba->vpd_flag |= VPD_PORT;
1773				index += 2;
1774				i = vpd[index];
1775				index += 1;
1776				j = 0;
1777				Length -= (3+i);
1778				while(i--) {
1779					if ((phba->sli_rev == LPFC_SLI_REV4) &&
1780					    (phba->sli4_hba.pport_name_sta ==
1781					     LPFC_SLI4_PPNAME_GET)) {
1782						j++;
1783						index++;
1784					} else
1785						phba->Port[j++] = vpd[index++];
1786					if (j == 19)
1787						break;
1788				}
1789				if ((phba->sli_rev != LPFC_SLI_REV4) ||
1790				    (phba->sli4_hba.pport_name_sta ==
1791				     LPFC_SLI4_PPNAME_NON))
1792					phba->Port[j] = 0;
1793				continue;
1794			}
1795			else {
1796				index += 2;
1797				i = vpd[index];
1798				index += 1;
1799				index += i;
1800				Length -= (3 + i);
1801			}
1802		}
1803		finished = 0;
1804		break;
1805		case 0x78:
1806			finished = 1;
1807			break;
1808		default:
1809			index++;
1810			break;
1811		}
1812	}
1813
1814	return 1;
1815}
1816
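/*
 * Illustrative sketch only, compiled out: how a PCI VPD byte stream like the
 * one lpfc_parse_vpd() walks is laid out.  A large-resource tag (0x82
 * identifier string, 0x90 read-only VPD area) is followed by a two-byte
 * little-endian length; inside the 0x90 area each keyword record is two
 * ASCII characters (e.g. "SN") plus a one-byte field length.  The buffer
 * contents and the helper name below are hypothetical.
 */
#if 0
static void vpd_walk_example(void)
{
	static const uint8_t vpd[] = {
		0x90, 0x06, 0x00,		/* read-only VPD, 6 bytes  */
		'S', 'N', 0x03, '1', '2', '3',	/* SN keyword, 3 data bytes */
		0x78				/* end tag                 */
	};
	int index = 0;
	int length = vpd[index + 1] | (vpd[index + 2] << 8);

	index += 3;				/* skip tag + length bytes */
	while (length > 0) {
		uint8_t flen = vpd[index + 2];

		if (vpd[index] == 'S' && vpd[index + 1] == 'N') {
			/* serial number is flen bytes at &vpd[index + 3] */
		}
		length -= 3 + flen;
		index += 3 + flen;
	}
}
#endif
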
1817/**
1818 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1819 * @phba: pointer to lpfc hba data structure.
1820 * @mdp: pointer to the data structure to hold the derived model name.
1821 * @descp: pointer to the data structure to hold the derived description.
1822 *
1823 * This routine retrieves HBA's description based on its registered PCI device
1824 * ID. The @descp passed into this function points to an array of 256 chars. It
1825 * shall be returned with the model name, maximum speed, and the host bus type.
1826 * The @mdp passed into this function points to an array of 80 chars. When the
1827 * function returns, the @mdp will be filled with the model name.
1828 **/
1829static void
1830lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1831{
1832	lpfc_vpd_t *vp;
1833	uint16_t dev_id = phba->pcidev->device;
1834	int max_speed;
1835	int GE = 0;
1836	int oneConnect = 0; /* default is not a oneConnect */
1837	struct {
1838		char *name;
1839		char *bus;
1840		char *function;
1841	} m = {"<Unknown>", "", ""};
1842
1843	if (mdp && mdp[0] != '\0'
1844		&& descp && descp[0] != '\0')
1845		return;
1846
1847	if (phba->lmt & LMT_16Gb)
1848		max_speed = 16;
1849	else if (phba->lmt & LMT_10Gb)
1850		max_speed = 10;
1851	else if (phba->lmt & LMT_8Gb)
1852		max_speed = 8;
1853	else if (phba->lmt & LMT_4Gb)
1854		max_speed = 4;
1855	else if (phba->lmt & LMT_2Gb)
1856		max_speed = 2;
1857	else
1858		max_speed = 1;
1859
1860	vp = &phba->vpd;
1861
1862	switch (dev_id) {
1863	case PCI_DEVICE_ID_FIREFLY:
1864		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1865		break;
1866	case PCI_DEVICE_ID_SUPERFLY:
1867		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1868			m = (typeof(m)){"LP7000", "PCI",
1869					"Fibre Channel Adapter"};
1870		else
1871			m = (typeof(m)){"LP7000E", "PCI",
1872					"Fibre Channel Adapter"};
1873		break;
1874	case PCI_DEVICE_ID_DRAGONFLY:
1875		m = (typeof(m)){"LP8000", "PCI",
1876				"Fibre Channel Adapter"};
1877		break;
1878	case PCI_DEVICE_ID_CENTAUR:
1879		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1880			m = (typeof(m)){"LP9002", "PCI",
1881					"Fibre Channel Adapter"};
1882		else
1883			m = (typeof(m)){"LP9000", "PCI",
1884					"Fibre Channel Adapter"};
1885		break;
1886	case PCI_DEVICE_ID_RFLY:
1887		m = (typeof(m)){"LP952", "PCI",
1888				"Fibre Channel Adapter"};
1889		break;
1890	case PCI_DEVICE_ID_PEGASUS:
1891		m = (typeof(m)){"LP9802", "PCI-X",
1892				"Fibre Channel Adapter"};
1893		break;
1894	case PCI_DEVICE_ID_THOR:
1895		m = (typeof(m)){"LP10000", "PCI-X",
1896				"Fibre Channel Adapter"};
1897		break;
1898	case PCI_DEVICE_ID_VIPER:
1899		m = (typeof(m)){"LPX1000",  "PCI-X",
1900				"Fibre Channel Adapter"};
1901		break;
1902	case PCI_DEVICE_ID_PFLY:
1903		m = (typeof(m)){"LP982", "PCI-X",
1904				"Fibre Channel Adapter"};
1905		break;
1906	case PCI_DEVICE_ID_TFLY:
1907		m = (typeof(m)){"LP1050", "PCI-X",
1908				"Fibre Channel Adapter"};
1909		break;
1910	case PCI_DEVICE_ID_HELIOS:
1911		m = (typeof(m)){"LP11000", "PCI-X2",
1912				"Fibre Channel Adapter"};
1913		break;
1914	case PCI_DEVICE_ID_HELIOS_SCSP:
1915		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1916				"Fibre Channel Adapter"};
1917		break;
1918	case PCI_DEVICE_ID_HELIOS_DCSP:
1919		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1920				"Fibre Channel Adapter"};
1921		break;
1922	case PCI_DEVICE_ID_NEPTUNE:
1923		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1924		break;
1925	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1926		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1927		break;
1928	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1929		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1930		break;
1931	case PCI_DEVICE_ID_BMID:
1932		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1933		break;
1934	case PCI_DEVICE_ID_BSMB:
1935		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1936		break;
1937	case PCI_DEVICE_ID_ZEPHYR:
1938		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1939		break;
1940	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1941		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1942		break;
1943	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1944		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1945		GE = 1;
1946		break;
1947	case PCI_DEVICE_ID_ZMID:
1948		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1949		break;
1950	case PCI_DEVICE_ID_ZSMB:
1951		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1952		break;
1953	case PCI_DEVICE_ID_LP101:
1954		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1955		break;
1956	case PCI_DEVICE_ID_LP10000S:
1957		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1958		break;
1959	case PCI_DEVICE_ID_LP11000S:
1960		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1961		break;
1962	case PCI_DEVICE_ID_LPE11000S:
1963		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1964		break;
1965	case PCI_DEVICE_ID_SAT:
1966		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1967		break;
1968	case PCI_DEVICE_ID_SAT_MID:
1969		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1970		break;
1971	case PCI_DEVICE_ID_SAT_SMB:
1972		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1973		break;
1974	case PCI_DEVICE_ID_SAT_DCSP:
1975		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1976		break;
1977	case PCI_DEVICE_ID_SAT_SCSP:
1978		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1979		break;
1980	case PCI_DEVICE_ID_SAT_S:
1981		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1982		break;
1983	case PCI_DEVICE_ID_HORNET:
1984		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1985		GE = 1;
1986		break;
1987	case PCI_DEVICE_ID_PROTEUS_VF:
1988		m = (typeof(m)){"LPev12000", "PCIe IOV",
1989				"Fibre Channel Adapter"};
1990		break;
1991	case PCI_DEVICE_ID_PROTEUS_PF:
1992		m = (typeof(m)){"LPev12000", "PCIe IOV",
1993				"Fibre Channel Adapter"};
1994		break;
1995	case PCI_DEVICE_ID_PROTEUS_S:
1996		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1997				"Fibre Channel Adapter"};
1998		break;
1999	case PCI_DEVICE_ID_TIGERSHARK:
2000		oneConnect = 1;
2001		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2002		break;
2003	case PCI_DEVICE_ID_TOMCAT:
2004		oneConnect = 1;
2005		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2006		break;
2007	case PCI_DEVICE_ID_FALCON:
2008		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2009				"EmulexSecure Fibre"};
2010		break;
2011	case PCI_DEVICE_ID_BALIUS:
2012		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2013				"Fibre Channel Adapter"};
2014		break;
2015	case PCI_DEVICE_ID_LANCER_FC:
2016	case PCI_DEVICE_ID_LANCER_FC_VF:
2017		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2018		break;
2019	case PCI_DEVICE_ID_LANCER_FCOE:
2020	case PCI_DEVICE_ID_LANCER_FCOE_VF:
2021		oneConnect = 1;
2022		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2023		break;
2024	default:
2025		m = (typeof(m)){"Unknown", "", ""};
2026		break;
2027	}
2028
2029	if (mdp && mdp[0] == '\0')
2030		snprintf(mdp, 79, "%s", m.name);
2031	/*
2032	 * OneConnect HBAs require special processing; they are all initiators
2033	 * and the port number is appended to the description
2034	 */
2035	if (descp && descp[0] == '\0') {
2036		if (oneConnect)
2037			snprintf(descp, 255,
2038				"Emulex OneConnect %s, %s Initiator, Port %s",
2039				m.name, m.function,
2040				phba->Port);
2041		else
2042			snprintf(descp, 255,
2043				"Emulex %s %d%s %s %s",
2044				m.name, max_speed, (GE) ? "GE" : "Gb",
2045				m.bus, m.function);
2046	}
2047}
2048
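/*
 * Illustrative sketch only, compiled out: the "m = (typeof(m)){...}" idiom
 * used throughout lpfc_get_hba_model_desc() above.  GNU C's typeof names
 * the anonymous struct type, and the C99 compound literal builds a
 * temporary that is copied in a single assignment.  The function below is
 * hypothetical.
 */
#if 0
static void compound_literal_example(void)
{
	struct { char *name; char *bus; char *function; } m;

	/* Any field omitted from the literal would be zero-initialized. */
	m = (typeof(m)){ "LPe12000", "PCIe", "Fibre Channel Adapter" };
	(void)m;
}
#endif
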
2049/**
2050 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2051 * @phba: pointer to lpfc hba data structure.
2052 * @pring: pointer to a IOCB ring.
2053 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2054 *
2055 * This routine posts a given number of IOCBs with the associated DMA buffer
2056 * descriptors specified by the cnt argument to the given IOCB ring.
2057 *
2058 * Return codes
2059 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2060 **/
2061int
2062lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2063{
2064	IOCB_t *icmd;
2065	struct lpfc_iocbq *iocb;
2066	struct lpfc_dmabuf *mp1, *mp2;
2067
2068	cnt += pring->missbufcnt;
2069
2070	/* While there are buffers to post */
2071	while (cnt > 0) {
2072		/* Allocate buffer for command iocb */
2073		iocb = lpfc_sli_get_iocbq(phba);
2074		if (iocb == NULL) {
2075			pring->missbufcnt = cnt;
2076			return cnt;
2077		}
2078		icmd = &iocb->iocb;
2079
2080		/* 2 buffers can be posted per command */
2081		/* Allocate buffer to post */
2082		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2083		if (mp1)
2084			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2085		if (!mp1 || !mp1->virt) {
2086			kfree(mp1);
2087			lpfc_sli_release_iocbq(phba, iocb);
2088			pring->missbufcnt = cnt;
2089			return cnt;
2090		}
2091
2092		INIT_LIST_HEAD(&mp1->list);
2093		/* Allocate buffer to post */
2094		if (cnt > 1) {
2095			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2096			if (mp2)
2097				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2098							    &mp2->phys);
2099			if (!mp2 || !mp2->virt) {
2100				kfree(mp2);
2101				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2102				kfree(mp1);
2103				lpfc_sli_release_iocbq(phba, iocb);
2104				pring->missbufcnt = cnt;
2105				return cnt;
2106			}
2107
2108			INIT_LIST_HEAD(&mp2->list);
2109		} else {
2110			mp2 = NULL;
2111		}
2112
2113		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2114		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2115		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2116		icmd->ulpBdeCount = 1;
2117		cnt--;
2118		if (mp2) {
2119			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2120			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2121			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2122			cnt--;
2123			icmd->ulpBdeCount = 2;
2124		}
2125
2126		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2127		icmd->ulpLe = 1;
2128
2129		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2130		    IOCB_ERROR) {
2131			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2132			kfree(mp1);
2133			cnt++;
2134			if (mp2) {
2135				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2136				kfree(mp2);
2137				cnt++;
2138			}
2139			lpfc_sli_release_iocbq(phba, iocb);
2140			pring->missbufcnt = cnt;
2141			return cnt;
2142		}
2143		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2144		if (mp2)
2145			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2146	}
2147	pring->missbufcnt = 0;
2148	return 0;
2149}
2150
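/*
 * Illustrative sketch only, compiled out: splitting a 64-bit DMA address
 * into the two 32-bit words a BDE carries, which is the job
 * putPaddrHigh()/putPaddrLow() do for the cont64[] entries above.  The
 * function name is hypothetical.
 */
#if 0
static void bde_addr_split_example(uint64_t phys)
{
	uint32_t addr_high = (uint32_t)(phys >> 32);
	uint32_t addr_low  = (uint32_t)(phys & 0xffffffff);

	/* e.g. phys = 0x0000000123456000 -> high 0x1, low 0x23456000 */
	(void)addr_high;
	(void)addr_low;
}
#endif
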
2151/**
2152 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2153 * @phba: pointer to lpfc hba data structure.
2154 *
2155 * This routine posts initial receive IOCB buffers to the ELS ring. The
2156 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2157 * set to 64 IOCBs.
2158 *
2159 * Return codes
2160 *   0 - success (currently always success)
2161 **/
2162static int
2163lpfc_post_rcv_buf(struct lpfc_hba *phba)
2164{
2165	struct lpfc_sli *psli = &phba->sli;
2166
2167	/* Ring 0, ELS / CT buffers */
2168	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2169	/* Ring 2 - FCP no buffers needed */
2170
2171	return 0;
2172}
2173
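/* S(N,V): rotate the 32-bit value V left by N bits (the SHA-1 ROTL operation). */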
2174#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2175
2176/**
2177 * lpfc_sha_init - Set up initial array of hash table entries
2178 * @HashResultPointer: pointer to an array as hash table.
2179 *
2180 * This routine sets up the initial values to the array of hash table entries
2181 * for the LC HBAs.
2182 **/
2183static void
2184lpfc_sha_init(uint32_t * HashResultPointer)
2185{
2186	HashResultPointer[0] = 0x67452301;
2187	HashResultPointer[1] = 0xEFCDAB89;
2188	HashResultPointer[2] = 0x98BADCFE;
2189	HashResultPointer[3] = 0x10325476;
2190	HashResultPointer[4] = 0xC3D2E1F0;
2191}
2192
2193/**
2194 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2195 * @HashResultPointer: pointer to an initial/result hash table.
2196 * @HashWorkingPointer: pointer to a working hash table.
2197 *
2198 * This routine iterates an initial hash table pointed to by @HashResultPointer
2199 * with the values from the working hash table pointed to by @HashWorkingPointer.
2200 * The results are put back into the initial hash table, returned through
2201 * the @HashResultPointer as the result hash table.
2202 **/
2203static void
2204lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2205{
2206	int t;
2207	uint32_t TEMP;
2208	uint32_t A, B, C, D, E;
2209	t = 16;
2210	do {
2211		HashWorkingPointer[t] =
2212		    S(1,
2213		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
2215		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2216	} while (++t <= 79);
2217	t = 0;
2218	A = HashResultPointer[0];
2219	B = HashResultPointer[1];
2220	C = HashResultPointer[2];
2221	D = HashResultPointer[3];
2222	E = HashResultPointer[4];
2223
2224	do {
2225		if (t < 20) {
2226			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2227		} else if (t < 40) {
2228			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2229		} else if (t < 60) {
2230			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2231		} else {
2232			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2233		}
2234		TEMP += S(5, A) + E + HashWorkingPointer[t];
2235		E = D;
2236		D = C;
2237		C = S(30, B);
2238		B = A;
2239		A = TEMP;
2240	} while (++t <= 79);
2241
2242	HashResultPointer[0] += A;
2243	HashResultPointer[1] += B;
2244	HashResultPointer[2] += C;
2245	HashResultPointer[3] += D;
2246	HashResultPointer[4] += E;
2247
2248}
2249
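/*
 * Illustrative sketch only, compiled out: lpfc_sha_init() loads the five
 * standard SHA-1 chaining values (H0..H4) and lpfc_sha_iterate() runs the
 * 80-round compression over one 512-bit block.  HashWorking[0..15] must
 * hold the 16 message words; lpfc_sha_iterate() expands them in place to
 * 80 words.  The function name and the all-zero block are hypothetical.
 */
#if 0
static void sha_one_block_example(void)
{
	uint32_t digest[5];
	uint32_t work[80] = { 0 };	/* words 0..15 = message block */

	lpfc_sha_init(digest);		/* 0x67452301, 0xEFCDAB89, ...  */
	lpfc_sha_iterate(digest, work);	/* digest += compress(block)    */
}
#endif
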
2250/**
2251 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2252 * @RandomChallenge: pointer to the entry of host challenge random number array.
2253 * @HashWorking: pointer to the entry of the working hash array.
2254 *
2255 * This routine calculates the working hash array referred to by @HashWorking
2256 * from the challenge random numbers associated with the host, referred to by
2257 * @RandomChallenge. The result is put into the entry of the working hash
2258 * array and returned by reference through @HashWorking.
2259 **/
2260static void
2261lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2262{
2263	*HashWorking = (*RandomChallenge ^ *HashWorking);
2264}
2265
2266/**
2267 * lpfc_hba_init - Perform special handling for LC HBA initialization
2268 * @phba: pointer to lpfc hba data structure.
2269 * @hbainit: pointer to an array of unsigned 32-bit integers.
2270 *
2271 * This routine performs the special handling for LC HBA initialization.
2272 **/
2273void
2274lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2275{
2276	int t;
2277	uint32_t *HashWorking;
2278	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2279
2280	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2281	if (!HashWorking)
2282		return;
2283
2284	HashWorking[0] = HashWorking[78] = *pwwnn++;
2285	HashWorking[1] = HashWorking[79] = *pwwnn;
2286
2287	for (t = 0; t < 7; t++)
2288		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2289
2290	lpfc_sha_init(hbainit);
2291	lpfc_sha_iterate(hbainit, HashWorking);
2292	kfree(HashWorking);
2293}
2294
2295/**
2296 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2297 * @vport: pointer to a virtual N_Port data structure.
2298 *
2299 * This routine performs the necessary cleanups before deleting the @vport.
2300 * It invokes the discovery state machine to perform necessary state
2301 * transitions and to release the ndlps associated with the @vport. Note,
2302 * the physical port is treated as @vport 0.
2303 **/
2304void
2305lpfc_cleanup(struct lpfc_vport *vport)
2306{
2307	struct lpfc_hba   *phba = vport->phba;
2308	struct lpfc_nodelist *ndlp, *next_ndlp;
2309	int i = 0;
2310
2311	if (phba->link_state > LPFC_LINK_DOWN)
2312		lpfc_port_link_failure(vport);
2313
2314	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2315		if (!NLP_CHK_NODE_ACT(ndlp)) {
2316			ndlp = lpfc_enable_node(vport, ndlp,
2317						NLP_STE_UNUSED_NODE);
2318			if (!ndlp)
2319				continue;
2320			spin_lock_irq(&phba->ndlp_lock);
2321			NLP_SET_FREE_REQ(ndlp);
2322			spin_unlock_irq(&phba->ndlp_lock);
2323			/* Trigger the release of the ndlp memory */
2324			lpfc_nlp_put(ndlp);
2325			continue;
2326		}
2327		spin_lock_irq(&phba->ndlp_lock);
2328		if (NLP_CHK_FREE_REQ(ndlp)) {
2329			/* The ndlp should not be in memory free mode already */
2330			spin_unlock_irq(&phba->ndlp_lock);
2331			continue;
2332		} else
2333			/* Indicate request for freeing ndlp memory */
2334			NLP_SET_FREE_REQ(ndlp);
2335		spin_unlock_irq(&phba->ndlp_lock);
2336
2337		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2338		    ndlp->nlp_DID == Fabric_DID) {
2339			/* Just free up ndlp with Fabric_DID for vports */
2340			lpfc_nlp_put(ndlp);
2341			continue;
2342		}
2343
2344		/* take care of nodes in unused state before the state
2345		 * machine takes action.
2346		 */
2347		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2348			lpfc_nlp_put(ndlp);
2349			continue;
2350		}
2351
2352		if (ndlp->nlp_type & NLP_FABRIC)
2353			lpfc_disc_state_machine(vport, ndlp, NULL,
2354					NLP_EVT_DEVICE_RECOVERY);
2355
2356		lpfc_disc_state_machine(vport, ndlp, NULL,
2357					     NLP_EVT_DEVICE_RM);
2358	}
2359
2360	/* At this point, ALL ndlp's should be gone
2361	 * because of the previous NLP_EVT_DEVICE_RM.
2362	 * Let's wait for this to happen, if needed.
2363	 */
2364	while (!list_empty(&vport->fc_nodes)) {
2365		if (i++ > 3000) {
2366			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2367				"0233 Nodelist not empty\n");
2368			list_for_each_entry_safe(ndlp, next_ndlp,
2369						&vport->fc_nodes, nlp_listp) {
2370				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2371						LOG_NODE,
2372						"0282 did:x%x ndlp:x%p "
2373						"usgmap:x%x refcnt:%d\n",
2374						ndlp->nlp_DID, (void *)ndlp,
2375						ndlp->nlp_usg_map,
2376						atomic_read(
2377							&ndlp->kref.refcount));
2378			}
2379			break;
2380		}
2381
2382		/* Wait for any activity on ndlps to settle */
2383		msleep(10);
2384	}
2385	lpfc_cleanup_vports_rrqs(vport, NULL);
2386}
2387
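/*
 * Illustrative sketch only, compiled out: lpfc_cleanup() above walks
 * fc_nodes with the _safe iterator because lpfc_nlp_put() may free the
 * current entry.  The _safe variant caches the next pointer up front, so
 * the current node may be unlinked and freed inside the loop body.  The
 * "struct item" type and function name below are hypothetical.
 */
#if 0
struct item { struct list_head list; };

static void reap_all_example(struct list_head *head)
{
	struct item *it, *next;

	list_for_each_entry_safe(it, next, head, list) {
		list_del(&it->list);	/* safe: 'next' was cached */
		kfree(it);
	}
}
#endif
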
2388/**
2389 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2390 * @vport: pointer to a virtual N_Port data structure.
2391 *
2392 * This routine stops all the timers associated with a @vport. This function
2393 * is invoked before disabling or deleting a @vport. Note that the physical
2394 * port is treated as @vport 0.
2395 **/
2396void
2397lpfc_stop_vport_timers(struct lpfc_vport *vport)
2398{
2399	del_timer_sync(&vport->els_tmofunc);
2400	del_timer_sync(&vport->fc_fdmitmo);
2401	del_timer_sync(&vport->delayed_disc_tmo);
2402	lpfc_can_disctmo(vport);
2403	return;
2404}
2405
2406/**
2407 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2408 * @phba: pointer to lpfc hba data structure.
2409 *
2410 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2411 * caller of this routine should already hold the hbalock.
2412 **/
2413void
2414__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2415{
2416	/* Clear pending FCF rediscovery wait flag */
2417	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2418
2419	/* Now, try to stop the timer */
2420	del_timer(&phba->fcf.redisc_wait);
2421}
2422
2423/**
2424 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2425 * @phba: pointer to lpfc hba data structure.
2426 *
2427 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2428 * checks whether the FCF rediscovery wait timer is pending with the host
2429 * lock held before proceeding with disabling the timer and clearing the
2430 * wait timer pending flag.
2431 **/
2432void
2433lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2434{
2435	spin_lock_irq(&phba->hbalock);
2436	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2437		/* FCF rediscovery timer already fired or stopped */
2438		spin_unlock_irq(&phba->hbalock);
2439		return;
2440	}
2441	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2442	/* Clear failover in progress flags */
2443	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2444	spin_unlock_irq(&phba->hbalock);
2445}
2446
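/*
 * Illustrative sketch only, compiled out: the locked/unlocked pairing the
 * two routines above follow.  By driver-wide convention the double
 * underscore prefix marks the variant that assumes the caller already
 * holds the lock; the plain-named wrapper takes the lock, revalidates
 * state, and calls the __ variant.  The function names are hypothetical.
 */
#if 0
static void __example_locked(struct lpfc_hba *phba)
{
	/* caller holds phba->hbalock */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
}

static void example_unlocked(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	__example_locked(phba);
	spin_unlock_irq(&phba->hbalock);
}
#endif
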
2447/**
2448 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2449 * @phba: pointer to lpfc hba data structure.
2450 *
2451 * This routine stops all the timers associated with a HBA. This function is
2452 * invoked before either putting a HBA offline or unloading the driver.
2453 **/
2454void
2455lpfc_stop_hba_timers(struct lpfc_hba *phba)
2456{
2457	lpfc_stop_vport_timers(phba->pport);
2458	del_timer_sync(&phba->sli.mbox_tmo);
2459	del_timer_sync(&phba->fabric_block_timer);
2460	del_timer_sync(&phba->eratt_poll);
2461	del_timer_sync(&phba->hb_tmofunc);
2462	if (phba->sli_rev == LPFC_SLI_REV4) {
2463		del_timer_sync(&phba->rrq_tmr);
2464		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2465	}
2466	phba->hb_outstanding = 0;
2467
2468	switch (phba->pci_dev_grp) {
2469	case LPFC_PCI_DEV_LP:
2470		/* Stop any LightPulse device specific driver timers */
2471		del_timer_sync(&phba->fcp_poll_timer);
2472		break;
2473	case LPFC_PCI_DEV_OC:
2474		/* Stop any OneConnect device specific driver timers */
2475		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2476		break;
2477	default:
2478		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2479				"0297 Invalid device group (x%x)\n",
2480				phba->pci_dev_grp);
2481		break;
2482	}
2483	return;
2484}
2485
2486/**
2487 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2488 * @phba: pointer to lpfc hba data structure.
2489 *
2490 * This routine marks a HBA's management interface as blocked. Once the HBA's
2491 * management interface is marked as blocked, all the user space access to
2492 * the HBA, whether they are from sysfs interface or libdfc interface will
2493 * all be blocked. The HBA is set to block the management interface when the
2494 * driver prepares the HBA interface for online or offline.
2495 **/
2496static void
2497lpfc_block_mgmt_io(struct lpfc_hba *phba)
2498{
2499	unsigned long iflag;
2500	uint8_t actcmd = MBX_HEARTBEAT;
2501	unsigned long timeout;
2502
2503	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2504	spin_lock_irqsave(&phba->hbalock, iflag);
2505	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2506	if (phba->sli.mbox_active) {
2507		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2508		/* Determine how long we might wait for the active mailbox
2509		 * command to be gracefully completed by firmware.
2510		 */
2511		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2512				phba->sli.mbox_active) * 1000) + jiffies;
2513	}
2514	spin_unlock_irqrestore(&phba->hbalock, iflag);
2515
2516	/* Wait for the outstanding mailbox command to complete */
2517	while (phba->sli.mbox_active) {
2518		/* Check active mailbox complete status every 2ms */
2519		msleep(2);
2520		if (time_after(jiffies, timeout)) {
2521			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2522				"2813 Mgmt IO is Blocked %x "
2523				"- mbox cmd %x still active\n",
2524				phba->sli.sli_flag, actcmd);
2525			break;
2526		}
2527	}
2528}
2529
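/*
 * Illustrative sketch only, compiled out: the deadline-poll pattern
 * lpfc_block_mgmt_io() uses above.  time_after() handles jiffies
 * wraparound, so the comparison stays correct even if the counter rolls
 * over during the wait.  The function name is hypothetical.
 */
#if 0
static int wait_mbox_idle_example(struct lpfc_hba *phba, unsigned int tmo_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

	while (phba->sli.mbox_active) {
		msleep(2);		/* poll every 2ms */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
	}
	return 0;
}
#endif
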
2530/**
2531 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2532 * @phba: pointer to lpfc hba data structure.
2533 *
2534 * Allocate RPIs for all active remote nodes. This is needed whenever
2535 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2536 * is to fix up the temporary rpi assignments.
2537 **/
2538void
2539lpfc_sli4_node_prep(struct lpfc_hba *phba)
2540{
2541	struct lpfc_nodelist  *ndlp, *next_ndlp;
2542	struct lpfc_vport **vports;
2543	int i;
2544
2545	if (phba->sli_rev != LPFC_SLI_REV4)
2546		return;
2547
2548	vports = lpfc_create_vport_work_array(phba);
2549	if (vports != NULL) {
2550		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2551			if (vports[i]->load_flag & FC_UNLOADING)
2552				continue;
2553
2554			list_for_each_entry_safe(ndlp, next_ndlp,
2555						 &vports[i]->fc_nodes,
2556						 nlp_listp) {
2557				if (NLP_CHK_NODE_ACT(ndlp))
2558					ndlp->nlp_rpi =
2559						lpfc_sli4_alloc_rpi(phba);
2560			}
2561		}
2562	}
2563	lpfc_destroy_vport_work_array(phba, vports);
2564}
2565
2566/**
2567 * lpfc_online - Initialize and bring a HBA online
2568 * @phba: pointer to lpfc hba data structure.
2569 *
2570 * This routine initializes the HBA and brings a HBA online. During this
2571 * process, the management interface is blocked to prevent user space access
2572 * to the HBA interfering with the driver initialization.
2573 *
2574 * Return codes
2575 *   0 - successful
2576 *   1 - failed
2577 **/
2578int
2579lpfc_online(struct lpfc_hba *phba)
2580{
2581	struct lpfc_vport *vport;
2582	struct lpfc_vport **vports;
2583	int i;
2584
2585	if (!phba)
2586		return 0;
2587	vport = phba->pport;
2588
2589	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2590		return 0;
2591
2592	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2593			"0458 Bring Adapter online\n");
2594
2595	lpfc_block_mgmt_io(phba);
2596
2597	if (!lpfc_sli_queue_setup(phba)) {
2598		lpfc_unblock_mgmt_io(phba);
2599		return 1;
2600	}
2601
2602	if (phba->sli_rev == LPFC_SLI_REV4) {
2603		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2604			lpfc_unblock_mgmt_io(phba);
2605			return 1;
2606		}
2607	} else {
2608		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2609			lpfc_unblock_mgmt_io(phba);
2610			return 1;
2611		}
2612	}
2613
2614	vports = lpfc_create_vport_work_array(phba);
2615	if (vports != NULL)
2616		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2617			struct Scsi_Host *shost;
2618			shost = lpfc_shost_from_vport(vports[i]);
2619			spin_lock_irq(shost->host_lock);
2620			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2621			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2622				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2623			if (phba->sli_rev == LPFC_SLI_REV4)
2624				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2625			spin_unlock_irq(shost->host_lock);
2626		}
2627	lpfc_destroy_vport_work_array(phba, vports);
2628
2629	lpfc_unblock_mgmt_io(phba);
2630	return 0;
2631}
2632
2633/**
2634 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2635 * @phba: pointer to lpfc hba data structure.
2636 *
2637 * This routine marks a HBA's management interface as not blocked. Once the
2638 * HBA's management interface is marked as not blocked, all user space
2639 * access to the HBA, whether from the sysfs interface or the libdfc
2640 * interface, is allowed. The HBA is set to block the management interface
2641 * when the driver prepares the HBA interface for online or offline and then
2642 * set to unblock the management interface afterwards.
2643 **/
2644void
2645lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
2646{
2647	unsigned long iflag;
2648
2649	spin_lock_irqsave(&phba->hbalock, iflag);
2650	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2651	spin_unlock_irqrestore(&phba->hbalock, iflag);
2652}
2653
2654/**
2655 * lpfc_offline_prep - Prepare a HBA to be brought offline
2656 * @phba: pointer to lpfc hba data structure.
2657 *
2658 * This routine is invoked to prepare a HBA to be brought offline. It performs
2659 * unregistration login to all the nodes on all vports and flushes the mailbox
2660 * queue to make it ready to be brought offline.
2661 **/
2662void
2663lpfc_offline_prep(struct lpfc_hba *phba)
2664{
2665	struct lpfc_vport *vport = phba->pport;
2666	struct lpfc_nodelist  *ndlp, *next_ndlp;
2667	struct lpfc_vport **vports;
2668	struct Scsi_Host *shost;
2669	int i;
2670
2671	if (vport->fc_flag & FC_OFFLINE_MODE)
2672		return;
2673
2674	lpfc_block_mgmt_io(phba);
2675
2676	lpfc_linkdown(phba);
2677
2678	/* Issue an unreg_login to all nodes on all vports */
2679	vports = lpfc_create_vport_work_array(phba);
2680	if (vports != NULL) {
2681		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2682			if (vports[i]->load_flag & FC_UNLOADING)
2683				continue;
2684			shost = lpfc_shost_from_vport(vports[i]);
2685			spin_lock_irq(shost->host_lock);
2686			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2687			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2688			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2689			spin_unlock_irq(shost->host_lock);
2690
2691			shost =	lpfc_shost_from_vport(vports[i]);
2692			list_for_each_entry_safe(ndlp, next_ndlp,
2693						 &vports[i]->fc_nodes,
2694						 nlp_listp) {
2695				if (!NLP_CHK_NODE_ACT(ndlp))
2696					continue;
2697				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2698					continue;
2699				if (ndlp->nlp_type & NLP_FABRIC) {
2700					lpfc_disc_state_machine(vports[i], ndlp,
2701						NULL, NLP_EVT_DEVICE_RECOVERY);
2702					lpfc_disc_state_machine(vports[i], ndlp,
2703						NULL, NLP_EVT_DEVICE_RM);
2704				}
2705				spin_lock_irq(shost->host_lock);
2706				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2707
2708				/*
2709				 * Whenever an SLI4 port goes offline, free the
2710				 * RPI.  A new RPI is assigned when the
2711				 * adapter port comes back online.
2712				 */
2713				if (phba->sli_rev == LPFC_SLI_REV4)
2714					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2715
2716				spin_unlock_irq(shost->host_lock);
2717				lpfc_unreg_rpi(vports[i], ndlp);
2718			}
2719		}
2720	}
2721	lpfc_destroy_vport_work_array(phba, vports);
2722
2723	lpfc_sli_mbox_sys_shutdown(phba);
2724}
2725
2726/**
2727 * lpfc_offline - Bring a HBA offline
2728 * @phba: pointer to lpfc hba data structure.
2729 *
2730 * This routine actually brings a HBA offline. It stops all the timers
2731 * associated with the HBA, brings down the SLI layer, and eventually
2732 * marks the HBA as in offline state for the upper layer protocol.
2733 **/
2734void
2735lpfc_offline(struct lpfc_hba *phba)
2736{
2737	struct Scsi_Host  *shost;
2738	struct lpfc_vport **vports;
2739	int i;
2740
2741	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2742		return;
2743
2744	/* stop port and all timers associated with this hba */
2745	lpfc_stop_port(phba);
2746	vports = lpfc_create_vport_work_array(phba);
2747	if (vports != NULL)
2748		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2749			lpfc_stop_vport_timers(vports[i]);
2750	lpfc_destroy_vport_work_array(phba, vports);
2751	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2752			"0460 Bring Adapter offline\n");
2753	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2754	   now.  */
2755	lpfc_sli_hba_down(phba);
2756	spin_lock_irq(&phba->hbalock);
2757	phba->work_ha = 0;
2758	spin_unlock_irq(&phba->hbalock);
2759	vports = lpfc_create_vport_work_array(phba);
2760	if (vports != NULL)
2761		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2762			shost = lpfc_shost_from_vport(vports[i]);
2763			spin_lock_irq(shost->host_lock);
2764			vports[i]->work_port_events = 0;
2765			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2766			spin_unlock_irq(shost->host_lock);
2767		}
2768	lpfc_destroy_vport_work_array(phba, vports);
2769}
2770
2771/**
2772 * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
2773 * @phba: pointer to lpfc hba data structure.
2774 *
2775 * This routine goes through all the scsi buffers in the system and updates the
2776 * Physical XRIs assigned to the SCSI buffer because these may change after any
2777 * firmware reset
2778 *
2779 * Return codes
2780 *   0 - successful (for now, it always returns 0)
2781 **/
2782int
2783lpfc_scsi_buf_update(struct lpfc_hba *phba)
2784{
2785	struct lpfc_scsi_buf *sb, *sb_next;
2786
2787	spin_lock_irq(&phba->hbalock);
2788	spin_lock(&phba->scsi_buf_list_lock);
2789	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
2790		sb->cur_iocbq.sli4_xritag =
2791			phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
2792	spin_unlock(&phba->scsi_buf_list_lock);
2793	spin_unlock_irq(&phba->hbalock);
2794	return 0;
2795}
2796
2797/**
2798 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2799 * @phba: pointer to lpfc hba data structure.
2800 *
2801 * This routine is to free all the SCSI buffers and IOCBs from the driver
2802 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2803 * the internal resources before the device is removed from the system.
2804 *
2805 * Return codes
2806 *   0 - successful (for now, it always returns 0)
2807 **/
2808static int
2809lpfc_scsi_free(struct lpfc_hba *phba)
2810{
2811	struct lpfc_scsi_buf *sb, *sb_next;
2812	struct lpfc_iocbq *io, *io_next;
2813
2814	spin_lock_irq(&phba->hbalock);
2815	/* Release all the lpfc_scsi_bufs maintained by this host. */
2816	spin_lock(&phba->scsi_buf_list_lock);
2817	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2818		list_del(&sb->list);
2819		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2820			      sb->dma_handle);
2821		kfree(sb);
2822		phba->total_scsi_bufs--;
2823	}
2824	spin_unlock(&phba->scsi_buf_list_lock);
2825
2826	/* Release all the lpfc_iocbq entries maintained by this host. */
2827	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2828		list_del(&io->list);
2829		kfree(io);
2830		phba->total_iocbq_bufs--;
2831	}
2832
2833	spin_unlock_irq(&phba->hbalock);
2834	return 0;
2835}
2836
2837/**
2838 * lpfc_create_port - Create an FC port
2839 * @phba: pointer to lpfc hba data structure.
2840 * @instance: a unique integer ID to this FC port.
2841 * @dev: pointer to the device data structure.
2842 *
2843 * This routine creates a FC port for the upper layer protocol. The FC port
2844 * can be created on top of either a physical port or a virtual port provided
2845 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2846 * and associates it with the FC port created before adding the shost to the
2847 * SCSI layer.
2848 *
2849 * Return codes
2850 *   @vport - pointer to the virtual N_Port data structure.
2851 *   NULL - port create failed.
2852 **/
2853struct lpfc_vport *
2854lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2855{
2856	struct lpfc_vport *vport;
2857	struct Scsi_Host  *shost;
2858	int error = 0;
2859
2860	if (dev != &phba->pcidev->dev)
2861		shost = scsi_host_alloc(&lpfc_vport_template,
2862					sizeof(struct lpfc_vport));
2863	else
2864		shost = scsi_host_alloc(&lpfc_template,
2865					sizeof(struct lpfc_vport));
2866	if (!shost)
2867		goto out;
2868
2869	vport = (struct lpfc_vport *) shost->hostdata;
2870	vport->phba = phba;
2871	vport->load_flag |= FC_LOADING;
2872	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2873	vport->fc_rscn_flush = 0;
2874
2875	lpfc_get_vport_cfgparam(vport);
2876	shost->unique_id = instance;
2877	shost->max_id = LPFC_MAX_TARGET;
2878	shost->max_lun = vport->cfg_max_luns;
2879	shost->this_id = -1;
2880	shost->max_cmd_len = 16;
2881	if (phba->sli_rev == LPFC_SLI_REV4) {
2882		shost->dma_boundary =
2883			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2884		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2885	}
2886
2887	/*
2888	 * Set initial can_queue value since 0 is no longer supported and
2889	 * scsi_add_host will fail. This will be adjusted later based on the
2890	 * max xri value determined in hba setup.
2891	 */
2892	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2893	if (dev != &phba->pcidev->dev) {
2894		shost->transportt = lpfc_vport_transport_template;
2895		vport->port_type = LPFC_NPIV_PORT;
2896	} else {
2897		shost->transportt = lpfc_transport_template;
2898		vport->port_type = LPFC_PHYSICAL_PORT;
2899	}
2900
2901	/* Initialize all internally managed lists. */
2902	INIT_LIST_HEAD(&vport->fc_nodes);
2903	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2904	spin_lock_init(&vport->work_port_lock);
2905
2906	init_timer(&vport->fc_disctmo);
2907	vport->fc_disctmo.function = lpfc_disc_timeout;
2908	vport->fc_disctmo.data = (unsigned long)vport;
2909
2910	init_timer(&vport->fc_fdmitmo);
2911	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2912	vport->fc_fdmitmo.data = (unsigned long)vport;
2913
2914	init_timer(&vport->els_tmofunc);
2915	vport->els_tmofunc.function = lpfc_els_timeout;
2916	vport->els_tmofunc.data = (unsigned long)vport;
2917
2918	init_timer(&vport->delayed_disc_tmo);
2919	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
2920	vport->delayed_disc_tmo.data = (unsigned long)vport;
2921
2922	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2923	if (error)
2924		goto out_put_shost;
2925
2926	spin_lock_irq(&phba->hbalock);
2927	list_add_tail(&vport->listentry, &phba->port_list);
2928	spin_unlock_irq(&phba->hbalock);
2929	return vport;
2930
2931out_put_shost:
2932	scsi_host_put(shost);
2933out:
2934	return NULL;
2935}
2936
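/*
 * Illustrative sketch only, compiled out: the (pre-timer_setup era) timer
 * API used in lpfc_create_port() above.  The callback receives the value
 * stashed in timer->data, here the vport pointer.  The function names are
 * hypothetical.
 */
#if 0
static void example_tmo(unsigned long data)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)data;

	/* timer work: typically flag state and wake the worker thread */
	(void)vport;
}

static void example_timer_setup(struct lpfc_vport *vport,
				struct timer_list *tmo)
{
	init_timer(tmo);
	tmo->function = example_tmo;
	tmo->data = (unsigned long)vport;
	mod_timer(tmo, jiffies + msecs_to_jiffies(1000));
	/* del_timer_sync(tmo) must run before the vport is freed */
}
#endif
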
2937/**
2938 * destroy_port -  destroy an FC port
2939 * @vport: pointer to an lpfc virtual N_Port data structure.
2940 *
2941 * This routine destroys a FC port from the upper layer protocol. All the
2942 * resources associated with the port are released.
2943 **/
2944void
2945destroy_port(struct lpfc_vport *vport)
2946{
2947	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2948	struct lpfc_hba  *phba = vport->phba;
2949
2950	lpfc_debugfs_terminate(vport);
2951	fc_remove_host(shost);
2952	scsi_remove_host(shost);
2953
2954	spin_lock_irq(&phba->hbalock);
2955	list_del_init(&vport->listentry);
2956	spin_unlock_irq(&phba->hbalock);
2957
2958	lpfc_cleanup(vport);
2959	return;
2960}
2961
2962/**
2963 * lpfc_get_instance - Get a unique integer ID
2964 *
2965 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2966 * uses the kernel idr facility to perform the task.
2967 *
2968 * Return codes:
2969 *   instance - a unique integer ID allocated as the new instance.
2970 *   -1 - lpfc get instance failed.
2971 **/
2972int
2973lpfc_get_instance(void)
2974{
2975	int instance = 0;
2976
2977	/* Assign an unused number */
2978	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2979		return -1;
2980	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2981		return -1;
2982	return instance;
2983}
2984
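/*
 * Illustrative sketch only, compiled out: the classic two-step idr API
 * used in lpfc_get_instance() above, with the -EAGAIN retry loop the idr
 * documentation of this era prescribed.  The function name is
 * hypothetical.
 */
#if 0
static int example_alloc_id(struct idr *idr, void *ptr)
{
	int id, ret;

again:
	if (!idr_pre_get(idr, GFP_KERNEL))	/* preload free nodes */
		return -ENOMEM;
	ret = idr_get_new(idr, ptr, &id);
	if (ret == -EAGAIN)
		goto again;
	if (ret)
		return ret;
	return id;	/* release later with idr_remove(idr, id) */
}
#endif
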
2985/**
2986 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2987 * @shost: pointer to SCSI host data structure.
2988 * @time: elapsed time of the scan in jiffies.
2989 *
2990 * This routine is called by the SCSI layer with a SCSI host to determine
2991 * whether the scan host is finished.
2992 *
2993 * Note: there is no scan_start function as adapter initialization will have
2994 * asynchronously kicked off the link initialization.
2995 *
2996 * Return codes
2997 *   0 - SCSI host scan is not over yet.
2998 *   1 - SCSI host scan is over.
2999 **/
3000int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3001{
3002	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3003	struct lpfc_hba   *phba = vport->phba;
3004	int stat = 0;
3005
3006	spin_lock_irq(shost->host_lock);
3007
3008	if (vport->load_flag & FC_UNLOADING) {
3009		stat = 1;
3010		goto finished;
3011	}
3012	if (time >= 30 * HZ) {
3013		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3014				"0461 Scanning longer than 30 "
3015				"seconds.  Continuing initialization\n");
3016		stat = 1;
3017		goto finished;
3018	}
3019	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
3020		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3021				"0465 Link down longer than 15 "
3022				"seconds.  Continuing initialization\n");
3023		stat = 1;
3024		goto finished;
3025	}
3026
3027	if (vport->port_state != LPFC_VPORT_READY)
3028		goto finished;
3029	if (vport->num_disc_nodes || vport->fc_prli_sent)
3030		goto finished;
3031	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
3032		goto finished;
3033	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3034		goto finished;
3035
3036	stat = 1;
3037
3038finished:
3039	spin_unlock_irq(shost->host_lock);
3040	return stat;
3041}
3042
3043/**
3044 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
3045 * @shost: pointer to SCSI host data structure.
3046 *
3047 * This routine initializes a given SCSI host attributes on a FC port. The
3048 * SCSI host can be either on top of a physical port or a virtual port.
3049 **/
3050void lpfc_host_attrib_init(struct Scsi_Host *shost)
3051{
3052	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3053	struct lpfc_hba   *phba = vport->phba;
3054	/*
3055	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
3056	 */
3057
3058	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3059	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3060	fc_host_supported_classes(shost) = FC_COS_CLASS3;
3061
3062	memset(fc_host_supported_fc4s(shost), 0,
3063	       sizeof(fc_host_supported_fc4s(shost)));
3064	fc_host_supported_fc4s(shost)[2] = 1;
3065	fc_host_supported_fc4s(shost)[7] = 1;
3066
3067	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3068				 sizeof fc_host_symbolic_name(shost));
3069
3070	fc_host_supported_speeds(shost) = 0;
3071	if (phba->lmt & LMT_16Gb)
3072		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3073	if (phba->lmt & LMT_10Gb)
3074		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3075	if (phba->lmt & LMT_8Gb)
3076		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3077	if (phba->lmt & LMT_4Gb)
3078		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3079	if (phba->lmt & LMT_2Gb)
3080		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3081	if (phba->lmt & LMT_1Gb)
3082		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3083
3084	fc_host_maxframe_size(shost) =
3085		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3086		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
3087
3088	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3089
3090	/* This value is also unchanging */
3091	memset(fc_host_active_fc4s(shost), 0,
3092	       sizeof(fc_host_active_fc4s(shost)));
3093	fc_host_active_fc4s(shost)[2] = 1;
3094	fc_host_active_fc4s(shost)[7] = 1;
3095
3096	fc_host_max_npiv_vports(shost) = phba->max_vpi;
3097	spin_lock_irq(shost->host_lock);
3098	vport->load_flag &= ~FC_LOADING;
3099	spin_unlock_irq(shost->host_lock);
3100}
3101
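/*
 * Illustrative sketch only, compiled out: the max-frame computation in
 * lpfc_host_attrib_init() above joins a 4-bit high nibble with an 8-bit
 * low byte.  E.g. bbRcvSizeMsb = 0x08, bbRcvSizeLsb = 0x00 gives
 * ((0x08 & 0x0F) << 8) | 0x00 = 2048 bytes.  The function name is
 * hypothetical.
 */
#if 0
static uint32_t example_maxframe(uint8_t msb, uint8_t lsb)
{
	return (((uint32_t)msb & 0x0F) << 8) | (uint32_t)lsb;
}
#endif
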
3102/**
3103 * lpfc_stop_port_s3 - Stop SLI3 device port
3104 * @phba: pointer to lpfc hba data structure.
3105 *
3106 * This routine is invoked to stop an SLI3 device port; it stops the device
3107 * from generating interrupts and stops the device driver's timers for the
3108 * device.
3109 **/
3110static void
3111lpfc_stop_port_s3(struct lpfc_hba *phba)
3112{
3113	/* Clear all interrupt enable conditions */
3114	writel(0, phba->HCregaddr);
3115	readl(phba->HCregaddr); /* flush */
3116	/* Clear all pending interrupts */
3117	writel(0xffffffff, phba->HAregaddr);
3118	readl(phba->HAregaddr); /* flush */
3119
3120	/* Reset some HBA SLI setup states */
3121	lpfc_stop_hba_timers(phba);
3122	phba->pport->work_port_events = 0;
3123}
3124
3125/**
3126 * lpfc_stop_port_s4 - Stop SLI4 device port
3127 * @phba: pointer to lpfc hba data structure.
3128 *
3129 * This routine is invoked to stop an SLI4 device port; it stops the device
3130 * from generating interrupts and stops the device driver's timers for the
3131 * device.
3132 **/
3133static void
3134lpfc_stop_port_s4(struct lpfc_hba *phba)
3135{
3136	/* Reset some HBA SLI4 setup states */
3137	lpfc_stop_hba_timers(phba);
3138	phba->pport->work_port_events = 0;
3139	phba->sli4_hba.intr_enable = 0;
3140}
3141
3142/**
3143 * lpfc_stop_port - Wrapper function for stopping hba port
3144 * @phba: Pointer to HBA context object.
3145 *
3146 * This routine wraps the actual SLI3 or SLI4 hba stop port routine invoked
3147 * through the API jump table function pointer in the lpfc_hba struct.
3148 **/
3149void
3150lpfc_stop_port(struct lpfc_hba *phba)
3151{
3152	phba->lpfc_stop_port(phba);
3153}
3154
3155/**
3156 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3157 * @phba: Pointer to hba for which this call is being executed.
3158 *
3159 * This routine starts the timer waiting for the FCF rediscovery to complete.
3160 **/
3161void
3162lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3163{
3164	unsigned long fcf_redisc_wait_tmo =
3165		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3166	/* Start fcf rediscovery wait period timer */
3167	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3168	spin_lock_irq(&phba->hbalock);
3169	/* Allow action to new fcf asynchronous event */
3170	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3171	/* Mark the FCF rediscovery pending state */
3172	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3173	spin_unlock_irq(&phba->hbalock);
3174}
3175
3176/**
3177 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3178 * @ptr: pointer to lpfc hba data structure, cast to unsigned long.
3179 *
3180 * This routine is invoked when the wait for FCF table rediscovery has
3181 * timed out. If new FCF record(s) have been discovered during the wait
3182 * period, a new FCF event is added to the FCoE async event list and the
3183 * worker thread is woken up to process it from the worker thread context.
3185 **/
3186void
3187lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3188{
3189	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3190
3191	/* Don't send FCF rediscovery event if timer cancelled */
3192	spin_lock_irq(&phba->hbalock);
3193	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3194		spin_unlock_irq(&phba->hbalock);
3195		return;
3196	}
3197	/* Clear FCF rediscovery timer pending flag */
3198	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3199	/* FCF rediscovery event to worker thread */
3200	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3201	spin_unlock_irq(&phba->hbalock);
3202	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3203			"2776 FCF rediscover quiescent timer expired\n");
3204	/* wake up worker thread */
3205	lpfc_worker_wake_up(phba);
3206}
3207
3208/**
3209 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3210 * @phba: pointer to lpfc hba data structure.
3211 * @acqe_link: pointer to the async link completion queue entry.
3212 *
3213 * This routine is to parse the SLI4 link-attention link fault code and
3214 * translate it into the base driver's read link attention mailbox command
3215 * status.
3216 *
3217 * Return: Link-attention status in terms of base driver's coding.
3218 **/
3219static uint16_t
3220lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3221			   struct lpfc_acqe_link *acqe_link)
3222{
3223	uint16_t latt_fault;
3224
3225	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3226	case LPFC_ASYNC_LINK_FAULT_NONE:
3227	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3228	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3229		latt_fault = 0;
3230		break;
3231	default:
3232		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3233				"0398 Invalid link fault code: x%x\n",
3234				bf_get(lpfc_acqe_link_fault, acqe_link));
3235		latt_fault = MBXERR_ERROR;
3236		break;
3237	}
3238	return latt_fault;
3239}
3240
3241/**
3242 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3243 * @phba: pointer to lpfc hba data structure.
3244 * @acqe_link: pointer to the async link completion queue entry.
3245 *
3246 * This routine is to parse the SLI4 link attention type and translate it
3247 * into the base driver's link attention type coding.
3248 *
3249 * Return: Link attention type in terms of base driver's coding.
3250 **/
3251static uint8_t
3252lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3253			  struct lpfc_acqe_link *acqe_link)
3254{
3255	uint8_t att_type;
3256
3257	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3258	case LPFC_ASYNC_LINK_STATUS_DOWN:
3259	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3260		att_type = LPFC_ATT_LINK_DOWN;
3261		break;
3262	case LPFC_ASYNC_LINK_STATUS_UP:
3263		/* Ignore physical link up events - wait for logical link up */
3264		att_type = LPFC_ATT_RESERVED;
3265		break;
3266	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3267		att_type = LPFC_ATT_LINK_UP;
3268		break;
3269	default:
3270		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3271				"0399 Invalid link attention type: x%x\n",
3272				bf_get(lpfc_acqe_link_status, acqe_link));
3273		att_type = LPFC_ATT_RESERVED;
3274		break;
3275	}
3276	return att_type;
3277}
3278
3279/**
3280 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3281 * @phba: pointer to lpfc hba data structure.
3282 * @acqe_link: pointer to the async link completion queue entry.
3283 *
3284 * This routine is to parse the SLI4 link-attention link speed and translate
3285 * it into the base driver's link-attention link speed coding.
3286 *
3287 * Return: Link-attention link speed in terms of base driver's coding.
3288 **/
3289static uint8_t
3290lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3291				struct lpfc_acqe_link *acqe_link)
3292{
3293	uint8_t link_speed;
3294
3295	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3296	case LPFC_ASYNC_LINK_SPEED_ZERO:
3297	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3298	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3299		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3300		break;
3301	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3302		link_speed = LPFC_LINK_SPEED_1GHZ;
3303		break;
3304	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3305		link_speed = LPFC_LINK_SPEED_10GHZ;
3306		break;
3307	default:
3308		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3309				"0483 Invalid link-attention link speed: x%x\n",
3310				bf_get(lpfc_acqe_link_speed, acqe_link));
3311		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3312		break;
3313	}
3314	return link_speed;
3315}
3316
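/*
 * Illustrative sketch only, compiled out: an equivalent table-driven form
 * of the speed translation above; the switch in
 * lpfc_sli4_parse_latt_link_speed() is what the driver actually uses.
 * This assumes the LPFC_ASYNC_LINK_SPEED_* codes are the small
 * consecutive values the switch suggests; the function name is
 * hypothetical.
 */
#if 0
static uint8_t example_parse_speed(uint8_t acqe_speed)
{
	static const uint8_t map[] = {
		[LPFC_ASYNC_LINK_SPEED_ZERO]    = LPFC_LINK_SPEED_UNKNOWN,
		[LPFC_ASYNC_LINK_SPEED_10MBPS]  = LPFC_LINK_SPEED_UNKNOWN,
		[LPFC_ASYNC_LINK_SPEED_100MBPS] = LPFC_LINK_SPEED_UNKNOWN,
		[LPFC_ASYNC_LINK_SPEED_1GBPS]   = LPFC_LINK_SPEED_1GHZ,
		[LPFC_ASYNC_LINK_SPEED_10GBPS]  = LPFC_LINK_SPEED_10GHZ,
	};

	if (acqe_speed >= ARRAY_SIZE(map))
		return LPFC_LINK_SPEED_UNKNOWN;
	return map[acqe_speed];
}
#endif
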
3317/**
3318 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3319 * @phba: pointer to lpfc hba data structure.
3320 * @acqe_link: pointer to the async link completion queue entry.
3321 *
3322 * This routine is to handle the SLI4 asynchronous FCoE link event.
3323 **/
3324static void
3325lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3326			 struct lpfc_acqe_link *acqe_link)
3327{
3328	struct lpfc_dmabuf *mp;
3329	LPFC_MBOXQ_t *pmb;
3330	MAILBOX_t *mb;
3331	struct lpfc_mbx_read_top *la;
3332	uint8_t att_type;
3333	int rc;
3334
3335	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3336	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3337		return;
3338	phba->fcoe_eventtag = acqe_link->event_tag;
3339	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3340	if (!pmb) {
3341		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3342				"0395 The mboxq allocation failed\n");
3343		return;
3344	}
3345	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3346	if (!mp) {
3347		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3348				"0396 The lpfc_dmabuf allocation failed\n");
3349		goto out_free_pmb;
3350	}
3351	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3352	if (!mp->virt) {
3353		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3354				"0397 The mbuf allocation failed\n");
3355		goto out_free_dmabuf;
3356	}
3357
3358	/* Cleanup any outstanding ELS commands */
3359	lpfc_els_flush_all_cmd(phba);
3360
3361	/* Block ELS IOCBs until we are done processing the link event */
3362	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3363
3364	/* Update link event statistics */
3365	phba->sli.slistat.link_event++;
3366
3367	/* Create lpfc_handle_latt mailbox command from link ACQE */
3368	lpfc_read_topology(phba, pmb, mp);
3369	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3370	pmb->vport = phba->pport;
3371
3372	/* Keep the link status for extra SLI4 state machine reference */
3373	phba->sli4_hba.link_state.speed =
3374				bf_get(lpfc_acqe_link_speed, acqe_link);
3375	phba->sli4_hba.link_state.duplex =
3376				bf_get(lpfc_acqe_link_duplex, acqe_link);
3377	phba->sli4_hba.link_state.status =
3378				bf_get(lpfc_acqe_link_status, acqe_link);
3379	phba->sli4_hba.link_state.type =
3380				bf_get(lpfc_acqe_link_type, acqe_link);
3381	phba->sli4_hba.link_state.number =
3382				bf_get(lpfc_acqe_link_number, acqe_link);
3383	phba->sli4_hba.link_state.fault =
3384				bf_get(lpfc_acqe_link_fault, acqe_link);
3385	phba->sli4_hba.link_state.logical_speed =
3386			bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3387	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3388			"2900 Async FC/FCoE Link event - Speed:%dGBit "
3389			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3390			"Logical speed:%dMbps Fault:%d\n",
3391			phba->sli4_hba.link_state.speed,
3392			phba->sli4_hba.link_state.duplex,
3393			phba->sli4_hba.link_state.status,
3394			phba->sli4_hba.link_state.type,
3395			phba->sli4_hba.link_state.number,
3396			phba->sli4_hba.link_state.logical_speed * 10,
3397			phba->sli4_hba.link_state.fault);
3398	/*
3399	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3400	 * topology info. Note: Optional for non FC-AL ports.
3401	 */
3402	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3403		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3404		if (rc == MBX_NOT_FINISHED)
			goto out_free_mbuf;
3406		return;
3407	}
3408	/*
3409	 * For FCoE Mode: fill in all the topology information we need and call
3410	 * the READ_TOPOLOGY completion routine to continue without actually
3411	 * sending the READ_TOPOLOGY mailbox command to the port.
3412	 */
3413	/* Parse and translate status field */
3414	mb = &pmb->u.mb;
3415	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3416
3417	/* Parse and translate link attention fields */
3418	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3419	la->eventTag = acqe_link->event_tag;
3420	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3421	bf_set(lpfc_mbx_read_top_link_spd, la,
3422	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3423
	/* Fake the following irrelevant fields */
3425	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3426	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3427	bf_set(lpfc_mbx_read_top_il, la, 0);
3428	bf_set(lpfc_mbx_read_top_pb, la, 0);
3429	bf_set(lpfc_mbx_read_top_fa, la, 0);
3430	bf_set(lpfc_mbx_read_top_mm, la, 0);
3431
3432	/* Invoke the lpfc_handle_latt mailbox command callback function */
3433	lpfc_mbx_cmpl_read_topology(phba, pmb);
3434
3435	return;
3436
out_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
out_free_dmabuf:
	kfree(mp);
3439out_free_pmb:
3440	mempool_free(pmb, phba->mbox_mem_pool);
3441}
3442
3443/**
3444 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3445 * @phba: pointer to lpfc hba data structure.
3446 * @acqe_fc: pointer to the async fc completion queue entry.
3447 *
3448 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3449 * that the event was received and then issue a read_topology mailbox command so
3450 * that the rest of the driver will treat it the same as SLI3.
3451 **/
3452static void
3453lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3454{
3455	struct lpfc_dmabuf *mp;
3456	LPFC_MBOXQ_t *pmb;
3457	int rc;
3458
3459	if (bf_get(lpfc_trailer_type, acqe_fc) !=
3460	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3461		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3462				"2895 Non FC link Event detected.(%d)\n",
3463				bf_get(lpfc_trailer_type, acqe_fc));
3464		return;
3465	}
3466	/* Keep the link status for extra SLI4 state machine reference */
3467	phba->sli4_hba.link_state.speed =
3468				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3469	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3470	phba->sli4_hba.link_state.topology =
3471				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3472	phba->sli4_hba.link_state.status =
3473				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3474	phba->sli4_hba.link_state.type =
3475				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3476	phba->sli4_hba.link_state.number =
3477				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3478	phba->sli4_hba.link_state.fault =
3479				bf_get(lpfc_acqe_link_fault, acqe_fc);
3480	phba->sli4_hba.link_state.logical_speed =
3481				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3482	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3483			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
3484			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3485			"%dMbps Fault:%d\n",
3486			phba->sli4_hba.link_state.speed,
3487			phba->sli4_hba.link_state.topology,
3488			phba->sli4_hba.link_state.status,
3489			phba->sli4_hba.link_state.type,
3490			phba->sli4_hba.link_state.number,
3491			phba->sli4_hba.link_state.logical_speed * 10,
3492			phba->sli4_hba.link_state.fault);
3493	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3494	if (!pmb) {
3495		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3496				"2897 The mboxq allocation failed\n");
3497		return;
3498	}
3499	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3500	if (!mp) {
3501		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3502				"2898 The lpfc_dmabuf allocation failed\n");
3503		goto out_free_pmb;
3504	}
3505	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3506	if (!mp->virt) {
3507		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3508				"2899 The mbuf allocation failed\n");
3509		goto out_free_dmabuf;
3510	}
3511
3512	/* Cleanup any outstanding ELS commands */
3513	lpfc_els_flush_all_cmd(phba);
3514
	/* Block ELS IOCBs until we are done processing the link event */
3516	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3517
3518	/* Update link event statistics */
3519	phba->sli.slistat.link_event++;
3520
3521	/* Create lpfc_handle_latt mailbox command from link ACQE */
3522	lpfc_read_topology(phba, pmb, mp);
3523	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3524	pmb->vport = phba->pport;
3525
3526	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3527	if (rc == MBX_NOT_FINISHED)
		goto out_free_mbuf;
3529	return;
3530
out_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
out_free_dmabuf:
	kfree(mp);
3533out_free_pmb:
3534	mempool_free(pmb, phba->mbox_mem_pool);
3535}
3536
3537/**
3538 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3539 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
3541 *
3542 * This routine is to handle the SLI4 asynchronous SLI events.
3543 **/
3544static void
3545lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3546{
3547	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3548			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
3549			"x%08x SLI Event Type:%d",
3550			acqe_sli->event_data1, acqe_sli->event_data2,
3551			bf_get(lpfc_trailer_type, acqe_sli));
3552	return;
3553}
3554
3555/**
3556 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3557 * @vport: pointer to vport data structure.
3558 *
3559 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3560 * response to a CVL event.
3561 *
3562 * Return the pointer to the ndlp with the vport if successful, otherwise
3563 * return NULL.
3564 **/
3565static struct lpfc_nodelist *
3566lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3567{
3568	struct lpfc_nodelist *ndlp;
3569	struct Scsi_Host *shost;
3570	struct lpfc_hba *phba;
3571
3572	if (!vport)
3573		return NULL;
3574	phba = vport->phba;
3575	if (!phba)
3576		return NULL;
3577	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3578	if (!ndlp) {
3579		/* Cannot find existing Fabric ndlp, so allocate a new one */
3580		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3581		if (!ndlp)
			return NULL;
3583		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3584		/* Set the node type */
3585		ndlp->nlp_type |= NLP_FABRIC;
3586		/* Put ndlp onto node list */
3587		lpfc_enqueue_node(vport, ndlp);
3588	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3589		/* re-setup ndlp without removing from node list */
3590		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3591		if (!ndlp)
			return NULL;
3593	}
3594	if ((phba->pport->port_state < LPFC_FLOGI) &&
3595		(phba->pport->port_state != LPFC_VPORT_FAILED))
3596		return NULL;
	/* If the virtual link is not yet instantiated, ignore the CVL */
3598	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3599		&& (vport->port_state != LPFC_VPORT_FAILED))
3600		return NULL;
3601	shost = lpfc_shost_from_vport(vport);
3602	if (!shost)
3603		return NULL;
3604	lpfc_linkdown_port(vport);
3605	lpfc_cleanup_pending_mbox(vport);
3606	spin_lock_irq(shost->host_lock);
3607	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3608	spin_unlock_irq(shost->host_lock);
3609
3610	return ndlp;
3611}
3612
3613/**
3614 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
3616 *
3617 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3618 * response to a FCF dead event.
3619 **/
3620static void
3621lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3622{
3623	struct lpfc_vport **vports;
3624	int i;
3625
3626	vports = lpfc_create_vport_work_array(phba);
3627	if (vports)
3628		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3629			lpfc_sli4_perform_vport_cvl(vports[i]);
3630	lpfc_destroy_vport_work_array(phba, vports);
3631}
3632
3633/**
3634 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3635 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
3637 *
3638 * This routine is to handle the SLI4 asynchronous fcoe event.
3639 **/
3640static void
3641lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3642			struct lpfc_acqe_fip *acqe_fip)
3643{
3644	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3645	int rc;
3646	struct lpfc_vport *vport;
3647	struct lpfc_nodelist *ndlp;
3648	struct Scsi_Host  *shost;
3649	int active_vlink_present;
3650	struct lpfc_vport **vports;
3651	int i;
3652
3653	phba->fc_eventTag = acqe_fip->event_tag;
3654	phba->fcoe_eventtag = acqe_fip->event_tag;
3655	switch (event_type) {
3656	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3657	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3658		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3659			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3660					LOG_DISCOVERY,
3661					"2546 New FCF event, evt_tag:x%x, "
3662					"index:x%x\n",
3663					acqe_fip->event_tag,
3664					acqe_fip->index);
3665		else
3666			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3667					LOG_DISCOVERY,
3668					"2788 FCF param modified event, "
3669					"evt_tag:x%x, index:x%x\n",
3670					acqe_fip->event_tag,
3671					acqe_fip->index);
3672		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3673			/*
3674			 * During period of FCF discovery, read the FCF
3675			 * table record indexed by the event to update
3676			 * FCF roundrobin failover eligible FCF bmask.
3677			 */
3678			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3679					LOG_DISCOVERY,
3680					"2779 Read FCF (x%x) for updating "
3681					"roundrobin FCF failover bmask\n",
3682					acqe_fip->index);
3683			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3684		}
3685
3686		/* If the FCF discovery is in progress, do nothing. */
3687		spin_lock_irq(&phba->hbalock);
3688		if (phba->hba_flag & FCF_TS_INPROG) {
3689			spin_unlock_irq(&phba->hbalock);
3690			break;
3691		}
3692		/* If fast FCF failover rescan event is pending, do nothing */
3693		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3694			spin_unlock_irq(&phba->hbalock);
3695			break;
3696		}
3697
3698		/* If the FCF has been in discovered state, do nothing. */
3699		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3700			spin_unlock_irq(&phba->hbalock);
3701			break;
3702		}
3703		spin_unlock_irq(&phba->hbalock);
3704
3705		/* Otherwise, scan the entire FCF table and re-discover SAN */
3706		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3707				"2770 Start FCF table scan per async FCF "
3708				"event, evt_tag:x%x, index:x%x\n",
3709				acqe_fip->event_tag, acqe_fip->index);
3710		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3711						     LPFC_FCOE_FCF_GET_FIRST);
3712		if (rc)
3713			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3714					"2547 Issue FCF scan read FCF mailbox "
3715					"command failed (x%x)\n", rc);
3716		break;
3717
3718	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3719		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3720			"2548 FCF Table full count 0x%x tag 0x%x\n",
3721			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3722			acqe_fip->event_tag);
3723		break;
3724
3725	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3726		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3727			"2549 FCF (x%x) disconnected from network, "
3728			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3729		/*
3730		 * If we are in the middle of FCF failover process, clear
3731		 * the corresponding FCF bit in the roundrobin bitmap.
3732		 */
3733		spin_lock_irq(&phba->hbalock);
3734		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3735			spin_unlock_irq(&phba->hbalock);
3736			/* Update FLOGI FCF failover eligible FCF bmask */
3737			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3738			break;
3739		}
3740		spin_unlock_irq(&phba->hbalock);
3741
		/* If the event is not for the currently used FCF, do nothing */
3743		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3744			break;
3745
3746		/*
3747		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from the case that the current
		 * FCF is no longer valid, as we are not already in the
		 * middle of the FCF failover process.
3751		 */
3752		spin_lock_irq(&phba->hbalock);
3753		/* Mark the fast failover process in progress */
3754		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3755		spin_unlock_irq(&phba->hbalock);
3756
3757		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3758				"2771 Start FCF fast failover process due to "
3759				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3760				"\n", acqe_fip->event_tag, acqe_fip->index);
3761		rc = lpfc_sli4_redisc_fcf_table(phba);
3762		if (rc) {
3763			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3764					LOG_DISCOVERY,
3765					"2772 Issue FCF rediscover mabilbox "
3766					"command failed, fail through to FCF "
3767					"dead event\n");
3768			spin_lock_irq(&phba->hbalock);
3769			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3770			spin_unlock_irq(&phba->hbalock);
3771			/*
3772			 * Last resort will fail over by treating this
3773			 * as a link down to FCF registration.
3774			 */
3775			lpfc_sli4_fcf_dead_failthrough(phba);
3776		} else {
3777			/* Reset FCF roundrobin bmask for new discovery */
3778			lpfc_sli4_clear_fcf_rr_bmask(phba);
3779			/*
3780			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
3782			 */
3783			lpfc_sli4_perform_all_vport_cvl(phba);
3784		}
3785		break;
3786	case LPFC_FIP_EVENT_TYPE_CVL:
3787		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3788			"2718 Clear Virtual Link Received for VPI 0x%x"
3789			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3790
3791		vport = lpfc_find_vport_by_vpid(phba,
3792						acqe_fip->index);
3793		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3794		if (!ndlp)
3795			break;
3796		active_vlink_present = 0;
3797
3798		vports = lpfc_create_vport_work_array(phba);
3799		if (vports) {
3800			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3801					i++) {
3802				if ((!(vports[i]->fc_flag &
3803					FC_VPORT_CVL_RCVD)) &&
3804					(vports[i]->port_state > LPFC_FDISC)) {
3805					active_vlink_present = 1;
3806					break;
3807				}
3808			}
3809			lpfc_destroy_vport_work_array(phba, vports);
3810		}
3811
3812		if (active_vlink_present) {
3813			/*
3814			 * If there are other active VLinks present,
3815			 * re-instantiate the Vlink using FDISC.
3816			 */
3817			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3818			shost = lpfc_shost_from_vport(vport);
3819			spin_lock_irq(shost->host_lock);
3820			ndlp->nlp_flag |= NLP_DELAY_TMO;
3821			spin_unlock_irq(shost->host_lock);
3822			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3823			vport->port_state = LPFC_FDISC;
3824		} else {
3825			/*
			 * Otherwise, request the port to rediscover
			 * the entire FCF table for a fast recovery
			 * from the possible case that the current
			 * FCF is no longer valid, if we are not
			 * already in the FCF failover process.
3831			 */
3832			spin_lock_irq(&phba->hbalock);
3833			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3834				spin_unlock_irq(&phba->hbalock);
3835				break;
3836			}
3837			/* Mark the fast failover process in progress */
3838			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3839			spin_unlock_irq(&phba->hbalock);
3840			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3841					LOG_DISCOVERY,
3842					"2773 Start FCF failover per CVL, "
3843					"evt_tag:x%x\n", acqe_fip->event_tag);
3844			rc = lpfc_sli4_redisc_fcf_table(phba);
3845			if (rc) {
3846				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3847						LOG_DISCOVERY,
3848						"2774 Issue FCF rediscover "
3849						"mabilbox command failed, "
3850						"through to CVL event\n");
3851				spin_lock_irq(&phba->hbalock);
3852				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3853				spin_unlock_irq(&phba->hbalock);
3854				/*
				 * Last resort will be a retry on the
				 * currently registered FCF entry.
3857				 */
3858				lpfc_retry_pport_discovery(phba);
3859			} else
3860				/*
3861				 * Reset FCF roundrobin bmask for new
3862				 * discovery.
3863				 */
3864				lpfc_sli4_clear_fcf_rr_bmask(phba);
3865		}
3866		break;
3867	default:
3868		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3869			"0288 Unknown FCoE event type 0x%x event tag "
3870			"0x%x\n", event_type, acqe_fip->event_tag);
3871		break;
3872	}
3873}
3874
3875/**
3876 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3877 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3879 *
3880 * This routine is to handle the SLI4 asynchronous dcbx event.
3881 **/
3882static void
3883lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3884			 struct lpfc_acqe_dcbx *acqe_dcbx)
3885{
3886	phba->fc_eventTag = acqe_dcbx->event_tag;
3887	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3888			"0290 The SLI4 DCBX asynchronous event is not "
3889			"handled yet\n");
3890}
3891
3892/**
3893 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3894 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3896 *
3897 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change.  The Port
3899 * reports the logical link speed in units of 10Mbps.
3900 **/
3901static void
3902lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3903			 struct lpfc_acqe_grp5 *acqe_grp5)
3904{
3905	uint16_t prev_ll_spd;
3906
3907	phba->fc_eventTag = acqe_grp5->event_tag;
3908	phba->fcoe_eventtag = acqe_grp5->event_tag;
3909	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3910	phba->sli4_hba.link_state.logical_speed =
3911		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3912	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3913			"2789 GRP5 Async Event: Updating logical link speed "
3914			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3915			(phba->sli4_hba.link_state.logical_speed*10));
3916}
3917
3918/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3920 * @phba: pointer to lpfc hba data structure.
3921 *
3922 * This routine is invoked by the worker thread to process all the pending
3923 * SLI4 asynchronous events.
3924 **/
3925void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3926{
3927	struct lpfc_cq_event *cq_event;
3928
3929	/* First, declare the async event has been handled */
3930	spin_lock_irq(&phba->hbalock);
3931	phba->hba_flag &= ~ASYNC_EVENT;
3932	spin_unlock_irq(&phba->hbalock);
3933	/* Now, handle all the async events */
3934	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3935		/* Get the first event from the head of the event queue */
3936		spin_lock_irq(&phba->hbalock);
3937		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3938				 cq_event, struct lpfc_cq_event, list);
3939		spin_unlock_irq(&phba->hbalock);
3940		/* Process the asynchronous event */
3941		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3942		case LPFC_TRAILER_CODE_LINK:
3943			lpfc_sli4_async_link_evt(phba,
3944						 &cq_event->cqe.acqe_link);
3945			break;
3946		case LPFC_TRAILER_CODE_FCOE:
3947			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3948			break;
3949		case LPFC_TRAILER_CODE_DCBX:
3950			lpfc_sli4_async_dcbx_evt(phba,
3951						 &cq_event->cqe.acqe_dcbx);
3952			break;
3953		case LPFC_TRAILER_CODE_GRP5:
3954			lpfc_sli4_async_grp5_evt(phba,
3955						 &cq_event->cqe.acqe_grp5);
3956			break;
3957		case LPFC_TRAILER_CODE_FC:
3958			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3959			break;
3960		case LPFC_TRAILER_CODE_SLI:
3961			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3962			break;
3963		default:
3964			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3965					"1804 Invalid asynchrous event code: "
3966					"x%x\n", bf_get(lpfc_trailer_code,
3967					&cq_event->cqe.mcqe_cmpl));
3968			break;
3969		}
3970		/* Free the completion event processed to the free pool */
3971		lpfc_sli4_cq_event_release(phba, cq_event);
3972	}
3973}
3974
3975/**
3976 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3977 * @phba: pointer to lpfc hba data structure.
3978 *
3979 * This routine is invoked by the worker thread to process FCF table
3980 * rediscovery pending completion event.
3981 **/
3982void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3983{
3984	int rc;
3985
3986	spin_lock_irq(&phba->hbalock);
3987	/* Clear FCF rediscovery timeout event */
3988	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3989	/* Clear driver fast failover FCF record flag */
3990	phba->fcf.failover_rec.flag = 0;
3991	/* Set state for FCF fast failover */
3992	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3993	spin_unlock_irq(&phba->hbalock);
3994
3995	/* Scan FCF table from the first entry to re-discover SAN */
3996	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3997			"2777 Start post-quiescent FCF table scan\n");
3998	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3999	if (rc)
4000		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4001				"2747 Issue FCF scan read FCF mailbox "
4002				"command failed 0x%x\n", rc);
4003}
4004
4005/**
4006 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4007 * @phba: pointer to lpfc hba data structure.
4008 * @dev_grp: The HBA PCI-Device group number.
4009 *
4010 * This routine is invoked to set up the per HBA PCI-Device group function
4011 * API jump table entries.
4012 *
4013 * Return: 0 if success, otherwise -ENODEV
4014 **/
4015int
4016lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4017{
4018	int rc;
4019
4020	/* Set up lpfc PCI-device group */
4021	phba->pci_dev_grp = dev_grp;
4022
4023	/* The LPFC_PCI_DEV_OC uses SLI4 */
4024	if (dev_grp == LPFC_PCI_DEV_OC)
4025		phba->sli_rev = LPFC_SLI_REV4;
4026
4027	/* Set up device INIT API function jump table */
4028	rc = lpfc_init_api_table_setup(phba, dev_grp);
4029	if (rc)
4030		return -ENODEV;
4031	/* Set up SCSI API function jump table */
4032	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4033	if (rc)
4034		return -ENODEV;
4035	/* Set up SLI API function jump table */
4036	rc = lpfc_sli_api_table_setup(phba, dev_grp);
4037	if (rc)
4038		return -ENODEV;
4039	/* Set up MBOX API function jump table */
4040	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4041	if (rc)
4042		return -ENODEV;
4043
4044	return 0;
4045}
4046
4047/**
4048 * lpfc_log_intr_mode - Log the active interrupt mode
4049 * @phba: pointer to lpfc hba data structure.
4050 * @intr_mode: active interrupt mode adopted.
4051 *
 * This routine is invoked to log the currently used active interrupt mode
 * for the device.
4054 **/
4055static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4056{
4057	switch (intr_mode) {
4058	case 0:
4059		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4060				"0470 Enable INTx interrupt mode.\n");
4061		break;
4062	case 1:
4063		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4064				"0481 Enabled MSI interrupt mode.\n");
4065		break;
4066	case 2:
4067		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4068				"0480 Enabled MSI-X interrupt mode.\n");
4069		break;
4070	default:
4071		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4072				"0482 Illegal interrupt mode.\n");
4073		break;
4074	}
4075	return;
4076}
4077
4078/**
4079 * lpfc_enable_pci_dev - Enable a generic PCI device.
4080 * @phba: pointer to lpfc hba data structure.
4081 *
4082 * This routine is invoked to enable the PCI device that is common to all
4083 * PCI devices.
4084 *
4085 * Return codes
4086 * 	0 - successful
4087 * 	other values - error
4088 **/
4089static int
4090lpfc_enable_pci_dev(struct lpfc_hba *phba)
4091{
4092	struct pci_dev *pdev;
4093	int bars = 0;
4094
4095	/* Obtain PCI device reference */
4096	if (!phba->pcidev)
4097		goto out_error;
4098	else
4099		pdev = phba->pcidev;
4100	/* Select PCI BARs */
4101	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4102	/* Enable PCI device */
4103	if (pci_enable_device_mem(pdev))
4104		goto out_error;
4105	/* Request PCI resource for the device */
4106	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4107		goto out_disable_device;
4108	/* Set up device as PCI master and save state for EEH */
4109	pci_set_master(pdev);
4110	pci_try_set_mwi(pdev);
4111	pci_save_state(pdev);
4112
4113	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4114	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
4115		pdev->needs_freset = 1;
4116
4117	return 0;
4118
4119out_disable_device:
4120	pci_disable_device(pdev);
4121out_error:
4122	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4123			"1401 Failed to enable pci device, bars:x%x\n", bars);
4124	return -ENODEV;
4125}
4126
4127/**
4128 * lpfc_disable_pci_dev - Disable a generic PCI device.
4129 * @phba: pointer to lpfc hba data structure.
4130 *
4131 * This routine is invoked to disable the PCI device that is common to all
4132 * PCI devices.
4133 **/
4134static void
4135lpfc_disable_pci_dev(struct lpfc_hba *phba)
4136{
4137	struct pci_dev *pdev;
4138	int bars;
4139
4140	/* Obtain PCI device reference */
4141	if (!phba->pcidev)
4142		return;
4143	else
4144		pdev = phba->pcidev;
4145	/* Select PCI BARs */
4146	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4147	/* Release PCI resource and disable PCI device */
4148	pci_release_selected_regions(pdev, bars);
4149	pci_disable_device(pdev);
4150	/* Null out PCI private reference to driver */
4151	pci_set_drvdata(pdev, NULL);
4152
4153	return;
4154}
4155
4156/**
4157 * lpfc_reset_hba - Reset a hba
4158 * @phba: pointer to lpfc hba data structure.
4159 *
4160 * This routine is invoked to reset a hba device. It brings the HBA
4161 * offline, performs a board restart, and then brings the board back
4162 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * outstanding mailbox commands.
4164 **/
4165void
4166lpfc_reset_hba(struct lpfc_hba *phba)
4167{
4168	/* If resets are disabled then set error state and return. */
4169	if (!phba->cfg_enable_hba_reset) {
4170		phba->link_state = LPFC_HBA_ERROR;
4171		return;
4172	}
4173	lpfc_offline_prep(phba);
4174	lpfc_offline(phba);
4175	lpfc_sli_brdrestart(phba);
4176	lpfc_online(phba);
4177	lpfc_unblock_mgmt_io(phba);
4178}
4179
4180/**
4181 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4182 * @phba: pointer to lpfc hba data structure.
4183 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function to determine the total number of virtual functions the device
 * supports.
 *
 * Returns the number of supported virtual functions, or 0 if the device
 * does not support SR-IOV.
4189 **/
4190uint16_t
4191lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4192{
4193	struct pci_dev *pdev = phba->pcidev;
4194	uint16_t nr_virtfn;
4195	int pos;
4196
4197	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4198	if (pos == 0)
4199		return 0;
4200
4201	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4202	return nr_virtfn;
4203}
4204
4205/**
4206 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4207 * @phba: pointer to lpfc hba data structure.
4208 * @nr_vfn: number of virtual functions to be enabled.
4209 *
 * This function enables @nr_vfn PCI SR-IOV virtual functions on a physical
 * function by invoking the pci_enable_sriov() API. As not all devices
 * support SR-IOV, a failing return code from the pci_enable_sriov() API
 * call is not considered an error condition for most devices.
4215 **/
4216int
4217lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4218{
4219	struct pci_dev *pdev = phba->pcidev;
4220	uint16_t max_nr_vfn;
4221	int rc;
4222
4223	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4224	if (nr_vfn > max_nr_vfn) {
4225		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4226				"3057 Requested vfs (%d) greater than "
4227				"supported vfs (%d)", nr_vfn, max_nr_vfn);
4228		return -EINVAL;
4229	}
4230
4231	rc = pci_enable_sriov(pdev, nr_vfn);
4232	if (rc) {
4233		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4234				"2806 Failed to enable sriov on this device "
4235				"with vfn number nr_vf:%d, rc:%d\n",
4236				nr_vfn, rc);
4237	} else
4238		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4239				"2807 Successful enable sriov on this device "
4240				"with vfn number nr_vf:%d\n", nr_vfn);
4241	return rc;
4242}
4243
4244/**
4245 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4246 * @phba: pointer to lpfc hba data structure.
4247 *
4248 * This routine is invoked to set up the driver internal resources specific to
4249 * support the SLI-3 HBA device it attached to.
4250 *
4251 * Return codes
4252 * 	0 - successful
4253 * 	other values - error
4254 **/
4255static int
4256lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4257{
4258	struct lpfc_sli *psli;
4259	int rc;
4260
4261	/*
4262	 * Initialize timers used by driver
4263	 */
4264
4265	/* Heartbeat timer */
4266	init_timer(&phba->hb_tmofunc);
4267	phba->hb_tmofunc.function = lpfc_hb_timeout;
4268	phba->hb_tmofunc.data = (unsigned long)phba;
4269
4270	psli = &phba->sli;
4271	/* MBOX heartbeat timer */
4272	init_timer(&psli->mbox_tmo);
4273	psli->mbox_tmo.function = lpfc_mbox_timeout;
4274	psli->mbox_tmo.data = (unsigned long) phba;
4275	/* FCP polling mode timer */
4276	init_timer(&phba->fcp_poll_timer);
4277	phba->fcp_poll_timer.function = lpfc_poll_timeout;
4278	phba->fcp_poll_timer.data = (unsigned long) phba;
4279	/* Fabric block timer */
4280	init_timer(&phba->fabric_block_timer);
4281	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4282	phba->fabric_block_timer.data = (unsigned long) phba;
4283	/* EA polling mode timer */
4284	init_timer(&phba->eratt_poll);
4285	phba->eratt_poll.function = lpfc_poll_eratt;
4286	phba->eratt_poll.data = (unsigned long) phba;
4287
4288	/* Host attention work mask setup */
4289	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4290	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4291
4292	/* Get all the module params for configuring this host */
4293	lpfc_get_cfgparam(phba);
4294	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4295		phba->menlo_flag |= HBA_MENLO_SUPPORT;
4296		/* check for menlo minimum sg count */
4297		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4298			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4299	}
4300
4301	/*
	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4303	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4304	 * 2 segments are added since the IOCB needs a command and response bde.
4305	 */
4306	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4307		sizeof(struct fcp_rsp) +
4308			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
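
	/*
	 * Worked example (illustrative numbers only, assuming a 12-byte
	 * struct ulp_bde64 and a cfg_sg_seg_cnt of 64): the pool buffer
	 * would carry (64 + 2) * 12 = 792 bytes of BDEs in addition to
	 * the fcp_cmnd and fcp_rsp payloads.
	 */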
4309
4310	if (phba->cfg_enable_bg) {
4311		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4312		phba->cfg_sg_dma_buf_size +=
4313			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4314	}
4315
4316	/* Also reinitialize the host templates with new values. */
4317	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4318	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4319
4320	phba->max_vpi = LPFC_MAX_VPI;
4321	/* This will be set to correct value after config_port mbox */
4322	phba->max_vports = 0;
4323
4324	/*
4325	 * Initialize the SLI Layer to run with lpfc HBAs.
4326	 */
4327	lpfc_sli_setup(phba);
4328	lpfc_sli_queue_setup(phba);
4329
4330	/* Allocate device driver memory */
4331	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4332		return -ENOMEM;
4333
4334	/*
4335	 * Enable sr-iov virtual functions if supported and configured
4336	 * through the module parameter.
4337	 */
4338	if (phba->cfg_sriov_nr_virtfn > 0) {
4339		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4340						 phba->cfg_sriov_nr_virtfn);
4341		if (rc) {
4342			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4343					"2808 Requested number of SR-IOV "
4344					"virtual functions (%d) is not "
4345					"supported\n",
4346					phba->cfg_sriov_nr_virtfn);
4347			phba->cfg_sriov_nr_virtfn = 0;
4348		}
4349	}
4350
4351	return 0;
4352}
4353
4354/**
4355 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4356 * @phba: pointer to lpfc hba data structure.
4357 *
4358 * This routine is invoked to unset the driver internal resources set up
4359 * specific for supporting the SLI-3 HBA device it attached to.
4360 **/
4361static void
4362lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4363{
4364	/* Free device driver memory allocated */
4365	lpfc_mem_free_all(phba);
4366
4367	return;
4368}
4369
4370/**
4371 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4372 * @phba: pointer to lpfc hba data structure.
4373 *
4374 * This routine is invoked to set up the driver internal resources specific to
4375 * support the SLI-4 HBA device it attached to.
4376 *
4377 * Return codes
4378 * 	0 - successful
4379 * 	other values - error
4380 **/
4381static int
4382lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4383{
4384	struct lpfc_sli *psli;
4385	LPFC_MBOXQ_t *mboxq;
4386	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4387	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4388	struct lpfc_mqe *mqe;
4389	int longs, sli_family;
4390	int sges_per_segment;
4391
	/* Before proceeding, wait for POST done and device ready */
4393	rc = lpfc_sli4_post_status_check(phba);
4394	if (rc)
4395		return -ENODEV;
4396
4397	/*
4398	 * Initialize timers used by driver
4399	 */
4400
4401	/* Heartbeat timer */
4402	init_timer(&phba->hb_tmofunc);
4403	phba->hb_tmofunc.function = lpfc_hb_timeout;
4404	phba->hb_tmofunc.data = (unsigned long)phba;
4405	init_timer(&phba->rrq_tmr);
4406	phba->rrq_tmr.function = lpfc_rrq_timeout;
4407	phba->rrq_tmr.data = (unsigned long)phba;
4408
4409	psli = &phba->sli;
4410	/* MBOX heartbeat timer */
4411	init_timer(&psli->mbox_tmo);
4412	psli->mbox_tmo.function = lpfc_mbox_timeout;
4413	psli->mbox_tmo.data = (unsigned long) phba;
4414	/* Fabric block timer */
4415	init_timer(&phba->fabric_block_timer);
4416	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4417	phba->fabric_block_timer.data = (unsigned long) phba;
4418	/* EA polling mode timer */
4419	init_timer(&phba->eratt_poll);
4420	phba->eratt_poll.function = lpfc_poll_eratt;
4421	phba->eratt_poll.data = (unsigned long) phba;
4422	/* FCF rediscover timer */
4423	init_timer(&phba->fcf.redisc_wait);
4424	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4425	phba->fcf.redisc_wait.data = (unsigned long)phba;
4426
4427	/*
4428	 * Control structure for handling external multi-buffer mailbox
4429	 * command pass-through.
4430	 */
4431	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4432		sizeof(struct lpfc_mbox_ext_buf_ctx));
4433	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4434
4435	/*
4436	 * We need to do a READ_CONFIG mailbox command here before
4437	 * calling lpfc_get_cfgparam. For VFs this will report the
4438	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4439	 * All of the resources allocated
4440	 * for this Port are tied to these values.
4441	 */
4442	/* Get all the module params for configuring this host */
4443	lpfc_get_cfgparam(phba);
4444	phba->max_vpi = LPFC_MAX_VPI;
4445	/* This will be set to correct value after the read_config mbox */
4446	phba->max_vports = 0;
4447
4448	/* Program the default value of vlan_id and fc_map */
4449	phba->valid_vlan = 0;
4450	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4451	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4452	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4453
	/* With BlockGuard we can have multiple SGEs per Data Segment */
4455	sges_per_segment = 1;
4456	if (phba->cfg_enable_bg)
4457		sges_per_segment = 2;
4458
4459	/*
	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
	 * sgl sizes that are a power of 2 are used.
4465	 */
4466	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4467		    (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
4468		    sizeof(struct sli4_sge)));
4469
4470	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4471	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4472	switch (sli_family) {
4473	case LPFC_SLI_INTF_FAMILY_BE2:
4474	case LPFC_SLI_INTF_FAMILY_BE3:
4475		/* There is a single hint for BE - 2 pages per BPL. */
4476		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4477		    LPFC_SLI_INTF_SLI_HINT1_1)
4478			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4479		break;
4480	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4481	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4482	default:
4483		break;
4484	}
4485
4486	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4487	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4488	     dma_buf_size = dma_buf_size << 1)
4489		;
4490	if (dma_buf_size == max_buf_size)
4491		phba->cfg_sg_seg_cnt = (dma_buf_size -
4492			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4493			(2 * sizeof(struct sli4_sge))) /
4494				sizeof(struct sli4_sge);
4495	phba->cfg_sg_dma_buf_size = dma_buf_size;
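
	/*
	 * Worked example (illustrative numbers only, assuming a 16-byte
	 * struct sli4_sge, a cfg_sg_seg_cnt of 64 and BlockGuard enabled
	 * so sges_per_segment is 2): buf_size covers (64 * 2 + 2) * 16 =
	 * 2080 bytes of SGEs plus the fcp_cmnd/fcp_rsp, and the doubling
	 * loop above rounds the total up to the next power of two.
	 */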
4496
4497	/* Initialize buffer queue management fields */
4498	hbq_count = lpfc_sli_hbq_count();
4499	for (i = 0; i < hbq_count; ++i)
4500		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4501	INIT_LIST_HEAD(&phba->rb_pend_list);
4502	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4503	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4504
4505	/*
4506	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4507	 */
4508	/* Initialize the Abort scsi buffer list used by driver */
4509	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4510	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4511	/* This abort list used by worker thread */
4512	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4513
4514	/*
4515	 * Initialize driver internal slow-path work queues
4516	 */
4517
	/* Driver internal slow-path CQ Event pool */
4519	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4520	/* Response IOCB work queue list */
4521	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4522	/* Asynchronous event CQ Event work queue list */
4523	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4524	/* Fast-path XRI aborted CQ Event work queue list */
4525	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4526	/* Slow-path XRI aborted CQ Event work queue list */
4527	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4528	/* Receive queue CQ Event work queue list */
4529	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4530
4531	/* Initialize extent block lists. */
4532	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4533	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4534	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4535	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4536
4537	/* Initialize the driver internal SLI layer lists. */
4538	lpfc_sli_setup(phba);
4539	lpfc_sli_queue_setup(phba);
4540
4541	/* Allocate device driver memory */
4542	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4543	if (rc)
4544		return -ENOMEM;
4545
4546	/* IF Type 2 ports get initialized now. */
4547	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4548	    LPFC_SLI_INTF_IF_TYPE_2) {
4549		rc = lpfc_pci_function_reset(phba);
4550		if (unlikely(rc))
4551			return -ENODEV;
4552	}
4553
4554	/* Create the bootstrap mailbox command */
4555	rc = lpfc_create_bootstrap_mbox(phba);
4556	if (unlikely(rc))
4557		goto out_free_mem;
4558
4559	/* Set up the host's endian order with the device. */
4560	rc = lpfc_setup_endian_order(phba);
4561	if (unlikely(rc))
4562		goto out_free_bsmbx;
4563
4564	/* Set up the hba's configuration parameters. */
4565	rc = lpfc_sli4_read_config(phba);
4566	if (unlikely(rc))
4567		goto out_free_bsmbx;
4568
4569	/* IF Type 0 ports get initialized now. */
4570	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4571	    LPFC_SLI_INTF_IF_TYPE_0) {
4572		rc = lpfc_pci_function_reset(phba);
4573		if (unlikely(rc))
4574			goto out_free_bsmbx;
4575	}
4576
4577	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4578						       GFP_KERNEL);
4579	if (!mboxq) {
4580		rc = -ENOMEM;
4581		goto out_free_bsmbx;
4582	}
4583
4584	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4585	lpfc_supported_pages(mboxq);
4586	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4587	if (!rc) {
4588		mqe = &mboxq->u.mqe;
4589		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4590		       LPFC_MAX_SUPPORTED_PAGES);
4591		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4592			switch (pn_page[i]) {
4593			case LPFC_SLI4_PARAMETERS:
4594				phba->sli4_hba.pc_sli4_params.supported = 1;
4595				break;
4596			default:
4597				break;
4598			}
4599		}
4600		/* Read the port's SLI4 Parameters capabilities if supported. */
4601		if (phba->sli4_hba.pc_sli4_params.supported)
4602			rc = lpfc_pc_sli4_params_get(phba, mboxq);
4603		if (rc) {
4604			mempool_free(mboxq, phba->mbox_mem_pool);
4605			rc = -EIO;
4606			goto out_free_bsmbx;
4607		}
4608	}
4609	/*
4610	 * Get sli4 parameters that override parameters from Port capabilities.
4611	 * If this call fails, it isn't critical unless the SLI4 parameters come
4612	 * back in conflict.
4613	 */
4614	rc = lpfc_get_sli4_parameters(phba, mboxq);
4615	if (rc) {
4616		if (phba->sli4_hba.extents_in_use &&
4617		    phba->sli4_hba.rpi_hdrs_in_use) {
4618			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4619				"2999 Unsupported SLI4 Parameters "
4620				"Extents and RPI headers enabled.\n");
			mempool_free(mboxq, phba->mbox_mem_pool);
			goto out_free_bsmbx;
4622		}
4623	}
4624	mempool_free(mboxq, phba->mbox_mem_pool);
4625	/* Verify all the SLI4 queues */
4626	rc = lpfc_sli4_queue_verify(phba);
4627	if (rc)
4628		goto out_free_bsmbx;
4629
4630	/* Create driver internal CQE event pool */
4631	rc = lpfc_sli4_cq_event_pool_create(phba);
4632	if (rc)
4633		goto out_free_bsmbx;
4634
4635	/* Initialize and populate the iocb list per host */
4636	rc = lpfc_init_sgl_list(phba);
4637	if (rc) {
4638		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4639				"1400 Failed to initialize sgl list.\n");
4640		goto out_destroy_cq_event_pool;
4641	}
4642	rc = lpfc_init_active_sgl_array(phba);
4643	if (rc) {
4644		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4645				"1430 Failed to initialize sgl list.\n");
4646		goto out_free_sgl_list;
4647	}
4648	rc = lpfc_sli4_init_rpi_hdrs(phba);
4649	if (rc) {
4650		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4651				"1432 Failed to initialize rpi headers.\n");
4652		goto out_free_active_sgl;
4653	}
4654
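	/*
	 * The computation below is an open-coded BITS_TO_LONGS(): round
	 * LPFC_SLI4_FCF_TBL_INDX_MAX up to a whole number of longs so
	 * the bitmap carries one bit per FCF table index.
	 */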
4655	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4656	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4657	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4658					 GFP_KERNEL);
4659	if (!phba->fcf.fcf_rr_bmask) {
4660		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4661				"2759 Failed allocate memory for FCF round "
4662				"robin failover bmask\n");
4663		rc = -ENOMEM;
4664		goto out_remove_rpi_hdrs;
4665	}
4666
4667	/*
4668	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
	 * interrupt vector.  This is not an error.
4670	 */
4671	if (phba->cfg_fcp_eq_count) {
4672		phba->sli4_hba.fcp_eq_hdl =
4673				kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4674				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4675		if (!phba->sli4_hba.fcp_eq_hdl) {
4676			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4677					"2572 Failed allocate memory for "
4678					"fast-path per-EQ handle array\n");
4679			rc = -ENOMEM;
4680			goto out_free_fcf_rr_bmask;
4681		}
4682	}
4683
4684	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4685				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4686	if (!phba->sli4_hba.msix_entries) {
4687		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4688				"2573 Failed allocate memory for msi-x "
4689				"interrupt vector entries\n");
4690		rc = -ENOMEM;
4691		goto out_free_fcp_eq_hdl;
4692	}
4693
4694	/*
4695	 * Enable sr-iov virtual functions if supported and configured
4696	 * through the module parameter.
4697	 */
4698	if (phba->cfg_sriov_nr_virtfn > 0) {
4699		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4700						 phba->cfg_sriov_nr_virtfn);
4701		if (rc) {
4702			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4703					"3020 Requested number of SR-IOV "
4704					"virtual functions (%d) is not "
4705					"supported\n",
4706					phba->cfg_sriov_nr_virtfn);
4707			phba->cfg_sriov_nr_virtfn = 0;
4708		}
4709	}
4710
4711	return 0;
4712
4713out_free_fcp_eq_hdl:
4714	kfree(phba->sli4_hba.fcp_eq_hdl);
4715out_free_fcf_rr_bmask:
4716	kfree(phba->fcf.fcf_rr_bmask);
4717out_remove_rpi_hdrs:
4718	lpfc_sli4_remove_rpi_hdrs(phba);
4719out_free_active_sgl:
4720	lpfc_free_active_sgl(phba);
4721out_free_sgl_list:
4722	lpfc_free_sgl_list(phba);
4723out_destroy_cq_event_pool:
4724	lpfc_sli4_cq_event_pool_destroy(phba);
4725out_free_bsmbx:
4726	lpfc_destroy_bootstrap_mbox(phba);
4727out_free_mem:
4728	lpfc_mem_free(phba);
4729	return rc;
4730}
4731
4732/**
4733 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4734 * @phba: pointer to lpfc hba data structure.
4735 *
4736 * This routine is invoked to unset the driver internal resources set up
4737 * specific for supporting the SLI-4 HBA device it attached to.
4738 **/
4739static void
4740lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4741{
4742	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4743
4744	/* Free memory allocated for msi-x interrupt vector entries */
4745	kfree(phba->sli4_hba.msix_entries);
4746
4747	/* Free memory allocated for fast-path work queue handles */
4748	kfree(phba->sli4_hba.fcp_eq_hdl);
4749
4750	/* Free the allocated rpi headers. */
4751	lpfc_sli4_remove_rpi_hdrs(phba);
4752	lpfc_sli4_remove_rpis(phba);
4753
4754	/* Free eligible FCF index bmask */
4755	kfree(phba->fcf.fcf_rr_bmask);
4756
4757	/* Free the ELS sgl list */
4758	lpfc_free_active_sgl(phba);
4759	lpfc_free_sgl_list(phba);
4760
4761	/* Free the SCSI sgl management array */
4762	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4763
4764	/* Free the completion queue EQ event pool */
4765	lpfc_sli4_cq_event_release_all(phba);
4766	lpfc_sli4_cq_event_pool_destroy(phba);
4767
4768	/* Release resource identifiers. */
4769	lpfc_sli4_dealloc_resource_identifiers(phba);
4770
4771	/* Free the bsmbx region. */
4772	lpfc_destroy_bootstrap_mbox(phba);
4773
4774	/* Free the SLI Layer memory with SLI4 HBAs */
4775	lpfc_mem_free_all(phba);
4776
4777	/* Free the current connect table */
4778	list_for_each_entry_safe(conn_entry, next_conn_entry,
4779		&phba->fcf_conn_rec_list, list) {
4780		list_del_init(&conn_entry->list);
4781		kfree(conn_entry);
4782	}
4783
4784	return;
4785}
4786
4787/**
4788 * lpfc_init_api_table_setup - Set up init api function jump table
4789 * @phba: The hba struct for which this call is being executed.
4790 * @dev_grp: The HBA PCI-Device group number.
4791 *
4792 * This routine sets up the device INIT interface API function jump table
4793 * in @phba struct.
4794 *
4795 * Returns: 0 - success, -ENODEV - failure.
4796 **/
4797int
4798lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4799{
4800	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4801	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4802	phba->lpfc_selective_reset = lpfc_selective_reset;
4803	switch (dev_grp) {
4804	case LPFC_PCI_DEV_LP:
4805		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4806		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4807		phba->lpfc_stop_port = lpfc_stop_port_s3;
4808		break;
4809	case LPFC_PCI_DEV_OC:
4810		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4811		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4812		phba->lpfc_stop_port = lpfc_stop_port_s4;
4813		break;
4814	default:
4815		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4816				"1431 Invalid HBA PCI-device group: 0x%x\n",
4817				dev_grp);
4818		return -ENODEV;
4819		break;
4820	}
4821	return 0;
4822}
4823
4824/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4826 * @phba: pointer to lpfc hba data structure.
4827 *
4828 * This routine is invoked to set up the driver internal resources before the
4829 * device specific resource setup to support the HBA device it attached to.
4830 *
4831 * Return codes
4832 *	0 - successful
4833 *	other values - error
4834 **/
4835static int
4836lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4837{
4838	/*
4839	 * Driver resources common to all SLI revisions
4840	 */
4841	atomic_set(&phba->fast_event_count, 0);
4842	spin_lock_init(&phba->hbalock);
4843
4844	/* Initialize ndlp management spinlock */
4845	spin_lock_init(&phba->ndlp_lock);
4846
4847	INIT_LIST_HEAD(&phba->port_list);
4848	INIT_LIST_HEAD(&phba->work_list);
4849	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4850
4851	/* Initialize the wait queue head for the kernel thread */
4852	init_waitqueue_head(&phba->work_waitq);
4853
4854	/* Initialize the scsi buffer list used by driver for scsi IO */
4855	spin_lock_init(&phba->scsi_buf_list_lock);
4856	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4857
4858	/* Initialize the fabric iocb list */
4859	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4860
4861	/* Initialize list to save ELS buffers */
4862	INIT_LIST_HEAD(&phba->elsbuf);
4863
4864	/* Initialize FCF connection rec list */
4865	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4866
4867	return 0;
4868}
4869
4870/**
4871 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4872 * @phba: pointer to lpfc hba data structure.
4873 *
4874 * This routine is invoked to set up the driver internal resources after the
4875 * device specific resource setup to support the HBA device it attached to.
4876 *
4877 * Return codes
4878 * 	0 - successful
4879 * 	other values - error
4880 **/
4881static int
4882lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4883{
4884	int error;
4885
4886	/* Startup the kernel thread for this host adapter. */
4887	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4888					  "lpfc_worker_%d", phba->brd_no);
4889	if (IS_ERR(phba->worker_thread)) {
4890		error = PTR_ERR(phba->worker_thread);
4891		return error;
4892	}
4893
4894	return 0;
4895}
4896
4897/**
4898 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4899 * @phba: pointer to lpfc hba data structure.
4900 *
4901 * This routine is invoked to unset the driver internal resources set up after
4902 * the device specific resource setup for supporting the HBA device it
4903 * attached to.
4904 **/
4905static void
4906lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4907{
4908	/* Stop kernel worker thread */
4909	kthread_stop(phba->worker_thread);
4910}
4911
4912/**
4913 * lpfc_free_iocb_list - Free iocb list.
4914 * @phba: pointer to lpfc hba data structure.
4915 *
4916 * This routine is invoked to free the driver's IOCB list and memory.
4917 **/
4918static void
4919lpfc_free_iocb_list(struct lpfc_hba *phba)
4920{
4921	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4922
4923	spin_lock_irq(&phba->hbalock);
4924	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4925				 &phba->lpfc_iocb_list, list) {
4926		list_del(&iocbq_entry->list);
4927		kfree(iocbq_entry);
4928		phba->total_iocbq_bufs--;
4929	}
4930	spin_unlock_irq(&phba->hbalock);
4931
4932	return;
4933}
4934
4935/**
4936 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4937 * @phba: pointer to lpfc hba data structure.
4938 *
 * This routine is invoked to allocate and initialize the driver's IOCB
4940 * list and set up the IOCB tag array accordingly.
4941 *
4942 * Return codes
4943 *	0 - successful
4944 *	other values - error
4945 **/
4946static int
4947lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4948{
4949	struct lpfc_iocbq *iocbq_entry = NULL;
4950	uint16_t iotag;
4951	int i;
4952
4953	/* Initialize and populate the iocb list per host.  */
4954	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4955	for (i = 0; i < iocb_count; i++) {
4956		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4957		if (iocbq_entry == NULL) {
4958			printk(KERN_ERR "%s: only allocated %d iocbs of "
4959				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
4961			goto out_free_iocbq;
4962		}
4963
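		/*
		 * Reserve a unique iotag for this iocbq; the SLI layer
		 * keeps the iocbq in a lookup array so a ring completion
		 * can be mapped back to its originating command by tag.
		 */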
4964		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4965		if (iotag == 0) {
4966			kfree(iocbq_entry);
4967			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4968				"Unloading driver.\n", __func__);
4969			goto out_free_iocbq;
4970		}
4971		iocbq_entry->sli4_lxritag = NO_XRI;
4972		iocbq_entry->sli4_xritag = NO_XRI;
4973
4974		spin_lock_irq(&phba->hbalock);
4975		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4976		phba->total_iocbq_bufs++;
4977		spin_unlock_irq(&phba->hbalock);
4978	}
4979
4980	return 0;
4981
4982out_free_iocbq:
4983	lpfc_free_iocb_list(phba);
4984
4985	return -ENOMEM;
4986}
4987
4988/**
4989 * lpfc_free_sgl_list - Free sgl list.
4990 * @phba: pointer to lpfc hba data structure.
4991 *
4992 * This routine is invoked to free the driver's sgl list and memory.
4993 **/
4994static void
4995lpfc_free_sgl_list(struct lpfc_hba *phba)
4996{
4997	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4998	LIST_HEAD(sglq_list);
4999
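	/*
	 * Splice the entries off under hbalock, then free them outside
	 * the lock: lpfc_mbuf_free acquires hbalock internally, so
	 * freeing within the critical section would self-deadlock.
	 */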
5000	spin_lock_irq(&phba->hbalock);
5001	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5002	spin_unlock_irq(&phba->hbalock);
5003
5004	list_for_each_entry_safe(sglq_entry, sglq_next,
5005				 &sglq_list, list) {
5006		list_del(&sglq_entry->list);
5007		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5008		kfree(sglq_entry);
5009		phba->sli4_hba.total_sglq_bufs--;
5010	}
5011	kfree(phba->sli4_hba.lpfc_els_sgl_array);
5012}
5013
5014/**
5015 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5016 * @phba: pointer to lpfc hba data structure.
5017 *
5018 * This routine is invoked to allocate the driver's active sgl memory.
5019 * This array will hold the sglq_entry's for active IOs.
5020 **/
5021static int
5022lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5023{
5024	int size;
5025	size = sizeof(struct lpfc_sglq *);
5026	size *= phba->sli4_hba.max_cfg_param.max_xri;
5027
5028	phba->sli4_hba.lpfc_sglq_active_list =
5029		kzalloc(size, GFP_KERNEL);
5030	if (!phba->sli4_hba.lpfc_sglq_active_list)
5031		return -ENOMEM;
5032	return 0;
5033}
5034
5035/**
5036 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5037 * @phba: pointer to lpfc hba data structure.
5038 *
5039 * This routine is invoked to walk through the array of active sglq entries
5040 * and free all of the resources.
 * This is just a placeholder for now.
5042 **/
5043static void
5044lpfc_free_active_sgl(struct lpfc_hba *phba)
5045{
5046	kfree(phba->sli4_hba.lpfc_sglq_active_list);
5047}
5048
5049/**
5050 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5051 * @phba: pointer to lpfc hba data structure.
5052 *
 * This routine is invoked to allocate and initialize the driver's sgl
5054 * list and set up the sgl xritag tag array accordingly.
5055 *
5056 * Return codes
5057 *	0 - successful
5058 *	other values - error
5059 **/
5060static int
5061lpfc_init_sgl_list(struct lpfc_hba *phba)
5062{
5063	struct lpfc_sglq *sglq_entry = NULL;
5064	int i;
5065	int els_xri_cnt;
5066
5067	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5068	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5069				"2400 ELS XRI count %d.\n",
5070				els_xri_cnt);
5071	/* Initialize and populate the sglq list per host/VF. */
5072	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5073	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5074
5075	/* Sanity check on XRI management */
5076	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
5077		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5078				"2562 No room left for SCSI XRI allocation: "
5079				"max_xri=%d, els_xri=%d\n",
5080				phba->sli4_hba.max_cfg_param.max_xri,
5081				els_xri_cnt);
5082		return -ENOMEM;
5083	}
5084
5085	/* Allocate memory for the ELS XRI management array */
5086	phba->sli4_hba.lpfc_els_sgl_array =
5087			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
5088			GFP_KERNEL);
5089
5090	if (!phba->sli4_hba.lpfc_els_sgl_array) {
5091		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5092				"2401 Failed to allocate memory for ELS "
5093				"XRI management array of size %d.\n",
5094				els_xri_cnt);
5095		return -ENOMEM;
5096	}
5097
5098	/* Set aside the remaining XRIs for SCSI and size their management array */
5099	phba->sli4_hba.scsi_xri_max =
5100			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
5101	phba->sli4_hba.scsi_xri_cnt = 0;
5102	phba->sli4_hba.lpfc_scsi_psb_array =
5103			kzalloc((sizeof(struct lpfc_scsi_buf *) *
5104			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
5105
5106	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
5107		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5108				"2563 Failed to allocate memory for SCSI "
5109				"XRI management array of size %d.\n",
5110				phba->sli4_hba.scsi_xri_max);
5111		kfree(phba->sli4_hba.lpfc_els_sgl_array);
5112		return -ENOMEM;
5113	}
5114
5115	for (i = 0; i < els_xri_cnt; i++) {
5116		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
5117		if (sglq_entry == NULL) {
5118			printk(KERN_ERR "%s: only allocated %d sgls of "
5119				"expected %d count. Unloading driver.\n",
5120				__func__, i, els_xri_cnt);
5121			goto out_free_mem;
5122		}
5123
5124		sglq_entry->buff_type = GEN_BUFF_TYPE;
5125		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
5126		if (sglq_entry->virt == NULL) {
5127			kfree(sglq_entry);
5128			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
5129				"Unloading driver.\n", __func__);
5130			goto out_free_mem;
5131		}
5132		sglq_entry->sgl = sglq_entry->virt;
5133		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5134
5135		/* The list order is used by later block SGL registration */
5136		spin_lock_irq(&phba->hbalock);
5137		sglq_entry->state = SGL_FREED;
5138		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5139		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5140		phba->sli4_hba.total_sglq_bufs++;
5141		spin_unlock_irq(&phba->hbalock);
5142	}
5143	return 0;
5144
5145out_free_mem:
5146	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5147	lpfc_free_sgl_list(phba);
5148	return -ENOMEM;
5149}
5150
5151/**
5152 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5153 * @phba: pointer to lpfc hba data structure.
5154 *
5155 * This routine is invoked to post rpi header templates to the
5156 * port for those SLI4 ports that do not support extents.  This routine
5157 * posts a PAGE_SIZE memory region to the port to hold up to
5158 * 64 rpi context headers.  This is an initialization routine
5159 * and should be called only when interrupts are disabled.
5160 *
5161 * Return codes
5162 * 	0 - successful
5163 *	-ERROR - otherwise.
5164 **/
5165int
5166lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5167{
5168	int rc = 0;
5169	struct lpfc_rpi_hdr *rpi_hdr;
5170
5171	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5172	if (!phba->sli4_hba.rpi_hdrs_in_use)
5173		return rc;
5174	if (phba->sli4_hba.extents_in_use)
5175		return -EIO;
5176
5177	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5178	if (!rpi_hdr) {
5179		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5180				"0391 Error during rpi post operation\n");
5181		lpfc_sli4_remove_rpis(phba);
5182		rc = -ENODEV;
5183	}
5184
5185	return rc;
5186}
5187
5188/**
5189 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5190 * @phba: pointer to lpfc hba data structure.
5191 *
5192 * This routine is invoked to allocate a single 4KB memory region to
5193 * support rpis and stores them in the phba.  This single region
5194 * provides support for up to 64 rpis.  The region is used globally
5195 * by the device.
5196 *
5197 * Returns:
5198 *   A valid rpi hdr on success.
5199 *   A NULL pointer on any failure.
5200 **/
5201struct lpfc_rpi_hdr *
5202lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5203{
5204	uint16_t rpi_limit, curr_rpi_range;
5205	struct lpfc_dmabuf *dmabuf;
5206	struct lpfc_rpi_hdr *rpi_hdr;
5207	uint32_t rpi_count;
5208
5209	/*
5210	 * If the SLI4 port supports extents, posting the rpi header isn't
5211	 * required.  Set the expected maximum count and let the actual value
5212	 * get set when extents are fully allocated.
5213	 */
5214	if (!phba->sli4_hba.rpi_hdrs_in_use)
5215		return NULL;
5216	if (phba->sli4_hba.extents_in_use)
5217		return NULL;
5218
5219	/* The limit on the logical index is rpi_base + max_rpi - 1. */
5220	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5221		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
5222
5223	spin_lock_irq(&phba->hbalock);
5224	/*
5225	 * Establish the starting RPI in this header block.  The starting
5226	 * rpi is normalized to a zero base because the physical rpi is
5227	 * port based.
5228	 */
5229	curr_rpi_range = phba->sli4_hba.next_rpi -
5230		phba->sli4_hba.max_cfg_param.rpi_base;
5231	spin_unlock_irq(&phba->hbalock);
5232
5233	/*
5234	 * The port has a limited number of rpis. The increment here
5235	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5236	 * and to allow the full max_rpi range per port.
5237	 */
5238	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5239		rpi_count = rpi_limit - curr_rpi_range;
5240	else
5241		rpi_count = LPFC_RPI_HDR_COUNT;
5242
5243	if (!rpi_count)
5244		return NULL;
5245	/*
5246	 * First allocate the protocol header region for the port.  The
5247	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5248	 */
5249	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5250	if (!dmabuf)
5251		return NULL;
5252
5253	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5254					  LPFC_HDR_TEMPLATE_SIZE,
5255					  &dmabuf->phys,
5256					  GFP_KERNEL);
5257	if (!dmabuf->virt) {
5258		rpi_hdr = NULL;
5259		goto err_free_dmabuf;
5260	}
5261
5262	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
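	/* The port requires the rpi header region to be aligned to its 4KB size */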
5263	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5264		rpi_hdr = NULL;
5265		goto err_free_coherent;
5266	}
5267
5268	/* Save the rpi header data for cleanup later. */
5269	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5270	if (!rpi_hdr)
5271		goto err_free_coherent;
5272
5273	rpi_hdr->dmabuf = dmabuf;
5274	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5275	rpi_hdr->page_count = 1;
5276	spin_lock_irq(&phba->hbalock);
5277
5278	/* The rpi_hdr stores the logical index only. */
5279	rpi_hdr->start_rpi = curr_rpi_range;
5280	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5281
5282	/*
5283	 * The next_rpi stores the next logical modulo-64 rpi value used
5284	 * to post physical rpis in subsequent rpi postings.
5285	 */
5286	phba->sli4_hba.next_rpi += rpi_count;
5287	spin_unlock_irq(&phba->hbalock);
5288	return rpi_hdr;
5289
5290 err_free_coherent:
5291	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5292			  dmabuf->virt, dmabuf->phys);
5293 err_free_dmabuf:
5294	kfree(dmabuf);
5295	return NULL;
5296}
5297
5298/**
5299 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5300 * @phba: pointer to lpfc hba data structure.
5301 *
5302 * This routine is invoked to remove all memory resources allocated
5303 * to support rpis for SLI4 ports not supporting extents. This routine
5304 * presumes the caller has released all rpis consumed by fabric or port
5305 * logins and is prepared to have the header pages removed.
5306 **/
5307void
5308lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5309{
5310	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5311
5312	if (!phba->sli4_hba.rpi_hdrs_in_use)
5313		goto exit;
5314
5315	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5316				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5317		list_del(&rpi_hdr->list);
5318		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5319				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5320		kfree(rpi_hdr->dmabuf);
5321		kfree(rpi_hdr);
5322	}
5323 exit:
5324	/* There are no rpis available to the port now. */
5325	phba->sli4_hba.next_rpi = 0;
5326}
5327
5328/**
5329 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5330 * @pdev: pointer to pci device data structure.
5331 *
5332 * This routine is invoked to allocate the driver hba data structure for an
5333 * HBA device. If the allocation is successful, the phba reference to the
5334 * PCI device data structure is set.
5335 *
5336 * Return codes
5337 *      pointer to @phba - successful
5338 *      NULL - error
5339 **/
5340static struct lpfc_hba *
5341lpfc_hba_alloc(struct pci_dev *pdev)
5342{
5343	struct lpfc_hba *phba;
5344
5345	/* Allocate memory for HBA structure */
5346	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5347	if (!phba) {
5348		dev_err(&pdev->dev, "failed to allocate hba struct\n");
5349		return NULL;
5350	}
5351
5352	/* Set reference to PCI device in HBA structure */
5353	phba->pcidev = pdev;
5354
5355	/* Assign an unused board number */
5356	phba->brd_no = lpfc_get_instance();
5357	if (phba->brd_no < 0) {
5358		kfree(phba);
5359		return NULL;
5360	}
5361
5362	spin_lock_init(&phba->ct_ev_lock);
5363	INIT_LIST_HEAD(&phba->ct_ev_waiters);
5364
5365	return phba;
5366}
5367
5368/**
5369 * lpfc_hba_free - Free driver hba data structure with a device.
5370 * @phba: pointer to lpfc hba data structure.
5371 *
5372 * This routine is invoked to free the driver hba data structure with an
5373 * HBA device.
5374 **/
5375static void
5376lpfc_hba_free(struct lpfc_hba *phba)
5377{
5378	/* Release the driver assigned board number */
5379	idr_remove(&lpfc_hba_index, phba->brd_no);
5380
5381	kfree(phba);
5382	return;
5383}
5384
5385/**
5386 * lpfc_create_shost - Create hba physical port with associated scsi host.
5387 * @phba: pointer to lpfc hba data structure.
5388 *
5389 * This routine is invoked to create HBA physical port and associate a SCSI
5390 * host with it.
5391 *
5392 * Return codes
5393 *      0 - successful
5394 *      other values - error
5395 **/
5396static int
5397lpfc_create_shost(struct lpfc_hba *phba)
5398{
5399	struct lpfc_vport *vport;
5400	struct Scsi_Host  *shost;
5401
5402	/* Initialize HBA FC structure */
5403	phba->fc_edtov = FF_DEF_EDTOV;
5404	phba->fc_ratov = FF_DEF_RATOV;
5405	phba->fc_altov = FF_DEF_ALTOV;
5406	phba->fc_arbtov = FF_DEF_ARBTOV;
5407
5408	atomic_set(&phba->sdev_cnt, 0);
5409	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5410	if (!vport)
5411		return -ENODEV;
5412
5413	shost = lpfc_shost_from_vport(vport);
5414	phba->pport = vport;
5415	lpfc_debugfs_initialize(vport);
5416	/* Put reference to SCSI host to driver's device private data */
5417	pci_set_drvdata(phba->pcidev, shost);
5418
5419	return 0;
5420}
5421
5422/**
5423 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5424 * @phba: pointer to lpfc hba data structure.
5425 *
5426 * This routine is invoked to destroy HBA physical port and the associated
5427 * SCSI host.
5428 **/
5429static void
5430lpfc_destroy_shost(struct lpfc_hba *phba)
5431{
5432	struct lpfc_vport *vport = phba->pport;
5433
5434	/* Destroy the physical port associated with the SCSI host */
5435	destroy_port(vport);
5436
5437	return;
5438}
5439
5440/**
5441 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5442 * @phba: pointer to lpfc hba data structure.
5443 * @shost: the shost to be used to detect Block guard settings.
5444 *
5445 * This routine sets up the local Block guard protocol settings for @shost.
5446 * This routine also allocates memory for debugging bg buffers.
5447 **/
5448static void
5449lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5450{
5451	int pagecnt = 10;
5452	if (lpfc_prot_mask && lpfc_prot_guard) {
5453		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5454				"1478 Registering BlockGuard with the "
5455				"SCSI layer\n");
5456		scsi_host_set_prot(shost, lpfc_prot_mask);
5457		scsi_host_set_guard(shost, lpfc_prot_guard);
5458	}
5459	if (!_dump_buf_data) {
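		/*
		 * Try to allocate the data dump buffer, falling back to
		 * progressively smaller orders: __get_free_pages() returns
		 * 2^pagecnt contiguous pages for an order of pagecnt.
		 */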
5460		spin_lock_init(&_dump_buf_lock);
5461		while (pagecnt) {
5462			_dump_buf_data =
5463				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5464			if (_dump_buf_data) {
5465				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5466					"9043 BLKGRD: allocated %d pages for "
5467				       "_dump_buf_data at 0x%p\n",
5468				       (1 << pagecnt), _dump_buf_data);
5469				_dump_buf_data_order = pagecnt;
5470				memset(_dump_buf_data, 0,
5471				       ((1 << PAGE_SHIFT) << pagecnt));
5472				break;
5473			} else
5474				--pagecnt;
5475		}
5476		if (!_dump_buf_data_order)
5477			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5478				"9044 BLKGRD: ERROR unable to allocate "
5479			       "memory for hexdump\n");
5480	} else
5481		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5482			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5483		       "\n", _dump_buf_data);
5484	if (!_dump_buf_dif) {
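		/* Repeat the same order-fallback allocation for the DIF dump buffer */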
5485		while (pagecnt) {
5486			_dump_buf_dif =
5487				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5488			if (_dump_buf_dif) {
5489				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5490					"9046 BLKGRD: allocated %d pages for "
5491				       "_dump_buf_dif at 0x%p\n",
5492				       (1 << pagecnt), _dump_buf_dif);
5493				_dump_buf_dif_order = pagecnt;
5494				memset(_dump_buf_dif, 0,
5495				       ((1 << PAGE_SHIFT) << pagecnt));
5496				break;
5497			} else
5498				--pagecnt;
5499		}
5500		if (!_dump_buf_dif_order)
5501			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5502			"9047 BLKGRD: ERROR unable to allocate "
5503			       "memory for hexdump\n");
5504	} else
5505		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5506			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5507		       _dump_buf_dif);
5508}
5509
5510/**
5511 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5512 * @phba: pointer to lpfc hba data structure.
5513 *
5514 * This routine is invoked to perform all the necessary post initialization
5515 * setup for the device.
5516 **/
5517static void
5518lpfc_post_init_setup(struct lpfc_hba *phba)
5519{
5520	struct Scsi_Host  *shost;
5521	struct lpfc_adapter_event_header adapter_event;
5522
5523	/* Get the default values for Model Name and Description */
5524	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5525
5526	/*
5527	 * hba setup may have changed the hba_queue_depth so we need to
5528	 * adjust the value of can_queue.
5529	 */
5530	shost = pci_get_drvdata(phba->pcidev);
5531	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5532	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5533		lpfc_setup_bg(phba, shost);
5534
5535	lpfc_host_attrib_init(shost);
5536
5537	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5538		spin_lock_irq(shost->host_lock);
5539		lpfc_poll_start_timer(phba);
5540		spin_unlock_irq(shost->host_lock);
5541	}
5542
5543	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5544			"0428 Perform SCSI scan\n");
5545	/* Send board arrival event to upper layer */
5546	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5547	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5548	fc_host_post_vendor_event(shost, fc_get_event_number(),
5549				  sizeof(adapter_event),
5550				  (char *) &adapter_event,
5551				  LPFC_NL_VENDOR_ID);
5552	return;
5553}
5554
5555/**
5556 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5557 * @phba: pointer to lpfc hba data structure.
5558 *
5559 * This routine is invoked to set up the PCI device memory space for device
5560 * with SLI-3 interface spec.
5561 *
5562 * Return codes
5563 * 	0 - successful
5564 * 	other values - error
5565 **/
5566static int
5567lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5568{
5569	struct pci_dev *pdev;
5570	unsigned long bar0map_len, bar2map_len;
5571	int i, hbq_count;
5572	void *ptr;
5573	int error = -ENODEV;
5574
5575	/* Obtain PCI device reference */
5576	if (!phba->pcidev)
5577		return error;
5578	else
5579		pdev = phba->pcidev;
5580
5581	/* Set the device DMA mask: try 64-bit DMA first, fall back to 32-bit */
5582	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5583	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5584		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5585		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5586			return error;
5587		}
5588	}
5589
5590	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5591	 * required by each mapping.
5592	 */
5593	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5594	bar0map_len = pci_resource_len(pdev, 0);
5595
5596	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5597	bar2map_len = pci_resource_len(pdev, 2);
5598
5599	/* Map HBA SLIM to a kernel virtual address. */
5600	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5601	if (!phba->slim_memmap_p) {
5602		dev_printk(KERN_ERR, &pdev->dev,
5603			   "ioremap failed for SLIM memory.\n");
5604		goto out;
5605	}
5606
5607	/* Map HBA Control Registers to a kernel virtual address. */
5608	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5609	if (!phba->ctrl_regs_memmap_p) {
5610		dev_printk(KERN_ERR, &pdev->dev,
5611			   "ioremap failed for HBA control registers.\n");
5612		goto out_iounmap_slim;
5613	}
5614
5615	/* Allocate memory for SLI-2 structures */
5616	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5617					       SLI2_SLIM_SIZE,
5618					       &phba->slim2p.phys,
5619					       GFP_KERNEL);
5620	if (!phba->slim2p.virt)
5621		goto out_iounmap;
5622
5623	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
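	/* Carve the SLI-2 slim region into mailbox, extension, PCB and IOCB areas */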
5624	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5625	phba->mbox_ext = (phba->slim2p.virt +
5626		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5627	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5628	phba->IOCBs = (phba->slim2p.virt +
5629		       offsetof(struct lpfc_sli2_slim, IOCBs));
5630
5631	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5632						 lpfc_sli_hbq_size(),
5633						 &phba->hbqslimp.phys,
5634						 GFP_KERNEL);
5635	if (!phba->hbqslimp.virt)
5636		goto out_free_slim;
5637
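	/* Carve the coherent HBQ slim buffer into per-HBQ buffer regions */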
5638	hbq_count = lpfc_sli_hbq_count();
5639	ptr = phba->hbqslimp.virt;
5640	for (i = 0; i < hbq_count; ++i) {
5641		phba->hbqs[i].hbq_virt = ptr;
5642		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5643		ptr += (lpfc_hbq_defs[i]->entry_count *
5644			sizeof(struct lpfc_hbq_entry));
5645	}
5646	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5647	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5648
5649	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5650
5651	INIT_LIST_HEAD(&phba->rb_pend_list);
5652
5653	phba->MBslimaddr = phba->slim_memmap_p;
5654	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5655	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5656	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5657	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5658
5659	return 0;
5660
5661out_free_slim:
5662	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5663			  phba->slim2p.virt, phba->slim2p.phys);
5664out_iounmap:
5665	iounmap(phba->ctrl_regs_memmap_p);
5666out_iounmap_slim:
5667	iounmap(phba->slim_memmap_p);
5668out:
5669	return error;
5670}
5671
5672/**
5673 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5674 * @phba: pointer to lpfc hba data structure.
5675 *
5676 * This routine is invoked to unset the PCI device memory space for device
5677 * with SLI-3 interface spec.
5678 **/
5679static void
5680lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5681{
5682	struct pci_dev *pdev;
5683
5684	/* Obtain PCI device reference */
5685	if (!phba->pcidev)
5686		return;
5687	else
5688		pdev = phba->pcidev;
5689
5690	/* Free coherent DMA memory allocated */
5691	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5692			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5693	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5694			  phba->slim2p.virt, phba->slim2p.phys);
5695
5696	/* I/O memory unmap */
5697	iounmap(phba->ctrl_regs_memmap_p);
5698	iounmap(phba->slim_memmap_p);
5699
5700	return;
5701}
5702
5703/**
5704 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5705 * @phba: pointer to lpfc hba data structure.
5706 *
5707 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5708 * done and check status.
5709 *
5710 * Return 0 if successful, otherwise -ENODEV.
5711 **/
5712int
5713lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5714{
5715	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5716	struct lpfc_register reg_data;
5717	int i, port_error = 0;
5718	uint32_t if_type;
5719
5720	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5721	memset(&reg_data, 0, sizeof(reg_data));
5722	if (!phba->sli4_hba.PSMPHRregaddr)
5723		return -ENODEV;
5724
5725	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5726	for (i = 0; i < 3000; i++) {
5727		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5728			&portsmphr_reg.word0) ||
5729			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5730			/* Port has a fatal POST error, break out */
5731			port_error = -ENODEV;
5732			break;
5733		}
5734		if (LPFC_POST_STAGE_PORT_READY ==
5735		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5736			break;
5737		msleep(10);
5738	}
5739
5740	/*
5741	 * If there was a port error during POST, then don't proceed with
5742	 * other register reads as the data may not be valid.  Just exit.
5743	 */
5744	if (port_error) {
5745		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5746			"1408 Port Failed POST - portsmphr=0x%x, "
5747			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5748			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5749			portsmphr_reg.word0,
5750			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5751			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5752			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5753			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5754			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5755			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5756			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5757			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5758	} else {
5759		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5760				"2534 Device Info: SLIFamily=0x%x, "
5761				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5762				"SLIHint_2=0x%x, FT=0x%x\n",
5763				bf_get(lpfc_sli_intf_sli_family,
5764				       &phba->sli4_hba.sli_intf),
5765				bf_get(lpfc_sli_intf_slirev,
5766				       &phba->sli4_hba.sli_intf),
5767				bf_get(lpfc_sli_intf_if_type,
5768				       &phba->sli4_hba.sli_intf),
5769				bf_get(lpfc_sli_intf_sli_hint1,
5770				       &phba->sli4_hba.sli_intf),
5771				bf_get(lpfc_sli_intf_sli_hint2,
5772				       &phba->sli4_hba.sli_intf),
5773				bf_get(lpfc_sli_intf_func_type,
5774				       &phba->sli4_hba.sli_intf));
5775		/*
5776		 * Check for other Port errors during the initialization
5777		 * process.  Fail the load if the port did not come up
5778		 * correctly.
5779		 */
5780		if_type = bf_get(lpfc_sli_intf_if_type,
5781				 &phba->sli4_hba.sli_intf);
5782		switch (if_type) {
5783		case LPFC_SLI_INTF_IF_TYPE_0:
5784			phba->sli4_hba.ue_mask_lo =
5785			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5786			phba->sli4_hba.ue_mask_hi =
5787			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5788			uerrlo_reg.word0 =
5789			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5790			uerrhi_reg.word0 =
5791				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5792			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5793			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5794				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5795						"1422 Unrecoverable Error "
5796						"Detected during POST "
5797						"uerr_lo_reg=0x%x, "
5798						"uerr_hi_reg=0x%x, "
5799						"ue_mask_lo_reg=0x%x, "
5800						"ue_mask_hi_reg=0x%x\n",
5801						uerrlo_reg.word0,
5802						uerrhi_reg.word0,
5803						phba->sli4_hba.ue_mask_lo,
5804						phba->sli4_hba.ue_mask_hi);
5805				port_error = -ENODEV;
5806			}
5807			break;
5808		case LPFC_SLI_INTF_IF_TYPE_2:
5809			/* Final checks.  The port status should be clean. */
5810			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5811				&reg_data.word0) ||
5812				(bf_get(lpfc_sliport_status_err, &reg_data) &&
5813				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5814				phba->work_status[0] =
5815					readl(phba->sli4_hba.u.if_type2.
5816					      ERR1regaddr);
5817				phba->work_status[1] =
5818					readl(phba->sli4_hba.u.if_type2.
5819					      ERR2regaddr);
5820				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5821					"2888 Port Error Detected "
5822					"during POST: "
5823					"port status reg 0x%x, "
5824					"port_smphr reg 0x%x, "
5825					"error 1=0x%x, error 2=0x%x\n",
5826					reg_data.word0,
5827					portsmphr_reg.word0,
5828					phba->work_status[0],
5829					phba->work_status[1]);
5830				port_error = -ENODEV;
5831			}
5832			break;
5833		case LPFC_SLI_INTF_IF_TYPE_1:
5834		default:
5835			break;
5836		}
5837	}
5838	return port_error;
5839}
5840
5841/**
5842 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5843 * @phba: pointer to lpfc hba data structure.
5844 * @if_type:  The SLI4 interface type getting configured.
5845 *
5846 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5847 * memory map.
5848 **/
5849static void
5850lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5851{
5852	switch (if_type) {
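	/*
	 * if_type 0 exposes the unrecoverable-error status and mask
	 * registers in BAR0; if_type 2 exposes the port control, status
	 * and doorbell registers there instead.
	 */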
5853	case LPFC_SLI_INTF_IF_TYPE_0:
5854		phba->sli4_hba.u.if_type0.UERRLOregaddr =
5855			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5856		phba->sli4_hba.u.if_type0.UERRHIregaddr =
5857			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5858		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5859			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5860		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5861			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5862		phba->sli4_hba.SLIINTFregaddr =
5863			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5864		break;
5865	case LPFC_SLI_INTF_IF_TYPE_2:
5866		phba->sli4_hba.u.if_type2.ERR1regaddr =
5867			phba->sli4_hba.conf_regs_memmap_p +
5868						LPFC_CTL_PORT_ER1_OFFSET;
5869		phba->sli4_hba.u.if_type2.ERR2regaddr =
5870			phba->sli4_hba.conf_regs_memmap_p +
5871						LPFC_CTL_PORT_ER2_OFFSET;
5872		phba->sli4_hba.u.if_type2.CTRLregaddr =
5873			phba->sli4_hba.conf_regs_memmap_p +
5874						LPFC_CTL_PORT_CTL_OFFSET;
5875		phba->sli4_hba.u.if_type2.STATUSregaddr =
5876			phba->sli4_hba.conf_regs_memmap_p +
5877						LPFC_CTL_PORT_STA_OFFSET;
5878		phba->sli4_hba.SLIINTFregaddr =
5879			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5880		phba->sli4_hba.PSMPHRregaddr =
5881			phba->sli4_hba.conf_regs_memmap_p +
5882						LPFC_CTL_PORT_SEM_OFFSET;
5883		phba->sli4_hba.RQDBregaddr =
5884			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5885		phba->sli4_hba.WQDBregaddr =
5886			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5887		phba->sli4_hba.EQCQDBregaddr =
5888			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5889		phba->sli4_hba.MQDBregaddr =
5890			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5891		phba->sli4_hba.BMBXregaddr =
5892			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5893		break;
5894	case LPFC_SLI_INTF_IF_TYPE_1:
5895	default:
5896		dev_printk(KERN_ERR, &phba->pcidev->dev,
5897			   "FATAL - unsupported SLI4 interface type - %d\n",
5898			   if_type);
5899		break;
5900	}
5901}
5902
5903/**
5904 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5905 * @phba: pointer to lpfc hba data structure.
5906 *
5907 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5908 * memory map.
5909 **/
5910static void
5911lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5912{
5913	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5914		LPFC_SLIPORT_IF0_SMPHR;
5915	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5916		LPFC_HST_ISR0;
5917	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5918		LPFC_HST_IMR0;
5919	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5920		LPFC_HST_ISCR0;
5921}
5922
5923/**
5924 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5925 * @phba: pointer to lpfc hba data structure.
5926 * @vf: virtual function number
5927 *
5928 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5929 * based on the given virtual function number, @vf.
5930 *
5931 * Return 0 if successful, otherwise -ENODEV.
5932 **/
5933static int
5934lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5935{
5936	if (vf > LPFC_VIR_FUNC_MAX)
5937		return -ENODEV;
5938
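	/* Each virtual function owns its own doorbell page within BAR2 */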
5939	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5940				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5941	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5942				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5943	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5944				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5945	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5946				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5947	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5948				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5949	return 0;
5950}
5951
5952/**
5953 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5954 * @phba: pointer to lpfc hba data structure.
5955 *
5956 * This routine is invoked to create the bootstrap mailbox
5957 * region consistent with the SLI-4 interface spec.  This
5958 * routine allocates all memory necessary to communicate
5959 * mailbox commands to the port and sets up all alignment
5960 * needs.  No locks are expected to be held when calling
5961 * this routine.
5962 *
5963 * Return codes
5964 * 	0 - successful
5965 * 	-ENOMEM - could not allocate memory.
5966 **/
5967static int
5968lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5969{
5970	uint32_t bmbx_size;
5971	struct lpfc_dmabuf *dmabuf;
5972	struct dma_address *dma_address;
5973	uint32_t pa_addr;
5974	uint64_t phys_addr;
5975
5976	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5977	if (!dmabuf)
5978		return -ENOMEM;
5979
5980	/*
5981	 * The bootstrap mailbox region is made up of 2 parts plus
5982	 * extra bytes to satisfy a 16-byte alignment restriction.
5983	 */
5984	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5985	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5986					  bmbx_size,
5987					  &dmabuf->phys,
5988					  GFP_KERNEL);
5989	if (!dmabuf->virt) {
5990		kfree(dmabuf);
5991		return -ENOMEM;
5992	}
5993	memset(dmabuf->virt, 0, bmbx_size);
5994
5995	/*
5996	 * Initialize the bootstrap mailbox pointers now so that the register
5997	 * operations are simple later.  The mailbox dma address is required
5998	 * to be 16-byte aligned.  Also align the virtual memory as each
5999	 * mailbox is copied into the bmbx mailbox region before issuing the
6000	 * command to the port.
6001	 */
6002	phba->sli4_hba.bmbx.dmabuf = dmabuf;
6003	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6004
6005	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6006					      LPFC_ALIGN_16_BYTE);
6007	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6008					      LPFC_ALIGN_16_BYTE);
6009
6010	/*
6011	 * Set the high and low physical addresses now.  The SLI4 alignment
6012	 * requirement is 16 bytes and the mailbox is posted to the port
6013	 * as two 30-bit addresses.  The other data is a bit marking whether
6014	 * the 30-bit address is the high or low address.
6015	 * Upcast bmbx aphys to 64bits so shift instruction compiles
6016	 * clean on 32 bit machines.
6017	 */
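	/*
	 * For example, with a 16-byte-aligned aphys, addr_hi ends up
	 * holding physical address bits 63:34 and addr_lo bits 33:4;
	 * each 30-bit chunk is shifted left by 2 and OR'd with a flag
	 * marking it as the high or low half.
	 */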
6018	dma_address = &phba->sli4_hba.bmbx.dma_address;
6019	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6020	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6021	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6022					   LPFC_BMBX_BIT1_ADDR_HI);
6023
6024	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6025	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6026					   LPFC_BMBX_BIT1_ADDR_LO);
6027	return 0;
6028}
6029
6030/**
6031 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6032 * @phba: pointer to lpfc hba data structure.
6033 *
6034 * This routine is invoked to teardown the bootstrap mailbox
6035 * region and release all host resources. This routine requires
6036 * the caller to ensure all mailbox commands recovered, no
6037 * additional mailbox comands are sent, and interrupts are disabled
6038 * before calling this routine.
6039 *
6040 **/
6041static void
6042lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6043{
6044	dma_free_coherent(&phba->pcidev->dev,
6045			  phba->sli4_hba.bmbx.bmbx_size,
6046			  phba->sli4_hba.bmbx.dmabuf->virt,
6047			  phba->sli4_hba.bmbx.dmabuf->phys);
6048
6049	kfree(phba->sli4_hba.bmbx.dmabuf);
6050	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6051}
6052
6053/**
6054 * lpfc_sli4_read_config - Get the config parameters.
6055 * @phba: pointer to lpfc hba data structure.
6056 *
6057 * This routine is invoked to read the configuration parameters from the HBA.
6058 * The configuration parameters are used to set the base and maximum values
6059 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
6060 * allocation for the port.
6061 *
6062 * Return codes
6063 * 	0 - successful
6064 * 	-ENOMEM - No available memory
6065 *      -EIO - The mailbox failed to complete successfully.
6066 **/
6067int
6068lpfc_sli4_read_config(struct lpfc_hba *phba)
6069{
6070	LPFC_MBOXQ_t *pmb;
6071	struct lpfc_mbx_read_config *rd_config;
6072	union  lpfc_sli4_cfg_shdr *shdr;
6073	uint32_t shdr_status, shdr_add_status;
6074	struct lpfc_mbx_get_func_cfg *get_func_cfg;
6075	struct lpfc_rsrc_desc_fcfcoe *desc;
6076	uint32_t desc_count;
6077	int length, i, rc = 0;
6078
6079	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6080	if (!pmb) {
6081		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6082				"2011 Unable to allocate memory for issuing "
6083				"SLI_CONFIG_SPECIAL mailbox command\n");
6084		return -ENOMEM;
6085	}
6086
6087	lpfc_read_config(phba, pmb);
6088
6089	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6090	if (rc != MBX_SUCCESS) {
6091		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6092			"2012 Mailbox failed, mbxCmd x%x "
6093			"READ_CONFIG, mbxStatus x%x\n",
6094			bf_get(lpfc_mqe_command, &pmb->u.mqe),
6095			bf_get(lpfc_mqe_status, &pmb->u.mqe));
6096		rc = -EIO;
6097	} else {
6098		rd_config = &pmb->u.mqe.un.rd_config;
6099		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6100			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6101			phba->sli4_hba.lnk_info.lnk_tp =
6102				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6103			phba->sli4_hba.lnk_info.lnk_no =
6104				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6105			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6106					"3081 lnk_type:%d, lnk_numb:%d\n",
6107					phba->sli4_hba.lnk_info.lnk_tp,
6108					phba->sli4_hba.lnk_info.lnk_no);
6109		} else
6110			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6111					"3082 Mailbox (x%x) returned ldv:x0\n",
6112					bf_get(lpfc_mqe_command, &pmb->u.mqe));
6113		phba->sli4_hba.extents_in_use =
6114			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6115		phba->sli4_hba.max_cfg_param.max_xri =
6116			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6117		phba->sli4_hba.max_cfg_param.xri_base =
6118			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6119		phba->sli4_hba.max_cfg_param.max_vpi =
6120			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6121		phba->sli4_hba.max_cfg_param.vpi_base =
6122			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6123		phba->sli4_hba.max_cfg_param.max_rpi =
6124			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6125		phba->sli4_hba.max_cfg_param.rpi_base =
6126			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6127		phba->sli4_hba.max_cfg_param.max_vfi =
6128			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6129		phba->sli4_hba.max_cfg_param.vfi_base =
6130			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6131		phba->sli4_hba.max_cfg_param.max_fcfi =
6132			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6133		phba->sli4_hba.max_cfg_param.max_eq =
6134			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6135		phba->sli4_hba.max_cfg_param.max_rq =
6136			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6137		phba->sli4_hba.max_cfg_param.max_wq =
6138			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6139		phba->sli4_hba.max_cfg_param.max_cq =
6140			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6141		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
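		/* Derive the driver's starting indices and limits from the port config */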
6142		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6143		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6144		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6145		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
6146		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6147				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6148		phba->max_vports = phba->max_vpi;
6149		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6150				"2003 cfg params Extents? %d "
6151				"XRI(B:%d M:%d), "
6152				"VPI(B:%d M:%d) "
6153				"VFI(B:%d M:%d) "
6154				"RPI(B:%d M:%d) "
6155				"FCFI(Count:%d)\n",
6156				phba->sli4_hba.extents_in_use,
6157				phba->sli4_hba.max_cfg_param.xri_base,
6158				phba->sli4_hba.max_cfg_param.max_xri,
6159				phba->sli4_hba.max_cfg_param.vpi_base,
6160				phba->sli4_hba.max_cfg_param.max_vpi,
6161				phba->sli4_hba.max_cfg_param.vfi_base,
6162				phba->sli4_hba.max_cfg_param.max_vfi,
6163				phba->sli4_hba.max_cfg_param.rpi_base,
6164				phba->sli4_hba.max_cfg_param.max_rpi,
6165				phba->sli4_hba.max_cfg_param.max_fcfi);
6166	}
6167
6168	if (rc)
6169		goto read_cfg_out;
6170
6171	/* Cap the hba queue depth to the XRIs remaining after the ELS allocation */
6172	if (phba->cfg_hba_queue_depth >
6173		(phba->sli4_hba.max_cfg_param.max_xri -
6174			lpfc_sli4_get_els_iocb_cnt(phba)))
6175		phba->cfg_hba_queue_depth =
6176			phba->sli4_hba.max_cfg_param.max_xri -
6177				lpfc_sli4_get_els_iocb_cnt(phba);
6178
6179	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6180	    LPFC_SLI_INTF_IF_TYPE_2)
6181		goto read_cfg_out;
6182
6183	/* get the pf# and vf# for SLI4 if_type 2 port */
6184	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6185		  sizeof(struct lpfc_sli4_cfg_mhdr));
6186	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6187			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6188			 length, LPFC_SLI4_MBX_EMBED);
6189
6190	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6191	shdr = (union lpfc_sli4_cfg_shdr *)
6192				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6193	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6194	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6195	if (rc || shdr_status || shdr_add_status) {
6196		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6197				"3026 Mailbox failed, mbxCmd x%x "
6198				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6199				bf_get(lpfc_mqe_command, &pmb->u.mqe),
6200				bf_get(lpfc_mqe_status, &pmb->u.mqe));
6201		rc = -EIO;
6202		goto read_cfg_out;
6203	}
6204
6205	/* Search for the fc_fcoe resource descriptor */
6206	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6207	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6208
6209	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6210		desc = (struct lpfc_rsrc_desc_fcfcoe *)
6211			&get_func_cfg->func_cfg.desc[i];
6212		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6213		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6214			phba->sli4_hba.iov.pf_number =
6215				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6216			phba->sli4_hba.iov.vf_number =
6217				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6218			break;
6219		}
6220	}
6221
6222	if (i < LPFC_RSRC_DESC_MAX_NUM)
6223		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6224				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6225				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6226				phba->sli4_hba.iov.vf_number);
6227	else {
6228		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6229				"3028 GET_FUNCTION_CONFIG: failed to find "
6230				"Resource Descriptor:x%x\n",
6231				LPFC_RSRC_DESC_TYPE_FCFCOE);
6232		rc = -EIO;
6233	}
6234
6235read_cfg_out:
6236	mempool_free(pmb, phba->mbox_mem_pool);
6237	return rc;
6238}
6239
6240/**
6241 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6242 * @phba: pointer to lpfc hba data structure.
6243 *
6244 * This routine is invoked to setup the port-side endian order when
6245 * the port if_type is 0.  This routine has no function for other
6246 * if_types.
6247 *
6248 * Return codes
6249 * 	0 - successful
6250 * 	-ENOMEM - No available memory
6251 *      -EIO - The mailbox failed to complete successfully.
6252 **/
6253static int
6254lpfc_setup_endian_order(struct lpfc_hba *phba)
6255{
6256	LPFC_MBOXQ_t *mboxq;
6257	uint32_t if_type, rc = 0;
6258	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6259				      HOST_ENDIAN_HIGH_WORD1};
6260
6261	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6262	switch (if_type) {
6263	case LPFC_SLI_INTF_IF_TYPE_0:
6264		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6265						       GFP_KERNEL);
6266		if (!mboxq) {
6267			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6268					"0492 Unable to allocate memory for "
6269					"issuing SLI_CONFIG_SPECIAL mailbox "
6270					"command\n");
6271			return -ENOMEM;
6272		}
6273
6274		/*
6275		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6276		 * two words to contain special data values and no other data.
6277		 */
6278		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6279		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6280		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6281		if (rc != MBX_SUCCESS) {
6282			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6283					"0493 SLI_CONFIG_SPECIAL mailbox "
6284					"failed with status x%x\n",
6285					rc);
6286			rc = -EIO;
6287		}
6288		mempool_free(mboxq, phba->mbox_mem_pool);
6289		break;
6290	case LPFC_SLI_INTF_IF_TYPE_2:
6291	case LPFC_SLI_INTF_IF_TYPE_1:
6292	default:
6293		break;
6294	}
6295	return rc;
6296}
6297
6298/**
6299 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6300 * @phba: pointer to lpfc hba data structure.
6301 *
6302 * This routine is invoked to check the user-settable queue counts for EQs and
6303 * CQs. After this routine is called, the counts will be set to valid values that
6304 * adhere to the constraints of the system's interrupt vectors and the port's
6305 * queue resources.
6306 *
6307 * Return codes
6308 *      0 - successful
6309 *      -ENOMEM - No available memory
6310 **/
6311static int
6312lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6313{
6314	int cfg_fcp_wq_count;
6315	int cfg_fcp_eq_count;
6316
6317	/*
6318	 * Sanity check the configured queue parameters against the run-time
6319	 * device parameters
6320	 */
6321
6322	/* Sanity check on FCP fast-path WQ parameters */
6323	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
6324	if (cfg_fcp_wq_count >
6325	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
6326		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
6327				   LPFC_SP_WQN_DEF;
6328		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
6329			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6330					"2581 Not enough WQs (%d) from "
6331					"the pci function for supporting "
6332					"FCP WQs (%d)\n",
6333					phba->sli4_hba.max_cfg_param.max_wq,
6334					phba->cfg_fcp_wq_count);
6335			goto out_error;
6336		}
6337		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6338				"2582 Not enough WQs (%d) from the pci "
6339				"function for supporting the requested "
6340				"FCP WQs (%d), the actual FCP WQs can "
6341				"be supported: %d\n",
6342				phba->sli4_hba.max_cfg_param.max_wq,
6343				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6344	}
6345	/* The actual number of FCP work queues adopted */
6346	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6347
6348	/* Sanity check on FCP fast-path EQ parameters */
6349	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6350	if (cfg_fcp_eq_count >
6351	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6352		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6353				   LPFC_SP_EQN_DEF;
6354		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6355			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6356					"2574 Not enough EQs (%d) from the "
6357					"pci function for supporting FCP "
6358					"EQs (%d)\n",
6359					phba->sli4_hba.max_cfg_param.max_eq,
6360					phba->cfg_fcp_eq_count);
6361			goto out_error;
6362		}
6363		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6364				"2575 Not enough EQs (%d) from the pci "
6365				"function for supporting the requested "
6366				"FCP EQs (%d), the actual FCP EQs can "
6367				"be supported: %d\n",
6368				phba->sli4_hba.max_cfg_param.max_eq,
6369				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
6370	}
6371	/* It does not make sense to have more EQs than WQs */
6372	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6373		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6374				"2593 The FCP EQ count(%d) cannot be greater "
6375				"than the FCP WQ count(%d), limiting the "
6376				"FCP EQ count to %d\n", cfg_fcp_eq_count,
6377				phba->cfg_fcp_wq_count,
6378				phba->cfg_fcp_wq_count);
6379		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6380	}
6381	/* The actual number of FCP event queues adopted */
6382	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
6383	/* The overall number of event queues used */
6384	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
6385
6386	/* Get EQ depth from module parameter, fake the default for now */
6387	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6388	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6389
6390	/* Get CQ depth from module parameter, fake the default for now */
6391	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6392	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6393
6394	return 0;
6395out_error:
6396	return -ENOMEM;
6397}
6398
6399/**
6400 * lpfc_sli4_queue_create - Create all the SLI4 queues
6401 * @phba: pointer to lpfc hba data structure.
6402 *
6403 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6404 * operation. For each SLI4 queue type, the parameters such as queue entry
6405 * count (queue depth) shall be taken from the module parameter. For now,
6406 * we just use some constant number as a placeholder.
6407 *
6408 * Return codes
6409 *      0 - successful
6410 *      -ENOMEM - No available memory
6411 *      -EIO - The mailbox failed to complete successfully.
6412 **/
6413int
6414lpfc_sli4_queue_create(struct lpfc_hba *phba)
6415{
6416	struct lpfc_queue *qdesc;
6417	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6418
6419	/*
6420	 * Create Event Queues (EQs)
6421	 */
6422
6423	/* Create slow path event queue */
6424	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6425				      phba->sli4_hba.eq_ecount);
6426	if (!qdesc) {
6427		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6428				"0496 Failed allocate slow-path EQ\n");
6429		goto out_error;
6430	}
6431	phba->sli4_hba.sp_eq = qdesc;
6432
6433	/*
6434	 * Create fast-path FCP Event Queue(s).  The cfg_fcp_eq_count can be
6435	 * zero whenever there is exactly one interrupt vector.  This is not
6436	 * an error.
6437	 */
6438	if (phba->cfg_fcp_eq_count) {
6439		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6440				       phba->cfg_fcp_eq_count), GFP_KERNEL);
6441		if (!phba->sli4_hba.fp_eq) {
6442			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6443					"2576 Failed allocate memory for "
6444					"fast-path EQ record array\n");
6445			goto out_free_sp_eq;
6446		}
6447	}
6448	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6449		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6450					      phba->sli4_hba.eq_ecount);
6451		if (!qdesc) {
6452			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6453					"0497 Failed allocate fast-path EQ\n");
6454			goto out_free_fp_eq;
6455		}
6456		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6457	}
6458
6459	/*
6460	 * Create Complete Queues (CQs)
6461	 */
6462
6463	/* Create slow-path Mailbox Command Complete Queue */
6464	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6465				      phba->sli4_hba.cq_ecount);
6466	if (!qdesc) {
6467		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6468				"0500 Failed allocate slow-path mailbox CQ\n");
6469		goto out_free_fp_eq;
6470	}
6471	phba->sli4_hba.mbx_cq = qdesc;
6472
6473	/* Create slow-path ELS Complete Queue */
6474	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6475				      phba->sli4_hba.cq_ecount);
6476	if (!qdesc) {
6477		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6478				"0501 Failed allocate slow-path ELS CQ\n");
6479		goto out_free_mbx_cq;
6480	}
6481	phba->sli4_hba.els_cq = qdesc;
6482
6483
6484	/*
6485	 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
6486	 * If there are no FCP EQs then create exactly one FCP CQ.
6487	 */
6488	if (phba->cfg_fcp_eq_count)
6489		phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6490						 phba->cfg_fcp_eq_count),
6491						GFP_KERNEL);
6492	else
6493		phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6494						GFP_KERNEL);
6495	if (!phba->sli4_hba.fcp_cq) {
6496		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6497				"2577 Failed allocate memory for fast-path "
6498				"CQ record array\n");
6499		goto out_free_els_cq;
6500	}
6501	fcp_cqidx = 0;
6502	do {
6503		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6504					      phba->sli4_hba.cq_ecount);
6505		if (!qdesc) {
6506			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6507					"0499 Failed allocate fast-path FCP "
6508					"CQ (%d)\n", fcp_cqidx);
6509			goto out_free_fcp_cq;
6510		}
6511		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6512	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6513
6514	/* Create Mailbox Command Queue */
6515	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6516	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6517
6518	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6519				      phba->sli4_hba.mq_ecount);
6520	if (!qdesc) {
6521		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6522				"0505 Failed allocate slow-path MQ\n");
6523		goto out_free_fcp_cq;
6524	}
6525	phba->sli4_hba.mbx_wq = qdesc;
6526
6527	/*
6528	 * Create all the Work Queues (WQs)
6529	 */
6530	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6531	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6532
6533	/* Create slow-path ELS Work Queue */
6534	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6535				      phba->sli4_hba.wq_ecount);
6536	if (!qdesc) {
6537		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6538				"0504 Failed allocate slow-path ELS WQ\n");
6539		goto out_free_mbx_wq;
6540	}
6541	phba->sli4_hba.els_wq = qdesc;
6542
6543	/* Create fast-path FCP Work Queue(s) */
6544	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6545				phba->cfg_fcp_wq_count), GFP_KERNEL);
6546	if (!phba->sli4_hba.fcp_wq) {
6547		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6548				"2578 Failed allocate memory for fast-path "
6549				"WQ record array\n");
6550		goto out_free_els_wq;
6551	}
6552	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6553		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6554					      phba->sli4_hba.wq_ecount);
6555		if (!qdesc) {
6556			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6557					"0503 Failed allocate fast-path FCP "
6558					"WQ (%d)\n", fcp_wqidx);
6559			goto out_free_fcp_wq;
6560		}
6561		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6562	}
6563
6564	/*
6565	 * Create Receive Queue (RQ)
6566	 */
6567	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6568	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6569
6570	/* Create Receive Queue for header */
6571	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6572				      phba->sli4_hba.rq_ecount);
6573	if (!qdesc) {
6574		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6575				"0506 Failed allocate receive HRQ\n");
6576		goto out_free_fcp_wq;
6577	}
6578	phba->sli4_hba.hdr_rq = qdesc;
6579
6580	/* Create Receive Queue for data */
6581	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6582				      phba->sli4_hba.rq_ecount);
6583	if (!qdesc) {
6584		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6585				"0507 Failed allocate receive DRQ\n");
6586		goto out_free_hdr_rq;
6587	}
6588	phba->sli4_hba.dat_rq = qdesc;
6589
6590	return 0;
6591
6592out_free_hdr_rq:
6593	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6594	phba->sli4_hba.hdr_rq = NULL;
6595out_free_fcp_wq:
6596	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6597		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6598		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6599	}
6600	kfree(phba->sli4_hba.fcp_wq);
6601	phba->sli4_hba.fcp_wq = NULL;
6602out_free_els_wq:
6603	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6604	phba->sli4_hba.els_wq = NULL;
6605out_free_mbx_wq:
6606	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6607	phba->sli4_hba.mbx_wq = NULL;
6608out_free_fcp_cq:
6609	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6610		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6611		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6612	}
6613	kfree(phba->sli4_hba.fcp_cq);
6614	phba->sli4_hba.fcp_cq = NULL;
6615out_free_els_cq:
6616	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6617	phba->sli4_hba.els_cq = NULL;
6618out_free_mbx_cq:
6619	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6620	phba->sli4_hba.mbx_cq = NULL;
6621out_free_fp_eq:
6622	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6623		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6624		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6625	}
6626	kfree(phba->sli4_hba.fp_eq);
6627	phba->sli4_hba.fp_eq = NULL;
6628out_free_sp_eq:
6629	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6630	phba->sli4_hba.sp_eq = NULL;
6631out_error:
6632	return -ENOMEM;
6633}
6634
6635/**
6636 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6637 * @phba: pointer to lpfc hba data structure.
6638 *
6639 * This routine is invoked to release all the SLI4 queues allocated for the
6640 * FCoE HBA operation.
6646 **/
6647void
6648lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6649{
6650	int fcp_qidx;
6651
6652	/* Release mailbox command work queue */
6653	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6654	phba->sli4_hba.mbx_wq = NULL;
6655
6656	/* Release ELS work queue */
6657	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6658	phba->sli4_hba.els_wq = NULL;
6659
6660	/* Release FCP work queue */
6661	if (phba->sli4_hba.fcp_wq != NULL)
6662		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
6663		     fcp_qidx++)
6664			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6665	kfree(phba->sli4_hba.fcp_wq);
6666	phba->sli4_hba.fcp_wq = NULL;
6667
6668	/* Release unsolicited receive queue */
6669	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6670	phba->sli4_hba.hdr_rq = NULL;
6671	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6672	phba->sli4_hba.dat_rq = NULL;
6673
6674	/* Release ELS complete queue */
6675	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6676	phba->sli4_hba.els_cq = NULL;
6677
6678	/* Release mailbox command complete queue */
6679	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6680	phba->sli4_hba.mbx_cq = NULL;
6681
6682	/* Release FCP response complete queue */
6683	fcp_qidx = 0;
6684	if (phba->sli4_hba.fcp_cq != NULL)
6685		do {
6686			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6687		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
6688	kfree(phba->sli4_hba.fcp_cq);
6689	phba->sli4_hba.fcp_cq = NULL;
6690
6691	/* Release fast-path event queue */
6692	if (phba->sli4_hba.fp_eq != NULL)
6693		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6694		     fcp_qidx++)
6695			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6696	kfree(phba->sli4_hba.fp_eq);
6697	phba->sli4_hba.fp_eq = NULL;
6698
6699	/* Release slow-path event queue */
6700	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6701	phba->sli4_hba.sp_eq = NULL;
6702
6703	return;
6704}
6705
6706/**
6707 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6708 * @phba: pointer to lpfc hba data structure.
6709 *
6710 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6711 * operation.
6712 *
6713 * Return codes
6714 *      0 - successful
6715 *      -ENOMEM - No available memory
6716 *      -EIO - The mailbox failed to complete successfully.
6717 **/
6718int
6719lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6720{
6721	int rc = -ENOMEM;
6722	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6723	int fcp_cq_index = 0;
6724
6725	/*
6726	 * Set up Event Queues (EQs)
6727	 */
6728
6729	/* Set up slow-path event queue */
6730	if (!phba->sli4_hba.sp_eq) {
6731		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6732				"0520 Slow-path EQ not allocated\n");
6733		goto out_error;
6734	}
6735	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6736			    LPFC_SP_DEF_IMAX);
6737	if (rc) {
6738		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739				"0521 Failed setup of slow-path EQ: "
6740				"rc = 0x%x\n", rc);
6741		goto out_error;
6742	}
6743	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6744			"2583 Slow-path EQ setup: queue-id=%d\n",
6745			phba->sli4_hba.sp_eq->queue_id);
6746
6747	/* Set up fast-path event queue */
6748	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
6749		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6750				"3147 Fast-path EQs not allocated\n");
6751		rc = -ENOMEM;
6752		goto out_destroy_sp_eq;
6753	}
6754	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6755		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6756			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6757					"0522 Fast-path EQ (%d) not "
6758					"allocated\n", fcp_eqidx);
6759			rc = -ENOMEM;
6760			goto out_destroy_fp_eq;
6761		}
6762		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6763				    phba->cfg_fcp_imax);
6764		if (rc) {
6765			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6766					"0523 Failed setup of fast-path EQ "
6767					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6768			goto out_destroy_fp_eq;
6769		}
6770		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6771				"2584 Fast-path EQ setup: "
6772				"queue[%d]-id=%d\n", fcp_eqidx,
6773				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6774	}
6775
6776	/*
6777	 * Set up Complete Queues (CQs)
6778	 */
6779
6780	/* Set up slow-path MBOX Complete Queue as the first CQ */
6781	if (!phba->sli4_hba.mbx_cq) {
6782		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6783				"0528 Mailbox CQ not allocated\n");
6784		rc = -ENOMEM;
6785		goto out_destroy_fp_eq;
6786	}
6787	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6788			    LPFC_MCQ, LPFC_MBOX);
6789	if (rc) {
6790		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6791				"0529 Failed setup of slow-path mailbox CQ: "
6792				"rc = 0x%x\n", rc);
6793		goto out_destroy_fp_eq;
6794	}
6795	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6796			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6797			phba->sli4_hba.mbx_cq->queue_id,
6798			phba->sli4_hba.sp_eq->queue_id);
6799
6800	/* Set up slow-path ELS Complete Queue */
6801	if (!phba->sli4_hba.els_cq) {
6802		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6803				"0530 ELS CQ not allocated\n");
6804		rc = -ENOMEM;
6805		goto out_destroy_mbx_cq;
6806	}
6807	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6808			    LPFC_WCQ, LPFC_ELS);
6809	if (rc) {
6810		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6811				"0531 Failed setup of slow-path ELS CQ: "
6812				"rc = 0x%x\n", rc);
6813		goto out_destroy_mbx_cq;
6814	}
6815	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6816			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6817			phba->sli4_hba.els_cq->queue_id,
6818			phba->sli4_hba.sp_eq->queue_id);
6819
6820	/* Set up fast-path FCP Response Complete Queue */
6821	if (!phba->sli4_hba.fcp_cq) {
6822		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6823				"3148 Fast-path FCP CQ array not "
6824				"allocated\n");
6825		rc = -ENOMEM;
6826		goto out_destroy_els_cq;
6827	}
6828	fcp_cqidx = 0;
6829	do {
6830		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6831			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6832					"0526 Fast-path FCP CQ (%d) not "
6833					"allocated\n", fcp_cqidx);
6834			rc = -ENOMEM;
6835			goto out_destroy_fcp_cq;
6836		}
6837		if (phba->cfg_fcp_eq_count)
6838			rc = lpfc_cq_create(phba,
6839					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6840					    phba->sli4_hba.fp_eq[fcp_cqidx],
6841					    LPFC_WCQ, LPFC_FCP);
6842		else
6843			rc = lpfc_cq_create(phba,
6844					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6845					    phba->sli4_hba.sp_eq,
6846					    LPFC_WCQ, LPFC_FCP);
6847		if (rc) {
6848			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6849					"0527 Failed setup of fast-path FCP "
6850					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6851			goto out_destroy_fcp_cq;
6852		}
6853		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6854				"2588 FCP CQ setup: cq[%d]-id=%d, "
6855				"parent %seq[%d]-id=%d\n",
6856				fcp_cqidx,
6857				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6858				(phba->cfg_fcp_eq_count) ? "" : "sp_",
6859				fcp_cqidx,
6860				(phba->cfg_fcp_eq_count) ?
6861				   phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6862				   phba->sli4_hba.sp_eq->queue_id);
6863	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6864
6865	/*
6866	 * Set up all the Work Queues (WQs)
6867	 */
6868
6869	/* Set up Mailbox Command Queue */
6870	if (!phba->sli4_hba.mbx_wq) {
6871		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6872				"0538 Slow-path MQ not allocated\n");
6873		rc = -ENOMEM;
6874		goto out_destroy_fcp_cq;
6875	}
6876	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6877			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6878	if (rc) {
6879		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6880				"0539 Failed setup of slow-path MQ: "
6881				"rc = 0x%x\n", rc);
6882		goto out_destroy_fcp_cq;
6883	}
6884	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6885			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6886			phba->sli4_hba.mbx_wq->queue_id,
6887			phba->sli4_hba.mbx_cq->queue_id);
6888
6889	/* Set up slow-path ELS Work Queue */
6890	if (!phba->sli4_hba.els_wq) {
6891		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6892				"0536 Slow-path ELS WQ not allocated\n");
6893		rc = -ENOMEM;
6894		goto out_destroy_mbx_wq;
6895	}
6896	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6897			    phba->sli4_hba.els_cq, LPFC_ELS);
6898	if (rc) {
6899		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6900				"0537 Failed setup of slow-path ELS WQ: "
6901				"rc = 0x%x\n", rc);
6902		goto out_destroy_mbx_wq;
6903	}
6904	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6905			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6906			phba->sli4_hba.els_wq->queue_id,
6907			phba->sli4_hba.els_cq->queue_id);
6908
6909	/* Set up fast-path FCP Work Queue */
6910	if (!phba->sli4_hba.fcp_wq) {
6911		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6912				"3149 Fast-path FCP WQ array not "
6913				"allocated\n");
6914		rc = -ENOMEM;
6915		goto out_destroy_els_wq;
6916	}
6917	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6918		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6919			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6920					"0534 Fast-path FCP WQ (%d) not "
6921					"allocated\n", fcp_wqidx);
6922			rc = -ENOMEM;
6923			goto out_destroy_fcp_wq;
6924		}
6925		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6926				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6927				    LPFC_FCP);
6928		if (rc) {
6929			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6930					"0535 Failed setup of fast-path FCP "
6931					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6932			goto out_destroy_fcp_wq;
6933		}
6934		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6935				"2591 FCP WQ setup: wq[%d]-id=%d, "
6936				"parent cq[%d]-id=%d\n",
6937				fcp_wqidx,
6938				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6939				fcp_cq_index,
6940				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6941		/* Round robin FCP Work Queue's Completion Queue assignment */
6942		if (phba->cfg_fcp_eq_count)
6943			fcp_cq_index = ((fcp_cq_index + 1) %
6944					phba->cfg_fcp_eq_count);
6945	}
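
	/*
	 * For example, with cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2,
	 * the modulo above pairs the WQs with CQs as 0->0, 1->1, 2->0,
	 * 3->1, spreading FCP completions evenly across the CQs.
	 */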
6946
6947	/*
6948	 * Create Receive Queue (RQ)
6949	 */
6950	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6951		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6952				"0540 Receive Queue not allocated\n");
6953		rc = -ENOMEM;
6954		goto out_destroy_fcp_wq;
6955	}
6956
6957	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
6958	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
6959
6960	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6961			    phba->sli4_hba.els_cq, LPFC_USOL);
6962	if (rc) {
6963		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6964				"0541 Failed setup of Receive Queue: "
6965				"rc = 0x%x\n", rc);
6966		goto out_destroy_fcp_wq;
6967	}
6968
6969	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6970			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6971			"parent cq-id=%d\n",
6972			phba->sli4_hba.hdr_rq->queue_id,
6973			phba->sli4_hba.dat_rq->queue_id,
6974			phba->sli4_hba.els_cq->queue_id);
6975	return 0;
6976
6977out_destroy_fcp_wq:
6978	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6979		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6980out_destroy_els_wq:
6981	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6982out_destroy_mbx_wq:
6983	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6984out_destroy_fcp_cq:
6985	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6986		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6987out_destroy_els_cq:
6988	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6989out_destroy_mbx_cq:
6990	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6991out_destroy_fp_eq:
6992	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6993		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6994out_destroy_sp_eq:
6995	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6996out_error:
6997	return rc;
6998}
6999
7000/**
7001 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
7002 * @phba: pointer to lpfc hba data structure.
7003 *
 * This routine is invoked to unset all the SLI4 queues associated with
 * the FCoE HBA operation. The routine returns no value.
7011 **/
7012void
7013lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7014{
7015	int fcp_qidx;
7016
7017	/* Unset mailbox command work queue */
7018	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7019	/* Unset ELS work queue */
7020	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7021	/* Unset unsolicited receive queue */
7022	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7023	/* Unset FCP work queue */
7024	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
7025		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7026	/* Unset mailbox command complete queue */
7027	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7028	/* Unset ELS complete queue */
7029	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7030	/* Unset FCP response complete queue */
7031	if (phba->sli4_hba.fcp_cq) {
7032		fcp_qidx = 0;
7033		do {
7034			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7035		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
7036	}
7037	/* Unset fast-path event queue */
7038	if (phba->sli4_hba.fp_eq) {
7039		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
7040		     fcp_qidx++)
7041			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
7042	}
7043	/* Unset slow-path event queue */
7044	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7045}
7046
7047/**
7048 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
7049 * @phba: pointer to lpfc hba data structure.
7050 *
7051 * This routine is invoked to allocate and set up a pool of completion queue
7052 * events. The body of the completion queue event is a completion queue entry
7053 * CQE. For now, this pool is used for the interrupt service routine to queue
7054 * the following HBA completion queue events for the worker thread to process:
7055 *   - Mailbox asynchronous events
7056 *   - Receive queue completion unsolicited events
7057 * Later, this can be used for all the slow-path events.
7058 *
7059 * Return codes
7060 *      0 - successful
7061 *      -ENOMEM - No available memory
7062 **/
7063static int
7064lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
7065{
7066	struct lpfc_cq_event *cq_event;
7067	int i;
7068
7069	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7070		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7071		if (!cq_event)
7072			goto out_pool_create_fail;
7073		list_add_tail(&cq_event->list,
7074			      &phba->sli4_hba.sp_cqe_event_pool);
7075	}
7076	return 0;
7077
7078out_pool_create_fail:
7079	lpfc_sli4_cq_event_pool_destroy(phba);
7080	return -ENOMEM;
7081}
7082
7083/**
7084 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
7085 * @phba: pointer to lpfc hba data structure.
7086 *
7087 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
7089 * cleanup routine to free all the outstanding completion-queue events
7090 * allocated from this pool back into the pool before invoking this routine
7091 * to destroy the pool.
7092 **/
7093static void
7094lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7095{
7096	struct lpfc_cq_event *cq_event, *next_cq_event;
7097
7098	list_for_each_entry_safe(cq_event, next_cq_event,
7099				 &phba->sli4_hba.sp_cqe_event_pool, list) {
7100		list_del(&cq_event->list);
7101		kfree(cq_event);
7102	}
7103}
7104
7105/**
7106 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7107 * @phba: pointer to lpfc hba data structure.
7108 *
 * This routine is the lock-free version of the API invoked to allocate a
7110 * completion-queue event from the free pool.
7111 *
7112 * Return: Pointer to the newly allocated completion-queue event if successful
7113 *         NULL otherwise.
7114 **/
7115struct lpfc_cq_event *
7116__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7117{
7118	struct lpfc_cq_event *cq_event = NULL;
7119
7120	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7121			 struct lpfc_cq_event, list);
7122	return cq_event;
7123}
7124
7125/**
7126 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7127 * @phba: pointer to lpfc hba data structure.
7128 *
 * This routine is the locking version of the API invoked to allocate a
7130 * completion-queue event from the free pool.
7131 *
7132 * Return: Pointer to the newly allocated completion-queue event if successful
7133 *         NULL otherwise.
7134 **/
7135struct lpfc_cq_event *
7136lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7137{
7138	struct lpfc_cq_event *cq_event;
7139	unsigned long iflags;
7140
7141	spin_lock_irqsave(&phba->hbalock, iflags);
7142	cq_event = __lpfc_sli4_cq_event_alloc(phba);
7143	spin_unlock_irqrestore(&phba->hbalock, iflags);
7144	return cq_event;
7145}
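
/*
 * Typical usage (an illustrative sketch only, not a call site in this
 * file): an interrupt handler takes an event from the pool, fills it from
 * the hardware completion entry, and queues it for the worker thread,
 * which later returns it through lpfc_sli4_cq_event_release(). Here "cqe"
 * and "sp_work_queue" stand in for the caller's completion entry and
 * slow-path work list:
 *
 *	struct lpfc_cq_event *evt = lpfc_sli4_cq_event_alloc(phba);
 *	if (evt) {
 *		memcpy(&evt->cqe, cqe, sizeof(evt->cqe));
 *		list_add_tail(&evt->list, &sp_work_queue);
 *	}
 */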
7146
7147/**
7148 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7149 * @phba: pointer to lpfc hba data structure.
7150 * @cq_event: pointer to the completion queue event to be freed.
7151 *
 * This routine is the lock-free version of the API invoked to release a
7153 * completion-queue event back into the free pool.
7154 **/
7155void
7156__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7157			     struct lpfc_cq_event *cq_event)
7158{
7159	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7160}
7161
7162/**
7163 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7164 * @phba: pointer to lpfc hba data structure.
7165 * @cq_event: pointer to the completion queue event to be freed.
7166 *
 * This routine is the locking version of the API invoked to release a
7168 * completion-queue event back into the free pool.
7169 **/
7170void
7171lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7172			   struct lpfc_cq_event *cq_event)
7173{
7174	unsigned long iflags;
7175	spin_lock_irqsave(&phba->hbalock, iflags);
7176	__lpfc_sli4_cq_event_release(phba, cq_event);
7177	spin_unlock_irqrestore(&phba->hbalock, iflags);
7178}
7179
7180/**
7181 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
7182 * @phba: pointer to lpfc hba data structure.
7183 *
 * This routine frees all the pending completion-queue events back into
 * the free pool for a device reset.
7186 **/
7187static void
7188lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7189{
7190	LIST_HEAD(cqelist);
7191	struct lpfc_cq_event *cqe;
7192	unsigned long iflags;
7193
7194	/* Retrieve all the pending WCQEs from pending WCQE lists */
7195	spin_lock_irqsave(&phba->hbalock, iflags);
7196	/* Pending FCP XRI abort events */
7197	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7198			 &cqelist);
7199	/* Pending ELS XRI abort events */
7200	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7201			 &cqelist);
	/* Pending async events */
7203	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7204			 &cqelist);
7205	spin_unlock_irqrestore(&phba->hbalock, iflags);
7206
7207	while (!list_empty(&cqelist)) {
7208		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7209		lpfc_sli4_cq_event_release(phba, cqe);
7210	}
7211}
7212
7213/**
7214 * lpfc_pci_function_reset - Reset pci function.
7215 * @phba: pointer to lpfc hba data structure.
7216 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function that originates this request.
7219 *
7220 * Return codes
7221 *      0 - successful
7222 *      -ENOMEM - No available memory
7223 *      -EIO - The mailbox failed to complete successfully.
7224 **/
7225int
7226lpfc_pci_function_reset(struct lpfc_hba *phba)
7227{
7228	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	uint32_t if_type;
7230	uint32_t shdr_status, shdr_add_status;
7231	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7232	union lpfc_sli4_cfg_shdr *shdr;
7233	struct lpfc_register reg_data;
7234
7235	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7236	switch (if_type) {
7237	case LPFC_SLI_INTF_IF_TYPE_0:
7238		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7239						       GFP_KERNEL);
7240		if (!mboxq) {
7241			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7242					"0494 Unable to allocate memory for "
7243					"issuing SLI_FUNCTION_RESET mailbox "
7244					"command\n");
7245			return -ENOMEM;
7246		}
7247
7248		/* Setup PCI function reset mailbox-ioctl command */
7249		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7250				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7251				 LPFC_SLI4_MBX_EMBED);
7252		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7253		shdr = (union lpfc_sli4_cfg_shdr *)
7254			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7255		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7256		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7257					 &shdr->response);
7258		if (rc != MBX_TIMEOUT)
7259			mempool_free(mboxq, phba->mbox_mem_pool);
7260		if (shdr_status || shdr_add_status || rc) {
7261			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7262					"0495 SLI_FUNCTION_RESET mailbox "
7263					"failed with status x%x add_status x%x,"
7264					" mbx status x%x\n",
7265					shdr_status, shdr_add_status, rc);
7266			rc = -ENXIO;
7267		}
7268		break;
7269	case LPFC_SLI_INTF_IF_TYPE_2:
7270		for (num_resets = 0;
7271		     num_resets < MAX_IF_TYPE_2_RESETS;
7272		     num_resets++) {
7273			reg_data.word0 = 0;
7274			bf_set(lpfc_sliport_ctrl_end, &reg_data,
7275			       LPFC_SLIPORT_LITTLE_ENDIAN);
7276			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7277			       LPFC_SLIPORT_INIT_PORT);
7278			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7279			       CTRLregaddr);
7280
7281			/*
7282			 * Poll the Port Status Register and wait for RDY for
7283			 * up to 10 seconds.  If the port doesn't respond, treat
7284			 * it as an error.  If the port responds with RN, start
7285			 * the loop again.
7286			 */
7287			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7288				msleep(10);
7289				if (lpfc_readl(phba->sli4_hba.u.if_type2.
7290					      STATUSregaddr, &reg_data.word0)) {
7291					rc = -ENODEV;
7292					goto out;
7293				}
7294				if (bf_get(lpfc_sliport_status_rn, &reg_data))
7295					reset_again++;
7296				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7297					break;
7298			}
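			/* 1000 polls x 10 ms bound the RDY wait at ~10 s */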
7299
7300			/*
7301			 * If the port responds to the init request with
7302			 * reset needed, delay for a bit and restart the loop.
7303			 */
7304			if (reset_again && (rdy_chk < 1000)) {
7305				msleep(10);
7306				reset_again = 0;
7307				continue;
7308			}
7309
7310			/* Detect any port errors. */
7311			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7312			    (rdy_chk >= 1000)) {
7313				phba->work_status[0] = readl(
7314					phba->sli4_hba.u.if_type2.ERR1regaddr);
7315				phba->work_status[1] = readl(
7316					phba->sli4_hba.u.if_type2.ERR2regaddr);
7317				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318					"2890 Port Error Detected "
7319					"during Port Reset: "
7320					"port status reg 0x%x, "
7321					"error 1=0x%x, error 2=0x%x\n",
7322					reg_data.word0,
7323					phba->work_status[0],
7324					phba->work_status[1]);
7325				rc = -ENODEV;
7326			}
7327
7328			/*
7329			 * Terminate the outer loop provided the Port indicated
7330			 * ready within 10 seconds.
7331			 */
7332			if (rdy_chk < 1000)
7333				break;
7334		}
7335		/* delay driver action following IF_TYPE_2 function reset */
7336		msleep(100);
7337		break;
7338	case LPFC_SLI_INTF_IF_TYPE_1:
7339	default:
7340		break;
7341	}
7342
7343out:
7344	/* Catch the not-ready port failure after a port reset. */
7345	if (num_resets >= MAX_IF_TYPE_2_RESETS)
7346		rc = -ENODEV;
7347
7348	return rc;
7349}
7350
7351/**
7352 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7353 * @phba: pointer to lpfc hba data structure.
7354 * @cnt: number of nop mailbox commands to send.
7355 *
 * This routine is invoked to send @cnt NOP mailbox commands and to wait
 * for each command to complete.
7358 *
 * Return: the number of NOP mailbox commands completed.
7360 **/
7361static int
7362lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7363{
7364	LPFC_MBOXQ_t *mboxq;
7365	int length, cmdsent;
7366	uint32_t mbox_tmo;
7367	uint32_t rc = 0;
7368	uint32_t shdr_status, shdr_add_status;
7369	union lpfc_sli4_cfg_shdr *shdr;
7370
7371	if (cnt == 0) {
7372		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7373				"2518 Requested to send 0 NOP mailbox cmd\n");
7374		return cnt;
7375	}
7376
7377	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7378	if (!mboxq) {
7379		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7380				"2519 Unable to allocate memory for issuing "
7381				"NOP mailbox command\n");
7382		return 0;
7383	}
7384
7385	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7386	length = (sizeof(struct lpfc_mbx_nop) -
7387		  sizeof(struct lpfc_sli4_cfg_mhdr));
7388	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7389			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7390
7391	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7392		if (!phba->sli4_hba.intr_enable)
7393			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7394		else {
7395			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7396			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7397		}
7398		if (rc == MBX_TIMEOUT)
7399			break;
7400		/* Check return status */
7401		shdr = (union lpfc_sli4_cfg_shdr *)
7402			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7403		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7404		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7405					 &shdr->response);
7406		if (shdr_status || shdr_add_status || rc) {
7407			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7408					"2520 NOP mailbox command failed "
7409					"status x%x add_status x%x mbx "
7410					"status x%x\n", shdr_status,
7411					shdr_add_status, rc);
7412			break;
7413		}
7414	}
7415
7416	if (rc != MBX_TIMEOUT)
7417		mempool_free(mboxq, phba->mbox_mem_pool);
7418
7419	return cmdsent;
7420}
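
/*
 * Illustrative call (hypothetical, not a call site in this file): since
 * the routine returns how many NOPs actually completed, a caller can use
 * it as a cheap sanity check of the mailbox path:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, 5) != 5)
 *		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 *				"NOP mailbox sanity check failed\n");
 */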
7421
7422/**
7423 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7424 * @phba: pointer to lpfc hba data structure.
7425 *
7426 * This routine is invoked to set up the PCI device memory space for device
7427 * with SLI-4 interface spec.
7428 *
7429 * Return codes
7430 * 	0 - successful
7431 * 	other values - error
7432 **/
7433static int
7434lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7435{
7436	struct pci_dev *pdev;
7437	unsigned long bar0map_len, bar1map_len, bar2map_len;
7438	int error = -ENODEV;
7439	uint32_t if_type;
7440
7441	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	pdev = phba->pcidev;
7446
7447	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
7452			return error;
7453		}
7454	}
7455
7456	/*
7457	 * The BARs and register set definitions and offset locations are
7458	 * dependent on the if_type.
7459	 */
7460	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7461				  &phba->sli4_hba.sli_intf.word0)) {
7462		return error;
7463	}
7464
	/* There is no SLI3 fallback for SLI4 devices. */
7466	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7467	    LPFC_SLI_INTF_VALID) {
7468		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7469				"2894 SLI_INTF reg contents invalid "
7470				"sli_intf reg 0x%x\n",
7471				phba->sli4_hba.sli_intf.word0);
7472		return error;
7473	}
7474
7475	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7476	/*
7477	 * Get the bus address of SLI4 device Bar regions and the
7478	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BAR regions is dependent on the type of
7480	 * SLI4 device.
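	 * For if_type 0 the config registers sit behind BAR0 (PCI
	 * resource 0, or resource 1 when BAR0 is absent), the control
	 * registers behind PCI resource 2 and the doorbells behind PCI
	 * resource 4; if_type 2 devices expose all register sets through
	 * the config BAR alone.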
7481	 */
7482	if (pci_resource_start(pdev, 0)) {
7483		phba->pci_bar0_map = pci_resource_start(pdev, 0);
7484		bar0map_len = pci_resource_len(pdev, 0);
7485
7486		/*
7487		 * Map SLI4 PCI Config Space Register base to a kernel virtual
7488		 * addr
7489		 */
7490		phba->sli4_hba.conf_regs_memmap_p =
7491			ioremap(phba->pci_bar0_map, bar0map_len);
7492		if (!phba->sli4_hba.conf_regs_memmap_p) {
7493			dev_printk(KERN_ERR, &pdev->dev,
7494				   "ioremap failed for SLI4 PCI config "
7495				   "registers.\n");
7496			goto out;
7497		}
7498		/* Set up BAR0 PCI config space register memory map */
7499		lpfc_sli4_bar0_register_memmap(phba, if_type);
7500	} else {
7501		phba->pci_bar0_map = pci_resource_start(pdev, 1);
7502		bar0map_len = pci_resource_len(pdev, 1);
7503		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7504			dev_printk(KERN_ERR, &pdev->dev,
7505			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7506			goto out;
7507		}
7508		phba->sli4_hba.conf_regs_memmap_p =
7509				ioremap(phba->pci_bar0_map, bar0map_len);
7510		if (!phba->sli4_hba.conf_regs_memmap_p) {
7511			dev_printk(KERN_ERR, &pdev->dev,
7512				"ioremap failed for SLI4 PCI config "
7513				"registers.\n");
			goto out;
7515		}
7516		lpfc_sli4_bar0_register_memmap(phba, if_type);
7517	}
7518
7519	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7520	    (pci_resource_start(pdev, 2))) {
7521		/*
7522		 * Map SLI4 if type 0 HBA Control Register base to a kernel
7523		 * virtual address and setup the registers.
7524		 */
7525		phba->pci_bar1_map = pci_resource_start(pdev, 2);
7526		bar1map_len = pci_resource_len(pdev, 2);
7527		phba->sli4_hba.ctrl_regs_memmap_p =
7528				ioremap(phba->pci_bar1_map, bar1map_len);
7529		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7530			dev_printk(KERN_ERR, &pdev->dev,
7531			   "ioremap failed for SLI4 HBA control registers.\n");
7532			goto out_iounmap_conf;
7533		}
7534		lpfc_sli4_bar1_register_memmap(phba);
7535	}
7536
7537	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7538	    (pci_resource_start(pdev, 4))) {
7539		/*
7540		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7541		 * virtual address and setup the registers.
7542		 */
7543		phba->pci_bar2_map = pci_resource_start(pdev, 4);
7544		bar2map_len = pci_resource_len(pdev, 4);
7545		phba->sli4_hba.drbl_regs_memmap_p =
7546				ioremap(phba->pci_bar2_map, bar2map_len);
7547		if (!phba->sli4_hba.drbl_regs_memmap_p) {
7548			dev_printk(KERN_ERR, &pdev->dev,
7549			   "ioremap failed for SLI4 HBA doorbell registers.\n");
7550			goto out_iounmap_ctrl;
7551		}
7552		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7553		if (error)
7554			goto out_iounmap_all;
7555	}
7556
7557	return 0;
7558
7559out_iounmap_all:
7560	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7561out_iounmap_ctrl:
7562	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7563out_iounmap_conf:
7564	iounmap(phba->sli4_hba.conf_regs_memmap_p);
7565out:
7566	return error;
7567}
7568
7569/**
7570 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7571 * @phba: pointer to lpfc hba data structure.
7572 *
7573 * This routine is invoked to unset the PCI device memory space for device
7574 * with SLI-4 interface spec.
7575 **/
7576static void
7577lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7578{
7579	uint32_t if_type;
7580	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7581
7582	switch (if_type) {
7583	case LPFC_SLI_INTF_IF_TYPE_0:
7584		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7585		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7586		iounmap(phba->sli4_hba.conf_regs_memmap_p);
7587		break;
7588	case LPFC_SLI_INTF_IF_TYPE_2:
7589		iounmap(phba->sli4_hba.conf_regs_memmap_p);
7590		break;
7591	case LPFC_SLI_INTF_IF_TYPE_1:
7592	default:
7593		dev_printk(KERN_ERR, &phba->pcidev->dev,
7594			   "FATAL - unsupported SLI4 interface type - %d\n",
7595			   if_type);
7596		break;
7597	}
7598}
7599
7600/**
7601 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7602 * @phba: pointer to lpfc hba data structure.
7603 *
7604 * This routine is invoked to enable the MSI-X interrupt vectors to device
7605 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7606 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7607 * invoked, enables either all or nothing, depending on the current
7608 * availability of PCI vector resources. The device driver is responsible
7609 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled, leaking its vectors.
7615 *
7616 * Return codes
7617 *   0 - successful
7618 *   other values - error
7619 **/
7620static int
7621lpfc_sli_enable_msix(struct lpfc_hba *phba)
7622{
7623	int rc, i;
7624	LPFC_MBOXQ_t *pmb;
7625
7626	/* Set up MSI-X multi-message vectors */
7627	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7628		phba->msix_entries[i].entry = i;
7629
7630	/* Configure MSI-X capability structure */
7631	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7632				ARRAY_SIZE(phba->msix_entries));
7633	if (rc) {
7634		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7635				"0420 PCI enable MSI-X failed (%d)\n", rc);
7636		goto msi_fail_out;
7637	}
7638	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7639		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7640				"0477 MSI-X entry[%d]: vector=x%x "
7641				"message=%d\n", i,
7642				phba->msix_entries[i].vector,
7643				phba->msix_entries[i].entry);
7644	/*
7645	 * Assign MSI-X vectors to interrupt handlers
7646	 */
7647
7648	/* vector-0 is associated to slow-path handler */
7649	rc = request_irq(phba->msix_entries[0].vector,
7650			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7651			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7652	if (rc) {
7653		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7654				"0421 MSI-X slow-path request_irq failed "
7655				"(%d)\n", rc);
7656		goto msi_fail_out;
7657	}
7658
7659	/* vector-1 is associated to fast-path handler */
7660	rc = request_irq(phba->msix_entries[1].vector,
7661			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7662			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7663
7664	if (rc) {
7665		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7666				"0429 MSI-X fast-path request_irq failed "
7667				"(%d)\n", rc);
7668		goto irq_fail_out;
7669	}
7670
7671	/*
7672	 * Configure HBA MSI-X attention conditions to messages
7673	 */
7674	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7675
7676	if (!pmb) {
7677		rc = -ENOMEM;
7678		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7679				"0474 Unable to allocate memory for issuing "
7680				"MBOX_CONFIG_MSI command\n");
7681		goto mem_fail_out;
7682	}
7683	rc = lpfc_config_msi(phba, pmb);
7684	if (rc)
7685		goto mbx_fail_out;
7686	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7687	if (rc != MBX_SUCCESS) {
7688		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7689				"0351 Config MSI mailbox command failed, "
7690				"mbxCmd x%x, mbxStatus x%x\n",
7691				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7692		goto mbx_fail_out;
7693	}
7694
7695	/* Free memory allocated for mailbox command */
7696	mempool_free(pmb, phba->mbox_mem_pool);
7697	return rc;
7698
7699mbx_fail_out:
7700	/* Free memory allocated for mailbox command */
7701	mempool_free(pmb, phba->mbox_mem_pool);
7702
7703mem_fail_out:
7704	/* free the irq already requested */
7705	free_irq(phba->msix_entries[1].vector, phba);
7706
7707irq_fail_out:
7708	/* free the irq already requested */
7709	free_irq(phba->msix_entries[0].vector, phba);
7710
7711msi_fail_out:
7712	/* Unconfigure MSI-X capability structure */
7713	pci_disable_msix(phba->pcidev);
7714	return rc;
7715}
7716
7717/**
7718 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7719 * @phba: pointer to lpfc hba data structure.
7720 *
7721 * This routine is invoked to release the MSI-X vectors and then disable the
7722 * MSI-X interrupt mode to device with SLI-3 interface spec.
7723 **/
7724static void
7725lpfc_sli_disable_msix(struct lpfc_hba *phba)
7726{
7727	int i;
7728
7729	/* Free up MSI-X multi-message vectors */
7730	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7731		free_irq(phba->msix_entries[i].vector, phba);
7732	/* Disable MSI-X */
7733	pci_disable_msix(phba->pcidev);
7734
7735	return;
7736}
7737
7738/**
7739 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7740 * @phba: pointer to lpfc hba data structure.
7741 *
7742 * This routine is invoked to enable the MSI interrupt mode to device with
7743 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
7747 *
7748 * Return codes
7749 * 	0 - successful
7750 * 	other values - error
7751 */
7752static int
7753lpfc_sli_enable_msi(struct lpfc_hba *phba)
7754{
7755	int rc;
7756
7757	rc = pci_enable_msi(phba->pcidev);
7758	if (!rc)
7759		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7760				"0462 PCI enable MSI mode success.\n");
7761	else {
7762		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7763				"0471 PCI enable MSI mode failed (%d)\n", rc);
7764		return rc;
7765	}
7766
7767	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7768			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7769	if (rc) {
7770		pci_disable_msi(phba->pcidev);
7771		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7772				"0478 MSI request_irq failed (%d)\n", rc);
7773	}
7774	return rc;
7775}
7776
7777/**
7778 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7779 * @phba: pointer to lpfc hba data structure.
7780 *
7781 * This routine is invoked to disable the MSI interrupt mode to device with
7782 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
7783 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7784 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
7785 * its vector.
7786 */
7787static void
7788lpfc_sli_disable_msi(struct lpfc_hba *phba)
7789{
7790	free_irq(phba->pcidev->irq, phba);
7791	pci_disable_msi(phba->pcidev);
7792	return;
7793}
7794
7795/**
7796 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested (2 - MSI-X, 1 - MSI, 0 - INTx).
7798 *
7799 * This routine is invoked to enable device interrupt and associate driver's
7800 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the
 * driver will try to fall back from the configured mode to an interrupt
7803 * mode which is supported by the platform, kernel, and device in the order
7804 * of:
7805 * MSI-X -> MSI -> IRQ.
7806 *
7807 * Return codes
7808 *   0 - successful
7809 *   other values - error
7810 **/
7811static uint32_t
7812lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7813{
7814	uint32_t intr_mode = LPFC_INTR_ERROR;
7815	int retval;
7816
7817	if (cfg_mode == 2) {
7818		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7819		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7820		if (!retval) {
7821			/* Now, try to enable MSI-X interrupt mode */
7822			retval = lpfc_sli_enable_msix(phba);
7823			if (!retval) {
7824				/* Indicate initialization to MSI-X mode */
7825				phba->intr_type = MSIX;
7826				intr_mode = 2;
7827			}
7828		}
7829	}
7830
7831	/* Fallback to MSI if MSI-X initialization failed */
7832	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7833		retval = lpfc_sli_enable_msi(phba);
7834		if (!retval) {
7835			/* Indicate initialization to MSI mode */
7836			phba->intr_type = MSI;
7837			intr_mode = 1;
7838		}
7839	}
7840
	/* Fall back to INTx if both MSI-X and MSI initialization failed */
7842	if (phba->intr_type == NONE) {
7843		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7844				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7845		if (!retval) {
7846			/* Indicate initialization to INTx mode */
7847			phba->intr_type = INTx;
7848			intr_mode = 0;
7849		}
7850	}
7851	return intr_mode;
7852}
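
/*
 * Note: the intr_mode value returned above records the mode actually
 * established, using the same encoding as the requested cfg_mode
 * (2 = MSI-X, 1 = MSI, 0 = INTx); LPFC_INTR_ERROR indicates that no
 * interrupt mode could be enabled.
 */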
7853
7854/**
7855 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7856 * @phba: pointer to lpfc hba data structure.
7857 *
7858 * This routine is invoked to disable device interrupt and disassociate the
7859 * driver's interrupt handler(s) from interrupt vector(s) to device with
7860 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7861 * release the interrupt vector(s) for the message signaled interrupt.
7862 **/
7863static void
7864lpfc_sli_disable_intr(struct lpfc_hba *phba)
7865{
7866	/* Disable the currently initialized interrupt mode */
7867	if (phba->intr_type == MSIX)
7868		lpfc_sli_disable_msix(phba);
7869	else if (phba->intr_type == MSI)
7870		lpfc_sli_disable_msi(phba);
7871	else if (phba->intr_type == INTx)
7872		free_irq(phba->pcidev->irq, phba);
7873
7874	/* Reset interrupt management states */
7875	phba->intr_type = NONE;
7876	phba->sli.slistat.sli_intr = 0;
7877
7878	return;
7879}
7880
7881/**
7882 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7883 * @phba: pointer to lpfc hba data structure.
7884 *
7885 * This routine is invoked to enable the MSI-X interrupt vectors to device
7886 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7887 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7888 * enables either all or nothing, depending on the current availability of
7889 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later, when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON() and the device will be left with
 * MSI-X enabled, leaking its vectors.
7896 *
7897 * Return codes
7898 * 0 - successful
7899 * other values - error
7900 **/
7901static int
7902lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7903{
7904	int vectors, rc, index;
7905
7906	/* Set up MSI-X multi-message vectors */
7907	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7908		phba->sli4_hba.msix_entries[index].entry = index;
7909
7910	/* Configure MSI-X capability structure */
7911	vectors = phba->sli4_hba.cfg_eqn;
7912enable_msix_vectors:
7913	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7914			     vectors);
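	/*
	 * A positive return from pci_enable_msix() reports how many MSI-X
	 * vectors the system could actually supply; retry with that
	 * reduced count (a single available vector is treated as a
	 * failure below).
	 */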
7915	if (rc > 1) {
7916		vectors = rc;
7917		goto enable_msix_vectors;
7918	} else if (rc) {
7919		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7920				"0484 PCI enable MSI-X failed (%d)\n", rc);
7921		goto msi_fail_out;
7922	}
7923
7924	/* Log MSI-X vector assignment */
7925	for (index = 0; index < vectors; index++)
7926		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7927				"0489 MSI-X entry[%d]: vector=x%x "
7928				"message=%d\n", index,
7929				phba->sli4_hba.msix_entries[index].vector,
7930				phba->sli4_hba.msix_entries[index].entry);
7931	/*
7932	 * Assign MSI-X vectors to interrupt handlers
7933	 */
7934	if (vectors > 1)
7935		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7936				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7937				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7938	else
7939		/* All Interrupts need to be handled by one EQ */
7940		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7941				 &lpfc_sli4_intr_handler, IRQF_SHARED,
7942				 LPFC_DRIVER_NAME, phba);
7943	if (rc) {
7944		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7945				"0485 MSI-X slow-path request_irq failed "
7946				"(%d)\n", rc);
7947		goto msi_fail_out;
7948	}
7949
7950	/* The rest of the vector(s) are associated to fast-path handler(s) */
7951	for (index = 1; index < vectors; index++) {
7952		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7953		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7954		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7955				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7956				 LPFC_FP_DRIVER_HANDLER_NAME,
7957				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7958		if (rc) {
7959			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7960					"0486 MSI-X fast-path (%d) "
7961					"request_irq failed (%d)\n", index, rc);
7962			goto cfg_fail_out;
7963		}
7964	}
7965	phba->sli4_hba.msix_vec_nr = vectors;
7966
7967	return rc;
7968
7969cfg_fail_out:
7970	/* free the irq already requested */
7971	for (--index; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
7973			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7974
7975	/* free the irq already requested */
7976	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7977
7978msi_fail_out:
7979	/* Unconfigure MSI-X capability structure */
7980	pci_disable_msix(phba->pcidev);
7981	return rc;
7982}
7983
7984/**
7985 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7986 * @phba: pointer to lpfc hba data structure.
7987 *
7988 * This routine is invoked to release the MSI-X vectors and then disable the
7989 * MSI-X interrupt mode to device with SLI-4 interface spec.
7990 **/
7991static void
7992lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7993{
7994	int index;
7995
7996	/* Free up MSI-X multi-message vectors */
7997	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7998
7999	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
8000		free_irq(phba->sli4_hba.msix_entries[index].vector,
8001			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
8002
8003	/* Disable MSI-X */
8004	pci_disable_msix(phba->pcidev);
8005
8006	return;
8007}
8008
8009/**
8010 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
8011 * @phba: pointer to lpfc hba data structure.
8012 *
8013 * This routine is invoked to enable the MSI interrupt mode to device with
8014 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
8015 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
8017 * which is done in this function.
8018 *
8019 * Return codes
8020 * 	0 - successful
8021 * 	other values - error
8022 **/
8023static int
8024lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8025{
8026	int rc, index;
8027
8028	rc = pci_enable_msi(phba->pcidev);
8029	if (!rc)
8030		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8031				"0487 PCI enable MSI mode success.\n");
8032	else {
8033		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8034				"0488 PCI enable MSI mode failed (%d)\n", rc);
8035		return rc;
8036	}
8037
8038	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8039			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8040	if (rc) {
8041		pci_disable_msi(phba->pcidev);
8042		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8043				"0490 MSI request_irq failed (%d)\n", rc);
8044		return rc;
8045	}
8046
8047	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
8048		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8049		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8050	}
8051
8052	return 0;
8053}
8054
8055/**
8056 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
8057 * @phba: pointer to lpfc hba data structure.
8058 *
8059 * This routine is invoked to disable the MSI interrupt mode to device with
8060 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
8061 * done request_irq() on before calling pci_disable_msi(). Failure to do so
8062 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
8063 * its vector.
8064 **/
8065static void
8066lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8067{
8068	free_irq(phba->pcidev->irq, phba);
8069	pci_disable_msi(phba->pcidev);
8070	return;
8071}
8072
8073/**
8074 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested (2 - MSI-X, 1 - MSI, 0 - INTx).
8076 *
8077 * This routine is invoked to enable device interrupt and associate driver's
8078 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the
 * driver, the driver will try to fall back from the configured mode to an
8081 * interrupt mode which is supported by the platform, kernel, and device in
8082 * the order of:
8083 * MSI-X -> MSI -> IRQ.
8084 *
8085 * Return codes
8086 * 	0 - successful
8087 * 	other values - error
8088 **/
8089static uint32_t
8090lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8091{
8092	uint32_t intr_mode = LPFC_INTR_ERROR;
8093	int retval, index;
8094
8095	if (cfg_mode == 2) {
		/* Try to enable MSI-X interrupt mode first */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
8107	}
8108
8109	/* Fallback to MSI if MSI-X initialization failed */
8110	if (cfg_mode >= 1 && phba->intr_type == NONE) {
8111		retval = lpfc_sli4_enable_msi(phba);
8112		if (!retval) {
8113			/* Indicate initialization to MSI mode */
8114			phba->intr_type = MSI;
8115			intr_mode = 1;
8116		}
8117	}
8118
	/* Fall back to INTx if both MSI-X and MSI initialization failed */
8120	if (phba->intr_type == NONE) {
8121		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8122				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8123		if (!retval) {
8124			/* Indicate initialization to INTx mode */
8125			phba->intr_type = INTx;
8126			intr_mode = 0;
8127			for (index = 0; index < phba->cfg_fcp_eq_count;
8128			     index++) {
8129				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8130				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8131			}
8132		}
8133	}
8134	return intr_mode;
8135}
8136
8137/**
8138 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
8139 * @phba: pointer to lpfc hba data structure.
8140 *
8141 * This routine is invoked to disable device interrupt and disassociate
8142 * the driver's interrupt handler(s) from interrupt vector(s) to device
8143 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
8144 * will release the interrupt vector(s) for the message signaled interrupt.
8145 **/
8146static void
8147lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8148{
8149	/* Disable the currently initialized interrupt mode */
8150	if (phba->intr_type == MSIX)
8151		lpfc_sli4_disable_msix(phba);
8152	else if (phba->intr_type == MSI)
8153		lpfc_sli4_disable_msi(phba);
8154	else if (phba->intr_type == INTx)
8155		free_irq(phba->pcidev->irq, phba);
8156
8157	/* Reset interrupt management states */
8158	phba->intr_type = NONE;
8159	phba->sli.slistat.sli_intr = 0;
8160
8161	return;
8162}
8163
8164/**
8165 * lpfc_unset_hba - Unset SLI3 hba device initialization
8166 * @phba: pointer to lpfc hba data structure.
8167 *
8168 * This routine is invoked to unset the HBA device initialization steps to
8169 * a device with SLI-3 interface spec.
8170 **/
8171static void
8172lpfc_unset_hba(struct lpfc_hba *phba)
8173{
8174	struct lpfc_vport *vport = phba->pport;
8175	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8176
8177	spin_lock_irq(shost->host_lock);
8178	vport->load_flag |= FC_UNLOADING;
8179	spin_unlock_irq(shost->host_lock);
8180
8181	lpfc_stop_hba_timers(phba);
8182
8183	phba->pport->work_port_events = 0;
8184
8185	lpfc_sli_hba_down(phba);
8186
8187	lpfc_sli_brdrestart(phba);
8188
8189	lpfc_sli_disable_intr(phba);
8190
8191	return;
8192}
8193
8194/**
8195 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
8196 * @phba: pointer to lpfc hba data structure.
8197 *
8198 * This routine is invoked to unset the HBA device initialization steps to
8199 * a device with SLI-4 interface spec.
8200 **/
8201static void
8202lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8203{
8204	struct lpfc_vport *vport = phba->pport;
8205	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8206
8207	spin_lock_irq(shost->host_lock);
8208	vport->load_flag |= FC_UNLOADING;
8209	spin_unlock_irq(shost->host_lock);
8210
8211	phba->pport->work_port_events = 0;
8212
8213	/* Stop the SLI4 device port */
8214	lpfc_stop_port(phba);
8215
8216	lpfc_sli4_disable_intr(phba);
8217
8218	/* Reset SLI4 HBA FCoE function */
8219	lpfc_pci_function_reset(phba);
8220	lpfc_sli4_queue_destroy(phba);
8221
8222	return;
8223}
8224
8225/**
8226 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8227 * @phba: Pointer to HBA context object.
8228 *
8229 * This function is called in the SLI4 code path to wait for completion
8230 * of device's XRIs exchange busy. It will check the XRI exchange busy
8231 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
8232 * that, it will check the XRI exchange busy on outstanding FCP and ELS
8233 * I/Os every 30 seconds, log error message, and wait forever. Only when
8234 * all XRI exchange busy complete, the driver unload shall proceed with
8235 * invoking the function reset ioctl mailbox command to the CNA and the
8236 * the rest of the driver unload resource release.
8237 **/
8238static void
8239lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8240{
8241	int wait_time = 0;
8242	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8243	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8244
8245	while (!fcp_xri_cmpl || !els_xri_cmpl) {
8246		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8247			if (!fcp_xri_cmpl)
8248				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8249						"2877 FCP XRI exchange busy "
8250						"wait time: %d seconds.\n",
8251						wait_time/1000);
8252			if (!els_xri_cmpl)
8253				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8254						"2878 ELS XRI exchange busy "
8255						"wait time: %d seconds.\n",
8256						wait_time/1000);
8257			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8258			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8259		} else {
8260			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8261			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8262		}
8263		fcp_xri_cmpl =
8264			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8265		els_xri_cmpl =
8266			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8267	}
8268}
8269
8270/**
8271 * lpfc_sli4_hba_unset - Unset the fcoe hba
8272 * @phba: Pointer to HBA context object.
8273 *
8274 * This function is called in the SLI4 code path to reset the HBA's FCoE
8275 * function. The caller is not required to hold any lock. This routine
8276 * issues PCI function reset mailbox command to reset the FCoE function.
8277 * At the end of the function, it calls lpfc_hba_down_post function to
8278 * free any pending commands.
8279 **/
8280static void
8281lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8282{
8283	int wait_cnt = 0;
8284	LPFC_MBOXQ_t *mboxq;
8285	struct pci_dev *pdev = phba->pcidev;
8286
8287	lpfc_stop_hba_timers(phba);
8288	phba->sli4_hba.intr_enable = 0;
8289
8290	/*
8291	 * Gracefully wait out the potential current outstanding asynchronous
8292	 * mailbox command.
8293	 */
8294
	/* First, block any pending async mailbox command from being posted */
8296	spin_lock_irq(&phba->hbalock);
8297	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8298	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
8300	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8301		msleep(10);
8302		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8303			break;
8304	}
8305	/* Forcefully release the outstanding mailbox command if timed out */
8306	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8307		spin_lock_irq(&phba->hbalock);
8308		mboxq = phba->sli.mbox_active;
8309		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8310		__lpfc_mbox_cmpl_put(phba, mboxq);
8311		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8312		phba->sli.mbox_active = NULL;
8313		spin_unlock_irq(&phba->hbalock);
8314	}
8315
8316	/* Abort all iocbs associated with the hba */
8317	lpfc_sli_hba_iocb_abort(phba);
8318
8319	/* Wait for completion of device XRI exchange busy */
8320	lpfc_sli4_xri_exchange_busy_wait(phba);
8321
8322	/* Disable PCI subsystem interrupt */
8323	lpfc_sli4_disable_intr(phba);
8324
8325	/* Disable SR-IOV if enabled */
8326	if (phba->cfg_sriov_nr_virtfn)
8327		pci_disable_sriov(pdev);
8328
8329	/* Stop kthread signal shall trigger work_done one more time */
8330	kthread_stop(phba->worker_thread);
8331
8332	/* Reset SLI4 HBA FCoE function */
8333	lpfc_pci_function_reset(phba);
8334	lpfc_sli4_queue_destroy(phba);
8335
8336	/* Stop the SLI4 device port */
8337	phba->pport->work_port_events = 0;
8338}
8339
/**
8341 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8342 * @phba: Pointer to HBA context object.
8343 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8344 *
8345 * This function is called in the SLI4 code path to read the port's
8346 * sli4 capabilities.
8347 *
 * This function may be called from any context that can block-wait
8349 * for the completion.  The expectation is that this routine is called
8350 * typically from probe_one or from the online routine.
8351 **/
8352int
8353lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8354{
8355	int rc;
8356	struct lpfc_mqe *mqe;
8357	struct lpfc_pc_sli4_params *sli4_params;
8358	uint32_t mbox_tmo;
8359
8360	rc = 0;
8361	mqe = &mboxq->u.mqe;
8362
8363	/* Read the port's SLI4 Parameters port capabilities */
8364	lpfc_pc_sli4_params(mboxq);
8365	if (!phba->sli4_hba.intr_enable)
8366		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8367	else {
8368		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8369		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8370	}
8371
8372	if (unlikely(rc))
8373		return 1;
8374
8375	sli4_params = &phba->sli4_hba.pc_sli4_params;
8376	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8377	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8378	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8379	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8380					     &mqe->un.sli4_params);
8381	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8382					     &mqe->un.sli4_params);
8383	sli4_params->proto_types = mqe->un.sli4_params.word3;
8384	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8385	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8386	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8387	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8388	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8389	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8390	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8391	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8392	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8393	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8394	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8395	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8396	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8397	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8398	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8399	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8400	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8401	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8402	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8403	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8404
8405	/* Make sure that sge_supp_len can be handled by the driver */
8406	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8407		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8408
8409	return rc;
8410}
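
/*
 * Hypothetical usage sketch (illustration only, not code from this
 * driver): callers such as the probe/online paths are expected to
 * allocate the mailbox from the driver's mailbox mempool, e.g.:
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 *	if (rc)
 *		... fall back to default SLI4 parameter values ...
 */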
8411
8412/**
8413 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8414 * @phba: Pointer to HBA context object.
8415 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8416 *
8417 * This function is called in the SLI4 code path to read the port's
8418 * sli4 capabilities.
8419 *
8420 * This function may be called from any context that can block-wait
8421 * for the completion.  The expectation is that this routine is called
8422 * typically from probe_one or from the online routine.
8423 **/
8424int
8425lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8426{
8427	int rc;
8428	struct lpfc_mqe *mqe = &mboxq->u.mqe;
8429	struct lpfc_pc_sli4_params *sli4_params;
8430	uint32_t mbox_tmo;
8431	int length;
8432	struct lpfc_sli4_parameters *mbx_sli4_parameters;
8433
8434	/*
8435	 * By default, the driver assumes the SLI4 port requires RPI
8436	 * header postings.  The SLI4_PARAM response will correct this
8437	 * assumption.
8438	 */
8439	phba->sli4_hba.rpi_hdrs_in_use = 1;
8440
8441	/* Read the port's SLI4 Config Parameters */
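	/* Embedded mailbox payload length excludes the common config header */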
8442	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8443		  sizeof(struct lpfc_sli4_cfg_mhdr));
8444	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8445			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8446			 length, LPFC_SLI4_MBX_EMBED);
8447	if (!phba->sli4_hba.intr_enable)
8448		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8449	else {
8450		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8451		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8452	}
8453	if (unlikely(rc))
8454		return rc;
8455	sli4_params = &phba->sli4_hba.pc_sli4_params;
8456	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8457	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8458	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8459	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8460	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8461					     mbx_sli4_parameters);
8462	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8463					     mbx_sli4_parameters);
8464	if (bf_get(cfg_phwq, mbx_sli4_parameters))
8465		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8466	else
8467		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8468	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8469	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8470	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8471	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8472	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8473	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8474	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8475					    mbx_sli4_parameters);
8476	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8477					   mbx_sli4_parameters);
8478	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8479	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8480
8481	/* Make sure that sge_supp_len can be handled by the driver */
8482	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8483		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8484
8485	return 0;
8486}
8487
8488/**
8489 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8490 * @pdev: pointer to PCI device
8491 * @pid: pointer to PCI device identifier
8492 *
8493 * This routine is to be called to attach a device with SLI-3 interface spec
8494 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8495 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8496 * information of the device and driver to determine whether the driver can
8497 * support this kind of device. If the match is successful, the driver core
8498 * invokes this routine. If this routine determines it can claim the HBA, it
8499 * does all the initialization that it needs to do to handle the HBA properly.
8500 *
8501 * Return code
8502 * 	0 - driver can claim the device
8503 * 	negative value - driver can not claim the device
8504 **/
8505static int __devinit
8506lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8507{
8508	struct lpfc_hba   *phba;
8509	struct lpfc_vport *vport = NULL;
8510	struct Scsi_Host  *shost = NULL;
8511	int error;
8512	uint32_t cfg_mode, intr_mode;
8513
8514	/* Allocate memory for HBA structure */
8515	phba = lpfc_hba_alloc(pdev);
8516	if (!phba)
8517		return -ENOMEM;
8518
8519	/* Perform generic PCI device enabling operation */
8520	error = lpfc_enable_pci_dev(phba);
8521	if (error)
8522		goto out_free_phba;
8523
8524	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
8525	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8526	if (error)
8527		goto out_disable_pci_dev;
8528
8529	/* Set up SLI-3 specific device PCI memory space */
8530	error = lpfc_sli_pci_mem_setup(phba);
8531	if (error) {
8532		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8533				"1402 Failed to set up pci memory space.\n");
8534		goto out_disable_pci_dev;
8535	}
8536
8537	/* Set up phase-1 common device driver resources */
8538	error = lpfc_setup_driver_resource_phase1(phba);
8539	if (error) {
8540		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8541				"1403 Failed to set up driver resource.\n");
8542		goto out_unset_pci_mem_s3;
8543	}
8544
8545	/* Set up SLI-3 specific device driver resources */
8546	error = lpfc_sli_driver_resource_setup(phba);
8547	if (error) {
8548		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8549				"1404 Failed to set up driver resource.\n");
8550		goto out_unset_pci_mem_s3;
8551	}
8552
8553	/* Initialize and populate the iocb list per host */
8554	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8555	if (error) {
8556		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8557				"1405 Failed to initialize iocb list.\n");
8558		goto out_unset_driver_resource_s3;
8559	}
8560
8561	/* Set up common device driver resources */
8562	error = lpfc_setup_driver_resource_phase2(phba);
8563	if (error) {
8564		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8565				"1406 Failed to set up driver resource.\n");
8566		goto out_free_iocb_list;
8567	}
8568
8569	/* Get the default values for Model Name and Description */
8570	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8571
8572	/* Create SCSI host to the physical port */
8573	error = lpfc_create_shost(phba);
8574	if (error) {
8575		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8576				"1407 Failed to create scsi host.\n");
8577		goto out_unset_driver_resource;
8578	}
8579
8580	/* Configure sysfs attributes */
8581	vport = phba->pport;
8582	error = lpfc_alloc_sysfs_attr(vport);
8583	if (error) {
8584		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8585				"1476 Failed to allocate sysfs attr\n");
8586		goto out_destroy_shost;
8587	}
8588
8589	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8590	/* Now try to enable the interrupt and bring up the device */
8591	cfg_mode = phba->cfg_use_msi;
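	/*
	 * Interrupt-mode fallback: start from the configured mode
	 * (2 = MSI-X, 1 = MSI, 0 = INTx) and step down one level each
	 * time the active-interrupt test fails, until INTx is reached.
	 */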
8592	while (true) {
8593		/* Put device to a known state before enabling interrupt */
8594		lpfc_stop_port(phba);
8595		/* Configure and enable interrupt */
8596		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8597		if (intr_mode == LPFC_INTR_ERROR) {
8598			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8599					"0431 Failed to enable interrupt.\n");
8600			error = -ENODEV;
8601			goto out_free_sysfs_attr;
8602		}
8603		/* SLI-3 HBA setup */
8604		if (lpfc_sli_hba_setup(phba)) {
8605			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8606					"1477 Failed to set up hba\n");
8607			error = -ENODEV;
8608			goto out_remove_device;
8609		}
8610
8611		/* Wait 50ms for the interrupts of previous mailbox commands */
8612		msleep(50);
8613		/* Check active interrupts on message signaled interrupts */
8614		if (intr_mode == 0 ||
8615		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8616			/* Log the current active interrupt mode */
8617			phba->intr_mode = intr_mode;
8618			lpfc_log_intr_mode(phba, intr_mode);
8619			break;
8620		} else {
8621			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8622					"0447 Configure interrupt mode (%d) "
8623					"failed active interrupt test.\n",
8624					intr_mode);
8625			/* Disable the current interrupt mode */
8626			lpfc_sli_disable_intr(phba);
8627			/* Try next level of interrupt mode */
8628			cfg_mode = --intr_mode;
8629		}
8630	}
8631
8632	/* Perform post initialization setup */
8633	lpfc_post_init_setup(phba);
8634
8635	/* Check if there are static vports to be created. */
8636	lpfc_create_static_vport(phba);
8637
8638	return 0;
8639
8640out_remove_device:
8641	lpfc_unset_hba(phba);
8642out_free_sysfs_attr:
8643	lpfc_free_sysfs_attr(vport);
8644out_destroy_shost:
8645	lpfc_destroy_shost(phba);
8646out_unset_driver_resource:
8647	lpfc_unset_driver_resource_phase2(phba);
8648out_free_iocb_list:
8649	lpfc_free_iocb_list(phba);
8650out_unset_driver_resource_s3:
8651	lpfc_sli_driver_resource_unset(phba);
8652out_unset_pci_mem_s3:
8653	lpfc_sli_pci_mem_unset(phba);
8654out_disable_pci_dev:
8655	lpfc_disable_pci_dev(phba);
8656	if (shost)
8657		scsi_host_put(shost);
8658out_free_phba:
8659	lpfc_hba_free(phba);
8660	return error;
8661}
8662
8663/**
8664 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8665 * @pdev: pointer to PCI device
8666 *
8667 * This routine is to be called to detach a device with SLI-3 interface
8668 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8669 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8670 * device to be removed from the PCI subsystem properly.
8671 **/
8672static void __devexit
8673lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8674{
8675	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
8676	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8677	struct lpfc_vport **vports;
8678	struct lpfc_hba   *phba = vport->phba;
8679	int i;
8680	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8681
8682	spin_lock_irq(&phba->hbalock);
8683	vport->load_flag |= FC_UNLOADING;
8684	spin_unlock_irq(&phba->hbalock);
8685
8686	lpfc_free_sysfs_attr(vport);
8687
8688	/* Release all the vports against this physical port */
8689	vports = lpfc_create_vport_work_array(phba);
8690	if (vports != NULL)
8691		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8692			fc_vport_terminate(vports[i]->fc_vport);
8693	lpfc_destroy_vport_work_array(phba, vports);
8694
8695	/* Remove FC host and then SCSI host with the physical port */
8696	fc_remove_host(shost);
8697	scsi_remove_host(shost);
8698	lpfc_cleanup(vport);
8699
8700	/*
8701	 * Bring down the SLI Layer. This step disables all interrupts,
8702	 * clears the rings, discards all mailbox commands, and resets
8703	 * the HBA.
8704	 */
8705
8706	/* HBA interrupt will be disabled after this call */
8707	lpfc_sli_hba_down(phba);
8708	/* Stopping the kthread triggers work_done one more time */
8709	kthread_stop(phba->worker_thread);
8710	/* Final cleanup of txcmplq and reset the HBA */
8711	lpfc_sli_brdrestart(phba);
8712
8713	lpfc_stop_hba_timers(phba);
8714	spin_lock_irq(&phba->hbalock);
8715	list_del_init(&vport->listentry);
8716	spin_unlock_irq(&phba->hbalock);
8717
8718	lpfc_debugfs_terminate(vport);
8719
8720	/* Disable SR-IOV if enabled */
8721	if (phba->cfg_sriov_nr_virtfn)
8722		pci_disable_sriov(pdev);
8723
8724	/* Disable interrupt */
8725	lpfc_sli_disable_intr(phba);
8726
8727	pci_set_drvdata(pdev, NULL);
8728	scsi_host_put(shost);
8729
8730	/*
8731	 * Call scsi_free before mem_free since scsi bufs are released to their
8732	 * corresponding pools here.
8733	 */
8734	lpfc_scsi_free(phba);
8735	lpfc_mem_free_all(phba);
8736
8737	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8738			  phba->hbqslimp.virt, phba->hbqslimp.phys);
8739
8740	/* Free resources associated with SLI2 interface */
8741	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8742			  phba->slim2p.virt, phba->slim2p.phys);
8743
8744	/* unmap adapter SLIM and Control Registers */
8745	iounmap(phba->ctrl_regs_memmap_p);
8746	iounmap(phba->slim_memmap_p);
8747
8748	lpfc_hba_free(phba);
8749
8750	pci_release_selected_regions(pdev, bars);
8751	pci_disable_device(pdev);
8752}
8753
8754/**
8755 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8756 * @pdev: pointer to PCI device
8757 * @msg: power management message
8758 *
8759 * This routine is to be called from the kernel's PCI subsystem to support
8760 * system Power Management (PM) for a device with SLI-3 interface spec. When
8761 * PM invokes this method, it quiesces the device by stopping the driver's
8762 * worker thread for the device, turning off the device's interrupt and DMA,
8763 * and bringing the device offline. Note that as the driver implements the
8764 * minimum PM requirements to a power-aware driver's PM support for the
8765 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8766 * to the suspend() method call will be treated as SUSPEND and the driver will
8767 * fully reinitialize its device during resume() method call, the driver will
8768 * set the device to PCI_D3hot state in PCI config space instead of setting it
8769 * according to the @msg provided by the PM.
8770 *
8771 * Return code
8772 * 	0 - driver suspended the device
8773 * 	Error otherwise
8774 **/
8775static int
8776lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8777{
8778	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8779	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8780
8781	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8782			"0473 PCI device Power Management suspend.\n");
8783
8784	/* Bring down the device */
8785	lpfc_offline_prep(phba);
8786	lpfc_offline(phba);
8787	kthread_stop(phba->worker_thread);
8788
8789	/* Disable interrupt from device */
8790	lpfc_sli_disable_intr(phba);
8791
8792	/* Save device state to PCI config space */
8793	pci_save_state(pdev);
8794	pci_set_power_state(pdev, PCI_D3hot);
8795
8796	return 0;
8797}
8798
8799/**
8800 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8801 * @pdev: pointer to PCI device
8802 *
8803 * This routine is to be called from the kernel's PCI subsystem to support
8804 * system Power Management (PM) for a device with SLI-3 interface spec. When PM
8805 * invokes this method, it restores the device's PCI config space state and
8806 * fully reinitializes the device and brings it online. Note that as the
8807 * driver implements the minimum PM requirements to a power-aware driver's
8808 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
8809 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
8810 * driver will fully reinitialize its device during resume() method call,
8811 * the device will be set to PCI_D0 directly in PCI config space before
8812 * restoring the state.
8813 *
8814 * Return code
8815 * 	0 - driver resumed the device
8816 * 	Error otherwise
8817 **/
8818static int
8819lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8820{
8821	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8822	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8823	uint32_t intr_mode;
8824	int error;
8825
8826	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8827			"0452 PCI device Power Management resume.\n");
8828
8829	/* Restore device state from PCI config space */
8830	pci_set_power_state(pdev, PCI_D0);
8831	pci_restore_state(pdev);
8832
8833	/*
8834	 * As the newer kernel pci_restore_state() behavior clears the
8835	 * device's saved_state flag, the restored state must be saved again.
8836	 */
8837	pci_save_state(pdev);
8838
8839	if (pdev->is_busmaster)
8840		pci_set_master(pdev);
8841
8842	/* Startup the kernel thread for this host adapter. */
8843	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8844					"lpfc_worker_%d", phba->brd_no);
8845	if (IS_ERR(phba->worker_thread)) {
8846		error = PTR_ERR(phba->worker_thread);
8847		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8848				"0434 PM resume failed to start worker "
8849				"thread: error=x%x.\n", error);
8850		return error;
8851	}
8852
8853	/* Configure and enable interrupt */
8854	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8855	if (intr_mode == LPFC_INTR_ERROR) {
8856		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8857				"0430 PM resume Failed to enable interrupt\n");
8858		return -EIO;
8859	} else
8860		phba->intr_mode = intr_mode;
8861
8862	/* Restart HBA and bring it online */
8863	lpfc_sli_brdrestart(phba);
8864	lpfc_online(phba);
8865
8866	/* Log the current active interrupt mode */
8867	lpfc_log_intr_mode(phba, phba->intr_mode);
8868
8869	return 0;
8870}
8871
8872/**
8873 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8874 * @phba: pointer to lpfc hba data structure.
8875 *
8876 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8877 * aborts all the outstanding SCSI I/Os to the pci device.
8878 **/
8879static void
8880lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8881{
8882	struct lpfc_sli *psli = &phba->sli;
8883	struct lpfc_sli_ring  *pring;
8884
8885	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8886			"2723 PCI channel I/O abort preparing for recovery\n");
8887
8888	/*
8889	 * There may be errored I/Os through the HBA; abort all I/Os on the
8890	 * txcmplq and let the SCSI mid-layer retry them to recover.
8891	 */
8892	pring = &psli->ring[psli->fcp_ring];
8893	lpfc_sli_abort_iocb_ring(phba, pring);
8894}
8895
8896/**
8897 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8898 * @phba: pointer to lpfc hba data structure.
8899 *
8900 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8901 * disables the device interrupt and pci device, and aborts the internal FCP
8902 * pending I/Os.
8903 **/
8904static void
8905lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8906{
8907	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8908			"2710 PCI channel disable preparing for reset\n");
8909
8910	/* Block any management I/Os to the device */
8911	lpfc_block_mgmt_io(phba);
8912
8913	/* Block all SCSI devices' I/Os on the host */
8914	lpfc_scsi_dev_block(phba);
8915
8916	/* stop all timers */
8917	lpfc_stop_hba_timers(phba);
8918
8919	/* Disable interrupt and pci device */
8920	lpfc_sli_disable_intr(phba);
8921	pci_disable_device(phba->pcidev);
8922
8923	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
8924	lpfc_sli_flush_fcp_rings(phba);
8925}
8926
8927/**
8928 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8929 * @phba: pointer to lpfc hba data structure.
8930 *
8931 * This routine is called to prepare the SLI3 device for PCI slot permanently
8932 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8933 * pending I/Os.
8934 **/
8935static void
8936lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8937{
8938	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8939			"2711 PCI channel permanent disable for failure\n");
8940	/* Block all SCSI devices' I/Os on the host */
8941	lpfc_scsi_dev_block(phba);
8942
8943	/* stop all timers */
8944	lpfc_stop_hba_timers(phba);
8945
8946	/* Clean up all driver's outstanding SCSI I/Os */
8947	lpfc_sli_flush_fcp_rings(phba);
8948}
8949
8950/**
8951 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8952 * @pdev: pointer to PCI device.
8953 * @state: the current PCI connection state.
8954 *
8955 * This routine is called from the PCI subsystem for I/O error handling for a
8956 * device with SLI-3 interface spec. This function is called by the PCI
8957 * subsystem after a PCI bus error affecting this device has been detected.
8958 * When this function is invoked, it will need to stop all the I/Os and
8959 * interrupt(s) to the device. Once that is done, it will return
8960 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8961 * as desired.
8962 *
8963 * Return codes
8964 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8965 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8966 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8967 **/
8968static pci_ers_result_t
8969lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8970{
8971	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8972	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8973
8974	switch (state) {
8975	case pci_channel_io_normal:
8976		/* Non-fatal error, prepare for recovery */
8977		lpfc_sli_prep_dev_for_recover(phba);
8978		return PCI_ERS_RESULT_CAN_RECOVER;
8979	case pci_channel_io_frozen:
8980		/* Fatal error, prepare for slot reset */
8981		lpfc_sli_prep_dev_for_reset(phba);
8982		return PCI_ERS_RESULT_NEED_RESET;
8983	case pci_channel_io_perm_failure:
8984		/* Permanent failure, prepare for device down */
8985		lpfc_sli_prep_dev_for_perm_failure(phba);
8986		return PCI_ERS_RESULT_DISCONNECT;
8987	default:
8988		/* Unknown state, prepare and request slot reset */
8989		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8990				"0472 Unknown PCI error state: x%x\n", state);
8991		lpfc_sli_prep_dev_for_reset(phba);
8992		return PCI_ERS_RESULT_NEED_RESET;
8993	}
8994}
8995
8996/**
8997 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
8998 * @pdev: pointer to PCI device.
8999 *
9000 * This routine is called from the PCI subsystem for error handling for a
9001 * device with SLI-3 interface spec. This is called after PCI bus has been
9002 * reset to restart the PCI card from scratch, as if from a cold-boot.
9003 * During the PCI subsystem error recovery, after driver returns
9004 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9005 * recovery and then call this routine before calling the .resume method
9006 * to recover the device. This function will initialize the HBA device,
9007 * enable the interrupt, but it will just put the HBA to offline state
9008 * without passing any I/O traffic.
9009 *
9010 * Return codes
9011 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9012 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9013 */
9014static pci_ers_result_t
9015lpfc_io_slot_reset_s3(struct pci_dev *pdev)
9016{
9017	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9018	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9019	struct lpfc_sli *psli = &phba->sli;
9020	uint32_t intr_mode;
9021
9022	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9023	if (pci_enable_device_mem(pdev)) {
9024		printk(KERN_ERR "lpfc: Cannot re-enable "
9025			"PCI device after reset.\n");
9026		return PCI_ERS_RESULT_DISCONNECT;
9027	}
9028
9029	pci_restore_state(pdev);
9030
9031	/*
9032	 * As the newer kernel pci_restore_state() behavior clears the
9033	 * device's saved_state flag, the restored state must be saved again.
9034	 */
9035	pci_save_state(pdev);
9036
9037	if (pdev->is_busmaster)
9038		pci_set_master(pdev);
9039
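
	/* Mark the SLI layer inactive; it is re-initialized when brought online */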
9040	spin_lock_irq(&phba->hbalock);
9041	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9042	spin_unlock_irq(&phba->hbalock);
9043
9044	/* Configure and enable interrupt */
9045	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9046	if (intr_mode == LPFC_INTR_ERROR) {
9047		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9048				"0427 Cannot re-enable interrupt after "
9049				"slot reset.\n");
9050		return PCI_ERS_RESULT_DISCONNECT;
9051	} else
9052		phba->intr_mode = intr_mode;
9053
9054	/* Take device offline, it will perform cleanup */
9055	lpfc_offline_prep(phba);
9056	lpfc_offline(phba);
9057	lpfc_sli_brdrestart(phba);
9058
9059	/* Log the current active interrupt mode */
9060	lpfc_log_intr_mode(phba, phba->intr_mode);
9061
9062	return PCI_ERS_RESULT_RECOVERED;
9063}
9064
9065/**
9066 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
9067 * @pdev: pointer to PCI device
9068 *
9069 * This routine is called from the PCI subsystem for error handling for a device
9070 * with SLI-3 interface spec. It is called when kernel error recovery tells
9071 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9072 * error recovery. After this call, traffic can start to flow from this device
9073 * again.
9074 */
9075static void
9076lpfc_io_resume_s3(struct pci_dev *pdev)
9077{
9078	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9079	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9080
9081	/* Bring device online, it will be no-op for non-fatal error resume */
9082	lpfc_online(phba);
9083
9084	/* Clean up Advanced Error Reporting (AER) if needed */
9085	if (phba->hba_flag & HBA_AER_ENABLED)
9086		pci_cleanup_aer_uncorrect_error_status(pdev);
9087}
9088
9089/**
9090 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
9091 * @phba: pointer to lpfc hba data structure.
9092 *
9093 * returns the number of ELS/CT IOCBs to reserve
9094 **/
9095int
9096lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9097{
9098	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
9099
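	/* Reservation scales with the number of XRIs configured on the port */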
9100	if (phba->sli_rev == LPFC_SLI_REV4) {
9101		if (max_xri <= 100)
9102			return 10;
9103		else if (max_xri <= 256)
9104			return 25;
9105		else if (max_xri <= 512)
9106			return 50;
9107		else if (max_xri <= 1024)
9108			return 100;
9109		else
9110			return 150;
9111	} else
9112		return 0;
9113}
9114
9115/**
9116 * lpfc_write_firmware - attempt to write a firmware image to the port
9117 * @phba: pointer to lpfc hba data structure.
9118 * @fw: pointer to firmware image returned from request_firmware.
9119 *
9120 * returns the number of bytes written if write is successful.
9121 * returns a negative error value if there were errors.
9122 * returns 0 if firmware matches currently active firmware on port.
9123 **/
9124int
9125lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9126{
9127	char fwrev[FW_REV_STR_SIZE];
9128	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
9129	struct list_head dma_buffer_list;
9130	int i, rc = 0;
9131	struct lpfc_dmabuf *dmabuf, *next;
9132	uint32_t offset = 0, temp_offset = 0;
9133
9134	INIT_LIST_HEAD(&dma_buffer_list);
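	/* Validate the .grp wrapper: magic number, file type, ID and total size */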
9135	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9136	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
9137	     LPFC_FILE_TYPE_GROUP) ||
9138	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
9139	    (be32_to_cpu(image->size) != fw->size)) {
9140		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9141				"3022 Invalid FW image found. "
9142				"Magic:%x Type:%x ID:%x\n",
9143				be32_to_cpu(image->magic_number),
9144				bf_get_be32(lpfc_grp_hdr_file_type, image),
9145				bf_get_be32(lpfc_grp_hdr_id, image));
9146		return -EINVAL;
9147	}
9148	lpfc_decode_firmware_rev(phba, fwrev, 1);
9149	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
9150		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9151				"3023 Updating Firmware. Current Version:%s "
9152				"New Version:%s\n",
9153				fwrev, image->revision);
9154		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
9155			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
9156					 GFP_KERNEL);
9157			if (!dmabuf) {
9158				rc = -ENOMEM;
9159				goto out;
9160			}
9161			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9162							  SLI4_PAGE_SIZE,
9163							  &dmabuf->phys,
9164							  GFP_KERNEL);
9165			if (!dmabuf->virt) {
9166				kfree(dmabuf);
9167				rc = -ENOMEM;
9168				goto out;
9169			}
9170			list_add_tail(&dmabuf->list, &dma_buffer_list);
9171		}
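		/*
		 * Stream the image through the DMA buffer list one
		 * SLI4_PAGE_SIZE chunk at a time; lpfc_wr_object() writes
		 * each batch to the port and advances @offset.
		 */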
9172		while (offset < fw->size) {
9173			temp_offset = offset;
9174			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
9175				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
9176					memcpy(dmabuf->virt,
9177					       fw->data + temp_offset,
9178					       fw->size - temp_offset);
9179					temp_offset = fw->size;
9180					break;
9181				}
9182				memcpy(dmabuf->virt, fw->data + temp_offset,
9183				       SLI4_PAGE_SIZE);
9184				temp_offset += SLI4_PAGE_SIZE;
9185			}
9186			rc = lpfc_wr_object(phba, &dma_buffer_list,
9187				    (fw->size - offset), &offset);
9188			if (rc) {
9189				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9190						"3024 Firmware update failed. "
9191						"%d\n", rc);
9192				goto out;
9193			}
9194		}
9195		rc = offset;
9196	}
9197out:
9198	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9199		list_del(&dmabuf->list);
9200		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9201				  dmabuf->virt, dmabuf->phys);
9202		kfree(dmabuf);
9203	}
9204	return rc;
9205}
9206
9207/**
9208 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
9209 * @pdev: pointer to PCI device
9210 * @pid: pointer to PCI device identifier
9211 *
9212 * This routine is called from the kernel's PCI subsystem to attach a device with
9213 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9214 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9215 * information of the device and driver to determine whether the driver
9216 * can support this kind of device. If the match is successful, the driver
9217 * core invokes this routine. If this routine determines it can claim the HBA,
9218 * it does all the initialization that it needs to do to handle the HBA
9219 * properly.
9220 *
9221 * Return code
9222 * 	0 - driver can claim the device
9223 * 	negative value - driver can not claim the device
9224 **/
9225static int __devinit
9226lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9227{
9228	struct lpfc_hba   *phba;
9229	struct lpfc_vport *vport = NULL;
9230	struct Scsi_Host  *shost = NULL;
9231	int error;
9232	uint32_t cfg_mode, intr_mode;
9233	int mcnt;
9234	int adjusted_fcp_eq_count;
9235	const struct firmware *fw;
9236	uint8_t file_name[16];
9237
9238	/* Allocate memory for HBA structure */
9239	phba = lpfc_hba_alloc(pdev);
9240	if (!phba)
9241		return -ENOMEM;
9242
9243	/* Perform generic PCI device enabling operation */
9244	error = lpfc_enable_pci_dev(phba);
9245	if (error)
9246		goto out_free_phba;
9247
9248	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
9249	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
9250	if (error)
9251		goto out_disable_pci_dev;
9252
9253	/* Set up SLI-4 specific device PCI memory space */
9254	error = lpfc_sli4_pci_mem_setup(phba);
9255	if (error) {
9256		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9257				"1410 Failed to set up pci memory space.\n");
9258		goto out_disable_pci_dev;
9259	}
9260
9261	/* Set up phase-1 common device driver resources */
9262	error = lpfc_setup_driver_resource_phase1(phba);
9263	if (error) {
9264		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9265				"1411 Failed to set up driver resource.\n");
9266		goto out_unset_pci_mem_s4;
9267	}
9268
9269	/* Set up SLI-4 Specific device driver resources */
9270	error = lpfc_sli4_driver_resource_setup(phba);
9271	if (error) {
9272		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9273				"1412 Failed to set up driver resource.\n");
9274		goto out_unset_pci_mem_s4;
9275	}
9276
9277	/* Initialize and populate the iocb list per host */
9278
9279	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9280			"2821 initialize iocb list %d.\n",
9281			phba->cfg_iocb_cnt*1024);
9282	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9283
9284	if (error) {
9285		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9286				"1413 Failed to initialize iocb list.\n");
9287		goto out_unset_driver_resource_s4;
9288	}
9289
9290	INIT_LIST_HEAD(&phba->active_rrq_list);
9291	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9292
9293	/* Set up common device driver resources */
9294	error = lpfc_setup_driver_resource_phase2(phba);
9295	if (error) {
9296		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9297				"1414 Failed to set up driver resource.\n");
9298		goto out_free_iocb_list;
9299	}
9300
9301	/* Get the default values for Model Name and Description */
9302	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9303
9304	/* Create SCSI host to the physical port */
9305	error = lpfc_create_shost(phba);
9306	if (error) {
9307		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9308				"1415 Failed to create scsi host.\n");
9309		goto out_unset_driver_resource;
9310	}
9311
9312	/* Configure sysfs attributes */
9313	vport = phba->pport;
9314	error = lpfc_alloc_sysfs_attr(vport);
9315	if (error) {
9316		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9317				"1416 Failed to allocate sysfs attr\n");
9318		goto out_destroy_shost;
9319	}
9320
9321	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9322	/* Now try to enable the interrupt and bring up the device */
9323	cfg_mode = phba->cfg_use_msi;
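	/* Same MSI-X -> MSI -> INTx fallback as the SLI-3 probe path */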
9324	while (true) {
9325		/* Put device to a known state before enabling interrupt */
9326		lpfc_stop_port(phba);
9327		/* Configure and enable interrupt */
9328		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9329		if (intr_mode == LPFC_INTR_ERROR) {
9330			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9331					"0426 Failed to enable interrupt.\n");
9332			error = -ENODEV;
9333			goto out_free_sysfs_attr;
9334		}
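		/*
		 * Clamp the fast-path EQ count to the MSI-X vectors actually
		 * granted, reserving one vector for the slow-path EQ.
		 */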
9335		/* Default to single EQ for non-MSI-X */
9336		if (phba->intr_type != MSIX)
9337			adjusted_fcp_eq_count = 0;
9338		else if (phba->sli4_hba.msix_vec_nr <
9339					phba->cfg_fcp_eq_count + 1)
9340			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9341		else
9342			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9343		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
9344		/* Set up SLI-4 HBA */
9345		if (lpfc_sli4_hba_setup(phba)) {
9346			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9347					"1421 Failed to set up hba\n");
9348			error = -ENODEV;
9349			goto out_disable_intr;
9350		}
9351
9352		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
9353		if (intr_mode != 0)
9354			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9355							    LPFC_ACT_INTR_CNT);
9356
9357		/* Check active interrupts received only for MSI/MSI-X */
9358		if (intr_mode == 0 ||
9359		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9360			/* Log the current active interrupt mode */
9361			phba->intr_mode = intr_mode;
9362			lpfc_log_intr_mode(phba, intr_mode);
9363			break;
9364		}
9365		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9366				"0451 Configure interrupt mode (%d) "
9367				"failed active interrupt test.\n",
9368				intr_mode);
9369		/* Unset the previous SLI-4 HBA setup. */
9370		/*
9371		 * TODO:  Is this operation compatible with IF TYPE 2
9372		 * devices?  All port state is deleted and cleared.
9373		 */
9374		lpfc_sli4_unset_hba(phba);
9375		/* Try next level of interrupt mode */
9376		cfg_mode = --intr_mode;
9377	}
9378
9379	/* Perform post initialization setup */
9380	lpfc_post_init_setup(phba);
9381
9382	/* check for firmware upgrade or downgrade (if_type 2 only) */
9383	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9384	    LPFC_SLI_INTF_IF_TYPE_2) {
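		/* The firmware image is looked up by model name: "<ModelName>.grp" */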
9385		snprintf(file_name, 16, "%s.grp", phba->ModelName);
9386		error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9387		if (!error) {
9388			lpfc_write_firmware(phba, fw);
9389			release_firmware(fw);
9390		}
9391	}
9392
9393	/* Check if there are static vports to be created. */
9394	lpfc_create_static_vport(phba);
9395	return 0;
9396
9397out_disable_intr:
9398	lpfc_sli4_disable_intr(phba);
9399out_free_sysfs_attr:
9400	lpfc_free_sysfs_attr(vport);
9401out_destroy_shost:
9402	lpfc_destroy_shost(phba);
9403out_unset_driver_resource:
9404	lpfc_unset_driver_resource_phase2(phba);
9405out_free_iocb_list:
9406	lpfc_free_iocb_list(phba);
9407out_unset_driver_resource_s4:
9408	lpfc_sli4_driver_resource_unset(phba);
9409out_unset_pci_mem_s4:
9410	lpfc_sli4_pci_mem_unset(phba);
9411out_disable_pci_dev:
9412	lpfc_disable_pci_dev(phba);
9413	if (shost)
9414		scsi_host_put(shost);
9415out_free_phba:
9416	lpfc_hba_free(phba);
9417	return error;
9418}
9419
9420/**
9421 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9422 * @pdev: pointer to PCI device
9423 *
9424 * This routine is called from the kernel's PCI subsystem to detach a device with
9425 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9426 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9427 * device to be removed from the PCI subsystem properly.
9428 **/
9429static void __devexit
9430lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9431{
9432	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9433	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9434	struct lpfc_vport **vports;
9435	struct lpfc_hba *phba = vport->phba;
9436	int i;
9437
9438	/* Mark the device unloading flag */
9439	spin_lock_irq(&phba->hbalock);
9440	vport->load_flag |= FC_UNLOADING;
9441	spin_unlock_irq(&phba->hbalock);
9442
9443	/* Free the HBA sysfs attributes */
9444	lpfc_free_sysfs_attr(vport);
9445
9446	/* Release all the vports against this physical port */
9447	vports = lpfc_create_vport_work_array(phba);
9448	if (vports != NULL)
9449		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
9450			fc_vport_terminate(vports[i]->fc_vport);
9451	lpfc_destroy_vport_work_array(phba, vports);
9452
9453	/* Remove FC host and then SCSI host with the physical port */
9454	fc_remove_host(shost);
9455	scsi_remove_host(shost);
9456
9457	/* Perform cleanup on the physical port */
9458	lpfc_cleanup(vport);
9459
9460	/*
9461	 * Bring down the SLI Layer. This step disables all interrupts,
9462	 * clears the rings, discards all mailbox commands, and resets
9463	 * the HBA FCoE function.
9464	 */
9465	lpfc_debugfs_terminate(vport);
9466	lpfc_sli4_hba_unset(phba);
9467
9468	spin_lock_irq(&phba->hbalock);
9469	list_del_init(&vport->listentry);
9470	spin_unlock_irq(&phba->hbalock);
9471
9472	/* Perform scsi free before driver resource_unset since scsi
9473	 * buffers are released to their corresponding pools here.
9474	 */
9475	lpfc_scsi_free(phba);
9476	lpfc_sli4_driver_resource_unset(phba);
9477
9478	/* Unmap adapter Control and Doorbell registers */
9479	lpfc_sli4_pci_mem_unset(phba);
9480
9481	/* Release PCI resources and disable device's PCI function */
9482	scsi_host_put(shost);
9483	lpfc_disable_pci_dev(phba);
9484
9485	/* Finally, free the driver's device data structure */
9486	lpfc_hba_free(phba);
9487
9488	return;
9489}
9490
9491/**
9492 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9493 * @pdev: pointer to PCI device
9494 * @msg: power management message
9495 *
9496 * This routine is called from the kernel's PCI subsystem to support system
9497 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
9498 * this method, it quiesces the device by stopping the driver's worker
9499 * thread for the device, turning off the device's interrupt and DMA, and bringing
9500 * the device offline. Note that as the driver implements the minimum PM
9501 * requirements to a power-aware driver's PM support for suspend/resume -- all
9502 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
9503 * method call will be treated as SUSPEND and the driver will fully
9504 * reinitialize its device during resume() method call, the driver will set
9505 * the device to PCI_D3hot state in PCI config space instead of setting it
9506 * according to the @msg provided by the PM.
9507 *
9508 * Return code
9509 * 	0 - driver suspended the device
9510 * 	Error otherwise
9511 **/
9512static int
9513lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9514{
9515	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9516	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9517
9518	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9519			"2843 PCI device Power Management suspend.\n");
9520
9521	/* Bring down the device */
9522	lpfc_offline_prep(phba);
9523	lpfc_offline(phba);
9524	kthread_stop(phba->worker_thread);
9525
9526	/* Disable interrupt from device */
9527	lpfc_sli4_disable_intr(phba);
9528	lpfc_sli4_queue_destroy(phba);
9529
9530	/* Save device state to PCI config space */
9531	pci_save_state(pdev);
9532	pci_set_power_state(pdev, PCI_D3hot);
9533
9534	return 0;
9535}
9536
9537/**
9538 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9539 * @pdev: pointer to PCI device
9540 *
9541 * This routine is called from the kernel's PCI subsystem to support system
9542 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
9543 * this method, it restores the device's PCI config space state and fully
9544 * reinitializes the device and brings it online. Note that as the driver
9545 * implements the minimum PM requirements to a power-aware driver's PM for
9546 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
9547 * to the suspend() method call will be treated as SUSPEND and the driver
9548 * will fully reinitialize its device during resume() method call, the device
9549 * will be set to PCI_D0 directly in PCI config space before restoring the
9550 * state.
9551 *
9552 * Return code
9553 * 	0 - driver resumed the device
9554 * 	Error otherwise
9555 **/
9556static int
9557lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9558{
9559	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9560	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9561	uint32_t intr_mode;
9562	int error;
9563
9564	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9565			"0292 PCI device Power Management resume.\n");
9566
9567	/* Restore device state from PCI config space */
9568	pci_set_power_state(pdev, PCI_D0);
9569	pci_restore_state(pdev);
9570
9571	/*
9572	 * As the newer kernel pci_restore_state() behavior clears the
9573	 * device's saved_state flag, the restored state must be saved again.
9574	 */
9575	pci_save_state(pdev);
9576
9577	if (pdev->is_busmaster)
9578		pci_set_master(pdev);
9579
9580	/* Startup the kernel thread for this host adapter. */
9581	phba->worker_thread = kthread_run(lpfc_do_work, phba,
9582					"lpfc_worker_%d", phba->brd_no);
9583	if (IS_ERR(phba->worker_thread)) {
9584		error = PTR_ERR(phba->worker_thread);
9585		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9586				"0293 PM resume failed to start worker "
9587				"thread: error=x%x.\n", error);
9588		return error;
9589	}
9590
9591	/* Configure and enable interrupt */
9592	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9593	if (intr_mode == LPFC_INTR_ERROR) {
9594		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9595				"0294 PM resume Failed to enable interrupt\n");
9596		return -EIO;
9597	} else
9598		phba->intr_mode = intr_mode;
9599
9600	/* Restart HBA and bring it online */
9601	lpfc_sli_brdrestart(phba);
9602	lpfc_online(phba);
9603
9604	/* Log the current active interrupt mode */
9605	lpfc_log_intr_mode(phba, phba->intr_mode);
9606
9607	return 0;
9608}
9609
9610/**
9611 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
9612 * @phba: pointer to lpfc hba data structure.
9613 *
9614 * This routine is called to prepare the SLI4 device for PCI slot recover. It
9615 * aborts all the outstanding SCSI I/Os to the pci device.
9616 **/
9617static void
9618lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9619{
9620	struct lpfc_sli *psli = &phba->sli;
9621	struct lpfc_sli_ring  *pring;
9622
9623	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9624			"2828 PCI channel I/O abort preparing for recovery\n");
9625	/*
9626	 * There may be errored I/Os through the HBA; abort all I/Os on the
9627	 * txcmplq and let the SCSI mid-layer retry them to recover.
9628	 */
9629	pring = &psli->ring[psli->fcp_ring];
9630	lpfc_sli_abort_iocb_ring(phba, pring);
9631}
9632
9633/**
9634 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9635 * @phba: pointer to lpfc hba data structure.
9636 *
9637 * This routine is called to prepare the SLI4 device for PCI slot reset. It
9638 * disables the device interrupt and pci device, and aborts the internal FCP
9639 * pending I/Os.
9640 **/
9641static void
9642lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9643{
9644	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9645			"2826 PCI channel disable preparing for reset\n");
9646
9647	/* Block any management I/Os to the device */
9648	lpfc_block_mgmt_io(phba);
9649
9650	/* Block all SCSI devices' I/Os on the host */
9651	lpfc_scsi_dev_block(phba);
9652
9653	/* stop all timers */
9654	lpfc_stop_hba_timers(phba);
9655
9656	/* Disable interrupt and pci device */
9657	lpfc_sli4_disable_intr(phba);
9658	lpfc_sli4_queue_destroy(phba);
9659	pci_disable_device(phba->pcidev);
9660
9661	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
9662	lpfc_sli_flush_fcp_rings(phba);
9663}
9664
9665/**
9666 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9667 * @phba: pointer to lpfc hba data structure.
9668 *
9669 * This routine is called to prepare the SLI4 device for PCI slot permanently
9670 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9671 * pending I/Os.
9672 **/
9673static void
9674lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9675{
9676	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9677			"2827 PCI channel permanent disable for failure\n");
9678
9679	/* Block all SCSI devices' I/Os on the host */
9680	lpfc_scsi_dev_block(phba);
9681
9682	/* stop all timers */
9683	lpfc_stop_hba_timers(phba);
9684
9685	/* Clean up all driver's outstanding SCSI I/Os */
9686	lpfc_sli_flush_fcp_rings(phba);
9687}
9688
9689/**
9690 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9691 * @pdev: pointer to PCI device.
9692 * @state: the current PCI connection state.
9693 *
9694 * This routine is called from the PCI subsystem for error handling for a device
9695 * with SLI-4 interface spec. This function is called by the PCI subsystem
9696 * after a PCI bus error affecting this device has been detected. When this
9697 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9698 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9699 * for the PCI subsystem to perform proper recovery as desired.
9700 *
9701 * Return codes
9702 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9703 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9704 **/
9705static pci_ers_result_t
9706lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9707{
9708	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9709	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9710
9711	switch (state) {
9712	case pci_channel_io_normal:
9713		/* Non-fatal error, prepare for recovery */
9714		lpfc_sli4_prep_dev_for_recover(phba);
9715		return PCI_ERS_RESULT_CAN_RECOVER;
9716	case pci_channel_io_frozen:
9717		/* Fatal error, prepare for slot reset */
9718		lpfc_sli4_prep_dev_for_reset(phba);
9719		return PCI_ERS_RESULT_NEED_RESET;
9720	case pci_channel_io_perm_failure:
9721		/* Permanent failure, prepare for device down */
9722		lpfc_sli4_prep_dev_for_perm_failure(phba);
9723		return PCI_ERS_RESULT_DISCONNECT;
9724	default:
9725		/* Unknown state, prepare and request slot reset */
9726		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9727				"2825 Unknown PCI error state: x%x\n", state);
9728		lpfc_sli4_prep_dev_for_reset(phba);
9729		return PCI_ERS_RESULT_NEED_RESET;
9730	}
9731}
9732
9733/**
9734 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
9735 * @pdev: pointer to PCI device.
9736 *
9737 * This routine is called from the PCI subsystem for error handling for a device
9738 * with SLI-4 interface spec. It is called after PCI bus has been reset to
9739 * restart the PCI card from scratch, as if from a cold-boot. During the
9740 * PCI subsystem error recovery, after the driver returns
9741 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9742 * recovery and then call this routine before calling the .resume method to
9743 * recover the device. This function will initialize the HBA device, enable
9744 * the interrupt, but it will just put the HBA to offline state without
9745 * passing any I/O traffic.
9746 *
9747 * Return codes
9748 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9749 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9750 */
9751static pci_ers_result_t
9752lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9753{
9754	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9755	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9756	struct lpfc_sli *psli = &phba->sli;
9757	uint32_t intr_mode;
9758
9759	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9760	if (pci_enable_device_mem(pdev)) {
9761		printk(KERN_ERR "lpfc: Cannot re-enable "
9762			"PCI device after reset.\n");
9763		return PCI_ERS_RESULT_DISCONNECT;
9764	}
9765
9766	pci_restore_state(pdev);
9767
9768	/*
9769	 * As the newer kernel pci_restore_state() behavior clears the
9770	 * device's saved_state flag, the restored state must be saved again.
9771	 */
9772	pci_save_state(pdev);
9773
9774	if (pdev->is_busmaster)
9775		pci_set_master(pdev);
9776
9777	spin_lock_irq(&phba->hbalock);
9778	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9779	spin_unlock_irq(&phba->hbalock);
9780
9781	/* Configure and enable interrupt */
9782	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9783	if (intr_mode == LPFC_INTR_ERROR) {
9784		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9785				"2824 Cannot re-enable interrupt after "
9786				"slot reset.\n");
9787		return PCI_ERS_RESULT_DISCONNECT;
9788	} else
9789		phba->intr_mode = intr_mode;
9790
9791	/* Log the current active interrupt mode */
9792	lpfc_log_intr_mode(phba, phba->intr_mode);
9793
9794	return PCI_ERS_RESULT_RECOVERED;
9795}
9796
9797/**
9798 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9799 * @pdev: pointer to PCI device
9800 *
9801 * This routine is called from the PCI subsystem for error handling for a device
9802 * with SLI-4 interface spec. It is called when kernel error recovery tells
9803 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9804 * error recovery. After this call, traffic can start to flow from this device
9805 * again.
9806 **/
9807static void
9808lpfc_io_resume_s4(struct pci_dev *pdev)
9809{
9810	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9811	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9812
9813	/*
9814	 * In case of slot reset, as function reset is performed through
9815	 * mailbox command which needs DMA to be enabled, this operation
9816	 * has to be moved to the io resume phase. Taking device offline
9817	 * will perform the necessary cleanup.
9818	 */
9819	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9820		/* Perform device reset */
9821		lpfc_offline_prep(phba);
9822		lpfc_offline(phba);
9823		lpfc_sli_brdrestart(phba);
9824		/* Bring the device back online */
9825		lpfc_online(phba);
9826	}
9827
9828	/* Clean up Advanced Error Reporting (AER) if needed */
9829	if (phba->hba_flag & HBA_AER_ENABLED)
9830		pci_cleanup_aer_uncorrect_error_status(pdev);
9831}
9832
9833/**
9834 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9835 * @pdev: pointer to PCI device
9836 * @pid: pointer to PCI device identifier
9837 *
9838 * This routine is to be registered to the kernel's PCI subsystem. When an
9839 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9840 * at PCI device-specific information of the device and driver to determine
9841 * whether the driver can support this kind of device. If the match is
9842 * successful, the driver core invokes this routine. This routine dispatches
9843 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9844 * do all the initialization that it needs to do to handle the HBA device
9845 * properly.
9846 *
9847 * Return code
9848 * 	0 - driver can claim the device
9849 * 	negative value - driver can not claim the device
9850 **/
9851static int __devinit
9852lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9853{
9854	int rc;
9855	struct lpfc_sli_intf intf;
9856
9857	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9858		return -ENODEV;
9859
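	/*
	 * A valid SLI_INTF register with SLI-4 revision identifies an SLI-4
	 * function; everything else takes the SLI-3 probe path.
	 */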
9860	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9861	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9862		rc = lpfc_pci_probe_one_s4(pdev, pid);
9863	else
9864		rc = lpfc_pci_probe_one_s3(pdev, pid);
9865
9866	return rc;
9867}
9868
9869/**
9870 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9871 * @pdev: pointer to PCI device
9872 *
9873 * This routine is to be registered to the kernel's PCI subsystem. When an
9874 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9875 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9876 * remove routine, which will perform all the necessary cleanup for the
9877 * device to be removed from the PCI subsystem properly.
9878 **/
9879static void __devexit
9880lpfc_pci_remove_one(struct pci_dev *pdev)
9881{
9882	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9883	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9884
9885	switch (phba->pci_dev_grp) {
9886	case LPFC_PCI_DEV_LP:
9887		lpfc_pci_remove_one_s3(pdev);
9888		break;
9889	case LPFC_PCI_DEV_OC:
9890		lpfc_pci_remove_one_s4(pdev);
9891		break;
9892	default:
9893		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9894				"1424 Invalid PCI device group: 0x%x\n",
9895				phba->pci_dev_grp);
9896		break;
9897	}
9898	return;
9899}
9900
9901/**
9902 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9903 * @pdev: pointer to PCI device
9904 * @msg: power management message
9905 *
9906 * This routine is to be registered to the kernel's PCI subsystem to support
9907 * system Power Management (PM). When PM invokes this method, it dispatches
9908 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9909 * suspend the device.
9910 *
9911 * Return code
9912 * 	0 - driver suspended the device
9913 * 	Error otherwise
9914 **/
9915static int
9916lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9917{
9918	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9919	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9920	int rc = -ENODEV;
9921
9922	switch (phba->pci_dev_grp) {
9923	case LPFC_PCI_DEV_LP:
9924		rc = lpfc_pci_suspend_one_s3(pdev, msg);
9925		break;
9926	case LPFC_PCI_DEV_OC:
9927		rc = lpfc_pci_suspend_one_s4(pdev, msg);
9928		break;
9929	default:
9930		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9931				"1425 Invalid PCI device group: 0x%x\n",
9932				phba->pci_dev_grp);
9933		break;
9934	}
9935	return rc;
9936}
9937
9938/**
9939 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9940 * @pdev: pointer to PCI device
9941 *
9942 * This routine is to be registered to the kernel's PCI subsystem to support
9943 * system Power Management (PM). When PM invokes this method, it dispatches
9944 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9945 * resume the device.
9946 *
9947 * Return code
9948 * 	0 - driver resumed the device
9949 * 	Error otherwise
9950 **/
9951static int
9952lpfc_pci_resume_one(struct pci_dev *pdev)
9953{
9954	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9955	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9956	int rc = -ENODEV;
9957
9958	switch (phba->pci_dev_grp) {
9959	case LPFC_PCI_DEV_LP:
9960		rc = lpfc_pci_resume_one_s3(pdev);
9961		break;
9962	case LPFC_PCI_DEV_OC:
9963		rc = lpfc_pci_resume_one_s4(pdev);
9964		break;
9965	default:
9966		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9967				"1426 Invalid PCI device group: 0x%x\n",
9968				phba->pci_dev_grp);
9969		break;
9970	}
9971	return rc;
9972}
9973
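/*
 * Illustrative sketch only: the matching resume path undoes what the
 * suspend sketch above saved; driver-specific re-initialization of the
 * HBA is omitted.
 */
#if 0
static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);
	/* bring the HBA back online here (driver specific) */
	return 0;
}
#endif
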
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

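/*
 * Illustrative sketch only: the per-SLI handlers map the reported
 * channel state onto a recovery request in the conventional way shown
 * below; this is a simplification of what the real s3/s4 routines do.
 */
#if 0
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		/* transient fault; I/O may continue while we recover */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* I/O is blocked; ask the PCI core for a slot reset */
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
	default:
		/* unrecoverable; detach from the device */
		return PCI_ERS_RESULT_DISCONNECT;
	}
}
#endif
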
/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

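/*
 * Illustrative sketch only: a minimal slot_reset handler in the usual
 * shape; the real s3/s4 routines additionally reconfigure interrupts
 * and bring the SLI layer back up before reporting success.
 */
#if 0
static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);
	pci_set_master(pdev);
	/* re-initialize the hardware here (driver specific) */
	return PCI_ERS_RESULT_RECOVERED;
}
#endif
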
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine puts a reference count on the lpfc module whenever the
 * character device is opened.
 **/
static int
lpfc_mgmt_open(struct inode *inode, struct file *filep)
{
	/* the matching module_put() is in lpfc_mgmt_release() */
	try_module_get(THIS_MODULE);
	return 0;
}

/**
 * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine removes a reference count from the lpfc module when the
 * character device is closed.
 **/
static int
lpfc_mgmt_release(struct inode *inode, struct file *filep)
{
	module_put(THIS_MODULE);
	return 0;
}

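/*
 * Illustrative userspace sketch only (not kernel code): an open file
 * descriptor on the lpfcmgmt node pins the module, so "rmmod lpfc"
 * fails between the open() and close() below.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/lpfcmgmt", O_RDONLY);	/* module refcount++ */

	if (fd < 0)
		return 1;
	/* ... management traffic would go here ... */
	close(fd);					/* module refcount-- */
	return 0;
}
#endif
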
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

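/*
 * Illustrative sketch only: lpfc leaves the driver_data member of each
 * ID-table entry at zero.  Drivers that need per-device information at
 * probe time commonly encode it there instead, e.g.:
 */
#if 0
enum example_board { EXAMPLE_BOARD_A, EXAMPLE_BOARD_B };

static const struct pci_device_id example_id_table[] = {
	/* vendor, device, subvendor, subdevice, class, class_mask, data */
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, EXAMPLE_BOARD_A},
	{ 0 }
};
/* then at probe time: enum example_board board = pid->driver_data; */
#endif
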
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.open = lpfc_mgmt_open,
	.release = lpfc_mgmt_release,
};

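/*
 * Note, sketch only: the manual try_module_get()/module_put() pair in
 * the open and release methods above can broadly be replaced by letting
 * the VFS pin the module for the lifetime of each open file:
 */
#if 0
static const struct file_operations example_mgmt_fop = {
	.owner	 = THIS_MODULE,	/* VFS takes the reference at open time */
	.open	 = lpfc_mgmt_open,
	.release = lpfc_mgmt_release,
};
#endif
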
static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
