lpfc_init.c revision 6669f9bb902b8c3f5e33cb8c32c8c0eec6ed68ed
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30#include <linux/ctype.h>
31
32#include <scsi/scsi.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include "lpfc_hw4.h"
38#include "lpfc_hw.h"
39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
41#include "lpfc_nl.h"
42#include "lpfc_disc.h"
43#include "lpfc_scsi.h"
44#include "lpfc.h"
45#include "lpfc_logmsg.h"
46#include "lpfc_crtn.h"
47#include "lpfc_vport.h"
48#include "lpfc_version.h"
49
50char *_dump_buf_data;
51unsigned long _dump_buf_data_order;
52char *_dump_buf_dif;
53unsigned long _dump_buf_dif_order;
54spinlock_t _dump_buf_lock;
55
56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
73
74static struct scsi_transport_template *lpfc_transport_template = NULL;
75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
76static DEFINE_IDR(lpfc_hba_index);
77
78/**
79 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
80 * @phba: pointer to lpfc hba data structure.
81 *
82 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
83 * mailbox command. It retrieves the revision information from the HBA and
84 * collects the Vital Product Data (VPD) about the HBA for preparing the
85 * configuration of the HBA.
86 *
87 * Return codes:
88 *   0 - success.
89 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
90 *   Any other value - indicates an error.
91 **/
92int
93lpfc_config_port_prep(struct lpfc_hba *phba)
94{
95	lpfc_vpd_t *vp = &phba->vpd;
96	int i = 0, rc;
97	LPFC_MBOXQ_t *pmb;
98	MAILBOX_t *mb;
99	char *lpfc_vpd_data = NULL;
100	uint16_t offset = 0;
101	static char licensed[56] =
102		    "key unlock for use with gnu public licensed code only\0";
103	static int init_key = 1;
104
105	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
106	if (!pmb) {
107		phba->link_state = LPFC_HBA_ERROR;
108		return -ENOMEM;
109	}
110
111	mb = &pmb->u.mb;
112	phba->link_state = LPFC_INIT_MBX_CMDS;
113
114	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
115		if (init_key) {
116			uint32_t *ptext = (uint32_t *) licensed;
117
118			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
119				*ptext = cpu_to_be32(*ptext);
120			init_key = 0;
121		}
122
123		lpfc_read_nv(phba, pmb);
124		memset((char*)mb->un.varRDnvp.rsvd3, 0,
125			sizeof (mb->un.varRDnvp.rsvd3));
126		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
127			 sizeof (licensed));
128
129		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
130
131		if (rc != MBX_SUCCESS) {
132			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
133					"0324 Config Port initialization "
134					"error, mbxCmd x%x READ_NVPARM, "
135					"mbxStatus x%x\n",
136					mb->mbxCommand, mb->mbxStatus);
137			mempool_free(pmb, phba->mbox_mem_pool);
138			return -ERESTART;
139		}
140		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
141		       sizeof(phba->wwnn));
142		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
143		       sizeof(phba->wwpn));
144	}
145
146	phba->sli3_options = 0x0;
147
148	/* Setup and issue mailbox READ REV command */
149	lpfc_read_rev(phba, pmb);
150	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
151	if (rc != MBX_SUCCESS) {
152		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
153				"0439 Adapter failed to init, mbxCmd x%x "
154				"READ_REV, mbxStatus x%x\n",
155				mb->mbxCommand, mb->mbxStatus);
156		mempool_free( pmb, phba->mbox_mem_pool);
157		return -ERESTART;
158	}
159
160
161	/*
162	 * The value of rr must be 1 since the driver set the cv field to 1.
163	 * This setting requires the FW to set all revision fields.
164	 */
165	if (mb->un.varRdRev.rr == 0) {
166		vp->rev.rBit = 0;
167		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
168				"0440 Adapter failed to init, READ_REV has "
169				"missing revision information.\n");
170		mempool_free(pmb, phba->mbox_mem_pool);
171		return -ERESTART;
172	}
173
174	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
175		mempool_free(pmb, phba->mbox_mem_pool);
176		return -EINVAL;
177	}
178
179	/* Save information as VPD data */
180	vp->rev.rBit = 1;
181	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
182	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
183	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
184	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
185	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
186	vp->rev.biuRev = mb->un.varRdRev.biuRev;
187	vp->rev.smRev = mb->un.varRdRev.smRev;
188	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
189	vp->rev.endecRev = mb->un.varRdRev.endecRev;
190	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
191	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
192	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
193	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
194	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
195	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
196
197	/* If the sli feature level is less then 9, we must
198	 * tear down all RPIs and VPIs on link down if NPIV
199	 * is enabled.
200	 */
201	if (vp->rev.feaLevelHigh < 9)
202		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
203
204	if (lpfc_is_LC_HBA(phba->pcidev->device))
205		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
206						sizeof (phba->RandomData));
207
208	/* Get adapter VPD information */
209	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
210	if (!lpfc_vpd_data)
211		goto out_free_mbox;
212
213	do {
214		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
215		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
216
217		if (rc != MBX_SUCCESS) {
218			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
219					"0441 VPD not present on adapter, "
220					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
221					mb->mbxCommand, mb->mbxStatus);
222			mb->un.varDmp.word_cnt = 0;
223		}
224		/* dump mem may return a zero when finished or we got a
225		 * mailbox error, either way we are done.
226		 */
227		if (mb->un.varDmp.word_cnt == 0)
228			break;
229		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
230			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
231		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
232				      lpfc_vpd_data + offset,
233				      mb->un.varDmp.word_cnt);
234		offset += mb->un.varDmp.word_cnt;
235	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
236	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
237
238	kfree(lpfc_vpd_data);
239out_free_mbox:
240	mempool_free(pmb, phba->mbox_mem_pool);
241	return 0;
242}
243
244/**
245 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
246 * @phba: pointer to lpfc hba data structure.
247 * @pmboxq: pointer to the driver internal queue element for mailbox command.
248 *
249 * This is the completion handler for driver's configuring asynchronous event
250 * mailbox command to the device. If the mailbox command returns successfully,
251 * it will set internal async event support flag to 1; otherwise, it will
252 * set internal async event support flag to 0.
253 **/
254static void
255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
256{
257	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
258		phba->temp_sensor_support = 1;
259	else
260		phba->temp_sensor_support = 0;
261	mempool_free(pmboxq, phba->mbox_mem_pool);
262	return;
263}
264
265/**
266 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
267 * @phba: pointer to lpfc hba data structure.
268 * @pmboxq: pointer to the driver internal queue element for mailbox command.
269 *
270 * This is the completion handler for dump mailbox command for getting
271 * wake up parameters. When this command complete, the response contain
272 * Option rom version of the HBA. This function translate the version number
273 * into a human readable string and store it in OptionROMVersion.
274 **/
275static void
276lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
277{
278	struct prog_id *prg;
279	uint32_t prog_id_word;
280	char dist = ' ';
281	/* character array used for decoding dist type. */
282	char dist_char[] = "nabx";
283
284	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
285		mempool_free(pmboxq, phba->mbox_mem_pool);
286		return;
287	}
288
289	prg = (struct prog_id *) &prog_id_word;
290
291	/* word 7 contain option rom version */
292	prog_id_word = pmboxq->u.mb.un.varWords[7];
293
294	/* Decode the Option rom version word to a readable string */
295	if (prg->dist < 4)
296		dist = dist_char[prg->dist];
297
298	if ((prg->dist == 3) && (prg->num == 0))
299		sprintf(phba->OptionROMVersion, "%d.%d%d",
300			prg->ver, prg->rev, prg->lev);
301	else
302		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
303			prg->ver, prg->rev, prg->lev,
304			dist, prg->num);
305	mempool_free(pmboxq, phba->mbox_mem_pool);
306	return;
307}
308
309/**
310 * lpfc_config_port_post - Perform lpfc initialization after config port
311 * @phba: pointer to lpfc hba data structure.
312 *
313 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
314 * command call. It performs all internal resource and state setups on the
315 * port: post IOCB buffers, enable appropriate host interrupt attentions,
316 * ELS ring timers, etc.
317 *
318 * Return codes
319 *   0 - success.
320 *   Any other value - error.
321 **/
322int
323lpfc_config_port_post(struct lpfc_hba *phba)
324{
325	struct lpfc_vport *vport = phba->pport;
326	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
327	LPFC_MBOXQ_t *pmb;
328	MAILBOX_t *mb;
329	struct lpfc_dmabuf *mp;
330	struct lpfc_sli *psli = &phba->sli;
331	uint32_t status, timeout;
332	int i, j;
333	int rc;
334
335	spin_lock_irq(&phba->hbalock);
336	/*
337	 * If the Config port completed correctly the HBA is not
338	 * over heated any more.
339	 */
340	if (phba->over_temp_state == HBA_OVER_TEMP)
341		phba->over_temp_state = HBA_NORMAL_TEMP;
342	spin_unlock_irq(&phba->hbalock);
343
344	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
345	if (!pmb) {
346		phba->link_state = LPFC_HBA_ERROR;
347		return -ENOMEM;
348	}
349	mb = &pmb->u.mb;
350
351	/* Get login parameters for NID.  */
352	lpfc_read_sparam(phba, pmb, 0);
353	pmb->vport = vport;
354	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
355		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
356				"0448 Adapter failed init, mbxCmd x%x "
357				"READ_SPARM mbxStatus x%x\n",
358				mb->mbxCommand, mb->mbxStatus);
359		phba->link_state = LPFC_HBA_ERROR;
360		mp = (struct lpfc_dmabuf *) pmb->context1;
361		mempool_free( pmb, phba->mbox_mem_pool);
362		lpfc_mbuf_free(phba, mp->virt, mp->phys);
363		kfree(mp);
364		return -EIO;
365	}
366
367	mp = (struct lpfc_dmabuf *) pmb->context1;
368
369	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
370	lpfc_mbuf_free(phba, mp->virt, mp->phys);
371	kfree(mp);
372	pmb->context1 = NULL;
373
374	if (phba->cfg_soft_wwnn)
375		u64_to_wwn(phba->cfg_soft_wwnn,
376			   vport->fc_sparam.nodeName.u.wwn);
377	if (phba->cfg_soft_wwpn)
378		u64_to_wwn(phba->cfg_soft_wwpn,
379			   vport->fc_sparam.portName.u.wwn);
380	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
381	       sizeof (struct lpfc_name));
382	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
383	       sizeof (struct lpfc_name));
384
385	/* Update the fc_host data structures with new wwn. */
386	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
387	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388	fc_host_max_npiv_vports(shost) = phba->max_vpi;
389
390	/* If no serial number in VPD data, use low 6 bytes of WWNN */
391	/* This should be consolidated into parse_vpd ? - mr */
392	if (phba->SerialNumber[0] == 0) {
393		uint8_t *outptr;
394
395		outptr = &vport->fc_nodename.u.s.IEEE[0];
396		for (i = 0; i < 12; i++) {
397			status = *outptr++;
398			j = ((status & 0xf0) >> 4);
399			if (j <= 9)
400				phba->SerialNumber[i] =
401				    (char)((uint8_t) 0x30 + (uint8_t) j);
402			else
403				phba->SerialNumber[i] =
404				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
405			i++;
406			j = (status & 0xf);
407			if (j <= 9)
408				phba->SerialNumber[i] =
409				    (char)((uint8_t) 0x30 + (uint8_t) j);
410			else
411				phba->SerialNumber[i] =
412				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
413		}
414	}
415
416	lpfc_read_config(phba, pmb);
417	pmb->vport = vport;
418	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
419		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
420				"0453 Adapter failed to init, mbxCmd x%x "
421				"READ_CONFIG, mbxStatus x%x\n",
422				mb->mbxCommand, mb->mbxStatus);
423		phba->link_state = LPFC_HBA_ERROR;
424		mempool_free( pmb, phba->mbox_mem_pool);
425		return -EIO;
426	}
427
428	/* Check if the port is disabled */
429	lpfc_sli_read_link_ste(phba);
430
431	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
432	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
433		phba->cfg_hba_queue_depth =
434			(mb->un.varRdConfig.max_xri + 1) -
435					lpfc_sli4_get_els_iocb_cnt(phba);
436
437	phba->lmt = mb->un.varRdConfig.lmt;
438
439	/* Get the default values for Model Name and Description */
440	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
441
442	if ((phba->cfg_link_speed > LINK_SPEED_10G)
443	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
444		&& !(phba->lmt & LMT_1Gb))
445	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
446		&& !(phba->lmt & LMT_2Gb))
447	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
448		&& !(phba->lmt & LMT_4Gb))
449	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
450		&& !(phba->lmt & LMT_8Gb))
451	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
452		&& !(phba->lmt & LMT_10Gb))) {
453		/* Reset link speed to auto */
454		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
455			"1302 Invalid speed for this board: "
456			"Reset link speed to auto: x%x\n",
457			phba->cfg_link_speed);
458			phba->cfg_link_speed = LINK_SPEED_AUTO;
459	}
460
461	phba->link_state = LPFC_LINK_DOWN;
462
463	/* Only process IOCBs on ELS ring till hba_state is READY */
464	if (psli->ring[psli->extra_ring].cmdringaddr)
465		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
466	if (psli->ring[psli->fcp_ring].cmdringaddr)
467		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
468	if (psli->ring[psli->next_ring].cmdringaddr)
469		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
470
471	/* Post receive buffers for desired rings */
472	if (phba->sli_rev != 3)
473		lpfc_post_rcv_buf(phba);
474
475	/*
476	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
477	 */
478	if (phba->intr_type == MSIX) {
479		rc = lpfc_config_msi(phba, pmb);
480		if (rc) {
481			mempool_free(pmb, phba->mbox_mem_pool);
482			return -EIO;
483		}
484		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
485		if (rc != MBX_SUCCESS) {
486			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
487					"0352 Config MSI mailbox command "
488					"failed, mbxCmd x%x, mbxStatus x%x\n",
489					pmb->u.mb.mbxCommand,
490					pmb->u.mb.mbxStatus);
491			mempool_free(pmb, phba->mbox_mem_pool);
492			return -EIO;
493		}
494	}
495
496	spin_lock_irq(&phba->hbalock);
497	/* Initialize ERATT handling flag */
498	phba->hba_flag &= ~HBA_ERATT_HANDLED;
499
500	/* Enable appropriate host interrupts */
501	status = readl(phba->HCregaddr);
502	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
503	if (psli->num_rings > 0)
504		status |= HC_R0INT_ENA;
505	if (psli->num_rings > 1)
506		status |= HC_R1INT_ENA;
507	if (psli->num_rings > 2)
508		status |= HC_R2INT_ENA;
509	if (psli->num_rings > 3)
510		status |= HC_R3INT_ENA;
511
512	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
513	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
514		status &= ~(HC_R0INT_ENA);
515
516	writel(status, phba->HCregaddr);
517	readl(phba->HCregaddr); /* flush */
518	spin_unlock_irq(&phba->hbalock);
519
520	/* Set up ring-0 (ELS) timer */
521	timeout = phba->fc_ratov * 2;
522	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
523	/* Set up heart beat (HB) timer */
524	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
525	phba->hb_outstanding = 0;
526	phba->last_completion_time = jiffies;
527	/* Set up error attention (ERATT) polling timer */
528	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
529
530	if (phba->hba_flag & LINK_DISABLED) {
531		lpfc_printf_log(phba,
532			KERN_ERR, LOG_INIT,
533			"2598 Adapter Link is disabled.\n");
534		lpfc_down_link(phba, pmb);
535		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
536		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
537		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
538			lpfc_printf_log(phba,
539			KERN_ERR, LOG_INIT,
540			"2599 Adapter failed to issue DOWN_LINK"
541			" mbox command rc 0x%x\n", rc);
542
543			mempool_free(pmb, phba->mbox_mem_pool);
544			return -EIO;
545		}
546	} else {
547		lpfc_init_link(phba, pmb, phba->cfg_topology,
548			phba->cfg_link_speed);
549		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
550		lpfc_set_loopback_flag(phba);
551		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
552		if (rc != MBX_SUCCESS) {
553			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
554				"0454 Adapter failed to init, mbxCmd x%x "
555				"INIT_LINK, mbxStatus x%x\n",
556				mb->mbxCommand, mb->mbxStatus);
557
558			/* Clear all interrupt enable conditions */
559			writel(0, phba->HCregaddr);
560			readl(phba->HCregaddr); /* flush */
561			/* Clear all pending interrupts */
562			writel(0xffffffff, phba->HAregaddr);
563			readl(phba->HAregaddr); /* flush */
564
565			phba->link_state = LPFC_HBA_ERROR;
566			if (rc != MBX_BUSY)
567				mempool_free(pmb, phba->mbox_mem_pool);
568			return -EIO;
569		}
570	}
571	/* MBOX buffer will be freed in mbox compl */
572	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
573	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
574	pmb->mbox_cmpl = lpfc_config_async_cmpl;
575	pmb->vport = phba->pport;
576	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
577
578	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
579		lpfc_printf_log(phba,
580				KERN_ERR,
581				LOG_INIT,
582				"0456 Adapter failed to issue "
583				"ASYNCEVT_ENABLE mbox status x%x\n",
584				rc);
585		mempool_free(pmb, phba->mbox_mem_pool);
586	}
587
588	/* Get Option rom version */
589	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
590	lpfc_dump_wakeup_param(phba, pmb);
591	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
592	pmb->vport = phba->pport;
593	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
594
595	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
596		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
597				"to get Option ROM version status x%x\n", rc);
598		mempool_free(pmb, phba->mbox_mem_pool);
599	}
600
601	return 0;
602}
603
604/**
605 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
606 * @phba: pointer to lpfc HBA data structure.
607 *
608 * This routine will do LPFC uninitialization before the HBA is reset when
609 * bringing down the SLI Layer.
610 *
611 * Return codes
612 *   0 - success.
613 *   Any other value - error.
614 **/
615int
616lpfc_hba_down_prep(struct lpfc_hba *phba)
617{
618	struct lpfc_vport **vports;
619	int i;
620
621	if (phba->sli_rev <= LPFC_SLI_REV3) {
622		/* Disable interrupts */
623		writel(0, phba->HCregaddr);
624		readl(phba->HCregaddr); /* flush */
625	}
626
627	if (phba->pport->load_flag & FC_UNLOADING)
628		lpfc_cleanup_discovery_resources(phba->pport);
629	else {
630		vports = lpfc_create_vport_work_array(phba);
631		if (vports != NULL)
632			for (i = 0; i <= phba->max_vports &&
633				vports[i] != NULL; i++)
634				lpfc_cleanup_discovery_resources(vports[i]);
635		lpfc_destroy_vport_work_array(phba, vports);
636	}
637	return 0;
638}
639
640/**
641 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
642 * @phba: pointer to lpfc HBA data structure.
643 *
644 * This routine will do uninitialization after the HBA is reset when bring
645 * down the SLI Layer.
646 *
647 * Return codes
648 *   0 - sucess.
649 *   Any other value - error.
650 **/
651static int
652lpfc_hba_down_post_s3(struct lpfc_hba *phba)
653{
654	struct lpfc_sli *psli = &phba->sli;
655	struct lpfc_sli_ring *pring;
656	struct lpfc_dmabuf *mp, *next_mp;
657	LIST_HEAD(completions);
658	int i;
659
660	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
661		lpfc_sli_hbqbuf_free_all(phba);
662	else {
663		/* Cleanup preposted buffers on the ELS ring */
664		pring = &psli->ring[LPFC_ELS_RING];
665		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
666			list_del(&mp->list);
667			pring->postbufq_cnt--;
668			lpfc_mbuf_free(phba, mp->virt, mp->phys);
669			kfree(mp);
670		}
671	}
672
673	spin_lock_irq(&phba->hbalock);
674	for (i = 0; i < psli->num_rings; i++) {
675		pring = &psli->ring[i];
676
677		/* At this point in time the HBA is either reset or DOA. Either
678		 * way, nothing should be on txcmplq as it will NEVER complete.
679		 */
680		list_splice_init(&pring->txcmplq, &completions);
681		pring->txcmplq_cnt = 0;
682		spin_unlock_irq(&phba->hbalock);
683
684		/* Cancel all the IOCBs from the completions list */
685		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
686				      IOERR_SLI_ABORTED);
687
688		lpfc_sli_abort_iocb_ring(phba, pring);
689		spin_lock_irq(&phba->hbalock);
690	}
691	spin_unlock_irq(&phba->hbalock);
692
693	return 0;
694}
695/**
696 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
697 * @phba: pointer to lpfc HBA data structure.
698 *
699 * This routine will do uninitialization after the HBA is reset when bring
700 * down the SLI Layer.
701 *
702 * Return codes
703 *   0 - sucess.
704 *   Any other value - error.
705 **/
706static int
707lpfc_hba_down_post_s4(struct lpfc_hba *phba)
708{
709	struct lpfc_scsi_buf *psb, *psb_next;
710	LIST_HEAD(aborts);
711	int ret;
712	unsigned long iflag = 0;
713	ret = lpfc_hba_down_post_s3(phba);
714	if (ret)
715		return ret;
716	/* At this point in time the HBA is either reset or DOA. Either
717	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
718	 * on the lpfc_sgl_list so that it can either be freed if the
719	 * driver is unloading or reposted if the driver is restarting
720	 * the port.
721	 */
722	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
723					/* scsl_buf_list */
724	/* abts_sgl_list_lock required because worker thread uses this
725	 * list.
726	 */
727	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
728	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
729			&phba->sli4_hba.lpfc_sgl_list);
730	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
731	/* abts_scsi_buf_list_lock required because worker thread uses this
732	 * list.
733	 */
734	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
735	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
736			&aborts);
737	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
738	spin_unlock_irq(&phba->hbalock);
739
740	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
741		psb->pCmd = NULL;
742		psb->status = IOSTAT_SUCCESS;
743	}
744	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
745	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
746	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
747	return 0;
748}
749
750/**
751 * lpfc_hba_down_post - Wrapper func for hba down post routine
752 * @phba: pointer to lpfc HBA data structure.
753 *
754 * This routine wraps the actual SLI3 or SLI4 routine for performing
755 * uninitialization after the HBA is reset when bring down the SLI Layer.
756 *
757 * Return codes
758 *   0 - sucess.
759 *   Any other value - error.
760 **/
761int
762lpfc_hba_down_post(struct lpfc_hba *phba)
763{
764	return (*phba->lpfc_hba_down_post)(phba);
765}
766
767/**
768 * lpfc_hb_timeout - The HBA-timer timeout handler
769 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
770 *
771 * This is the HBA-timer timeout handler registered to the lpfc driver. When
772 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
773 * work-port-events bitmap and the worker thread is notified. This timeout
774 * event will be used by the worker thread to invoke the actual timeout
775 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
776 * be performed in the timeout handler and the HBA timeout event bit shall
777 * be cleared by the worker thread after it has taken the event bitmap out.
778 **/
779static void
780lpfc_hb_timeout(unsigned long ptr)
781{
782	struct lpfc_hba *phba;
783	uint32_t tmo_posted;
784	unsigned long iflag;
785
786	phba = (struct lpfc_hba *)ptr;
787
788	/* Check for heart beat timeout conditions */
789	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
790	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
791	if (!tmo_posted)
792		phba->pport->work_port_events |= WORKER_HB_TMO;
793	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
794
795	/* Tell the worker thread there is work to do */
796	if (!tmo_posted)
797		lpfc_worker_wake_up(phba);
798	return;
799}
800
801/**
802 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
803 * @phba: pointer to lpfc hba data structure.
804 * @pmboxq: pointer to the driver internal queue element for mailbox command.
805 *
806 * This is the callback function to the lpfc heart-beat mailbox command.
807 * If configured, the lpfc driver issues the heart-beat mailbox command to
808 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
809 * heart-beat mailbox command is issued, the driver shall set up heart-beat
810 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
811 * heart-beat outstanding state. Once the mailbox command comes back and
812 * no error conditions detected, the heart-beat mailbox command timer is
813 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
814 * state is cleared for the next heart-beat. If the timer expired with the
815 * heart-beat outstanding state set, the driver will put the HBA offline.
816 **/
817static void
818lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
819{
820	unsigned long drvr_flag;
821
822	spin_lock_irqsave(&phba->hbalock, drvr_flag);
823	phba->hb_outstanding = 0;
824	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
825
826	/* Check and reset heart-beat timer is necessary */
827	mempool_free(pmboxq, phba->mbox_mem_pool);
828	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
829		!(phba->link_state == LPFC_HBA_ERROR) &&
830		!(phba->pport->load_flag & FC_UNLOADING))
831		mod_timer(&phba->hb_tmofunc,
832			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
833	return;
834}
835
836/**
837 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
838 * @phba: pointer to lpfc hba data structure.
839 *
840 * This is the actual HBA-timer timeout handler to be invoked by the worker
841 * thread whenever the HBA timer fired and HBA-timeout event posted. This
842 * handler performs any periodic operations needed for the device. If such
843 * periodic event has already been attended to either in the interrupt handler
844 * or by processing slow-ring or fast-ring events within the HBA-timer
845 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
846 * the timer for the next timeout period. If lpfc heart-beat mailbox command
847 * is configured and there is no heart-beat mailbox command outstanding, a
848 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
849 * has been a heart-beat mailbox command outstanding, the HBA shall be put
850 * to offline.
851 **/
852void
853lpfc_hb_timeout_handler(struct lpfc_hba *phba)
854{
855	LPFC_MBOXQ_t *pmboxq;
856	struct lpfc_dmabuf *buf_ptr;
857	int retval;
858	struct lpfc_sli *psli = &phba->sli;
859	LIST_HEAD(completions);
860
861	if ((phba->link_state == LPFC_HBA_ERROR) ||
862		(phba->pport->load_flag & FC_UNLOADING) ||
863		(phba->pport->fc_flag & FC_OFFLINE_MODE))
864		return;
865
866	spin_lock_irq(&phba->pport->work_port_lock);
867
868	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
869		jiffies)) {
870		spin_unlock_irq(&phba->pport->work_port_lock);
871		if (!phba->hb_outstanding)
872			mod_timer(&phba->hb_tmofunc,
873				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
874		else
875			mod_timer(&phba->hb_tmofunc,
876				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
877		return;
878	}
879	spin_unlock_irq(&phba->pport->work_port_lock);
880
881	if (phba->elsbuf_cnt &&
882		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
883		spin_lock_irq(&phba->hbalock);
884		list_splice_init(&phba->elsbuf, &completions);
885		phba->elsbuf_cnt = 0;
886		phba->elsbuf_prev_cnt = 0;
887		spin_unlock_irq(&phba->hbalock);
888
889		while (!list_empty(&completions)) {
890			list_remove_head(&completions, buf_ptr,
891				struct lpfc_dmabuf, list);
892			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
893			kfree(buf_ptr);
894		}
895	}
896	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
897
898	/* If there is no heart beat outstanding, issue a heartbeat command */
899	if (phba->cfg_enable_hba_heartbeat) {
900		if (!phba->hb_outstanding) {
901			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
902			if (!pmboxq) {
903				mod_timer(&phba->hb_tmofunc,
904					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
905				return;
906			}
907
908			lpfc_heart_beat(phba, pmboxq);
909			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
910			pmboxq->vport = phba->pport;
911			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
912
913			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
914				mempool_free(pmboxq, phba->mbox_mem_pool);
915				mod_timer(&phba->hb_tmofunc,
916					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
917				return;
918			}
919			mod_timer(&phba->hb_tmofunc,
920				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
921			phba->hb_outstanding = 1;
922			return;
923		} else {
924			/*
925			* If heart beat timeout called with hb_outstanding set
926			* we need to take the HBA offline.
927			*/
928			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
929					"0459 Adapter heartbeat failure, "
930					"taking this port offline.\n");
931
932			spin_lock_irq(&phba->hbalock);
933			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
934			spin_unlock_irq(&phba->hbalock);
935
936			lpfc_offline_prep(phba);
937			lpfc_offline(phba);
938			lpfc_unblock_mgmt_io(phba);
939			phba->link_state = LPFC_HBA_ERROR;
940			lpfc_hba_down_post(phba);
941		}
942	}
943}
944
945/**
946 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
947 * @phba: pointer to lpfc hba data structure.
948 *
949 * This routine is called to bring the HBA offline when HBA hardware error
950 * other than Port Error 6 has been detected.
951 **/
952static void
953lpfc_offline_eratt(struct lpfc_hba *phba)
954{
955	struct lpfc_sli   *psli = &phba->sli;
956
957	spin_lock_irq(&phba->hbalock);
958	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
959	spin_unlock_irq(&phba->hbalock);
960	lpfc_offline_prep(phba);
961
962	lpfc_offline(phba);
963	lpfc_reset_barrier(phba);
964	spin_lock_irq(&phba->hbalock);
965	lpfc_sli_brdreset(phba);
966	spin_unlock_irq(&phba->hbalock);
967	lpfc_hba_down_post(phba);
968	lpfc_sli_brdready(phba, HS_MBRDY);
969	lpfc_unblock_mgmt_io(phba);
970	phba->link_state = LPFC_HBA_ERROR;
971	return;
972}
973
974/**
975 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
976 * @phba: pointer to lpfc hba data structure.
977 *
978 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
979 * other than Port Error 6 has been detected.
980 **/
981static void
982lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
983{
984	lpfc_offline_prep(phba);
985	lpfc_offline(phba);
986	lpfc_sli4_brdreset(phba);
987	lpfc_hba_down_post(phba);
988	lpfc_sli4_post_status_check(phba);
989	lpfc_unblock_mgmt_io(phba);
990	phba->link_state = LPFC_HBA_ERROR;
991}
992
993/**
994 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
995 * @phba: pointer to lpfc hba data structure.
996 *
997 * This routine is invoked to handle the deferred HBA hardware error
998 * conditions. This type of error is indicated by HBA by setting ER1
999 * and another ER bit in the host status register. The driver will
1000 * wait until the ER1 bit clears before handling the error condition.
1001 **/
1002static void
1003lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1004{
1005	uint32_t old_host_status = phba->work_hs;
1006	struct lpfc_sli_ring  *pring;
1007	struct lpfc_sli *psli = &phba->sli;
1008
1009	/* If the pci channel is offline, ignore possible errors,
1010	 * since we cannot communicate with the pci card anyway.
1011	 */
1012	if (pci_channel_offline(phba->pcidev)) {
1013		spin_lock_irq(&phba->hbalock);
1014		phba->hba_flag &= ~DEFER_ERATT;
1015		spin_unlock_irq(&phba->hbalock);
1016		return;
1017	}
1018
1019	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1020		"0479 Deferred Adapter Hardware Error "
1021		"Data: x%x x%x x%x\n",
1022		phba->work_hs,
1023		phba->work_status[0], phba->work_status[1]);
1024
1025	spin_lock_irq(&phba->hbalock);
1026	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1027	spin_unlock_irq(&phba->hbalock);
1028
1029
1030	/*
1031	 * Firmware stops when it triggred erratt. That could cause the I/Os
1032	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1033	 * SCSI layer retry it after re-establishing link.
1034	 */
1035	pring = &psli->ring[psli->fcp_ring];
1036	lpfc_sli_abort_iocb_ring(phba, pring);
1037
1038	/*
1039	 * There was a firmware error. Take the hba offline and then
1040	 * attempt to restart it.
1041	 */
1042	lpfc_offline_prep(phba);
1043	lpfc_offline(phba);
1044
1045	/* Wait for the ER1 bit to clear.*/
1046	while (phba->work_hs & HS_FFER1) {
1047		msleep(100);
1048		phba->work_hs = readl(phba->HSregaddr);
1049		/* If driver is unloading let the worker thread continue */
1050		if (phba->pport->load_flag & FC_UNLOADING) {
1051			phba->work_hs = 0;
1052			break;
1053		}
1054	}
1055
1056	/*
1057	 * This is to ptrotect against a race condition in which
1058	 * first write to the host attention register clear the
1059	 * host status register.
1060	 */
1061	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1062		phba->work_hs = old_host_status & ~HS_FFER1;
1063
1064	spin_lock_irq(&phba->hbalock);
1065	phba->hba_flag &= ~DEFER_ERATT;
1066	spin_unlock_irq(&phba->hbalock);
1067	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1068	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1069}
1070
1071static void
1072lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1073{
1074	struct lpfc_board_event_header board_event;
1075	struct Scsi_Host *shost;
1076
1077	board_event.event_type = FC_REG_BOARD_EVENT;
1078	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1079	shost = lpfc_shost_from_vport(phba->pport);
1080	fc_host_post_vendor_event(shost, fc_get_event_number(),
1081				  sizeof(board_event),
1082				  (char *) &board_event,
1083				  LPFC_NL_VENDOR_ID);
1084}
1085
1086/**
1087 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1088 * @phba: pointer to lpfc hba data structure.
1089 *
1090 * This routine is invoked to handle the following HBA hardware error
1091 * conditions:
1092 * 1 - HBA error attention interrupt
1093 * 2 - DMA ring index out of range
1094 * 3 - Mailbox command came back as unknown
1095 **/
1096static void
1097lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1098{
1099	struct lpfc_vport *vport = phba->pport;
1100	struct lpfc_sli   *psli = &phba->sli;
1101	struct lpfc_sli_ring  *pring;
1102	uint32_t event_data;
1103	unsigned long temperature;
1104	struct temp_event temp_event_data;
1105	struct Scsi_Host  *shost;
1106
1107	/* If the pci channel is offline, ignore possible errors,
1108	 * since we cannot communicate with the pci card anyway.
1109	 */
1110	if (pci_channel_offline(phba->pcidev)) {
1111		spin_lock_irq(&phba->hbalock);
1112		phba->hba_flag &= ~DEFER_ERATT;
1113		spin_unlock_irq(&phba->hbalock);
1114		return;
1115	}
1116
1117	/* If resets are disabled then leave the HBA alone and return */
1118	if (!phba->cfg_enable_hba_reset)
1119		return;
1120
1121	/* Send an internal error event to mgmt application */
1122	lpfc_board_errevt_to_mgmt(phba);
1123
1124	if (phba->hba_flag & DEFER_ERATT)
1125		lpfc_handle_deferred_eratt(phba);
1126
1127	if (phba->work_hs & HS_FFER6) {
1128		/* Re-establishing Link */
1129		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1130				"1301 Re-establishing Link "
1131				"Data: x%x x%x x%x\n",
1132				phba->work_hs,
1133				phba->work_status[0], phba->work_status[1]);
1134
1135		spin_lock_irq(&phba->hbalock);
1136		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1137		spin_unlock_irq(&phba->hbalock);
1138
1139		/*
1140		* Firmware stops when it triggled erratt with HS_FFER6.
1141		* That could cause the I/Os dropped by the firmware.
1142		* Error iocb (I/O) on txcmplq and let the SCSI layer
1143		* retry it after re-establishing link.
1144		*/
1145		pring = &psli->ring[psli->fcp_ring];
1146		lpfc_sli_abort_iocb_ring(phba, pring);
1147
1148		/*
1149		 * There was a firmware error.  Take the hba offline and then
1150		 * attempt to restart it.
1151		 */
1152		lpfc_offline_prep(phba);
1153		lpfc_offline(phba);
1154		lpfc_sli_brdrestart(phba);
1155		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1156			lpfc_unblock_mgmt_io(phba);
1157			return;
1158		}
1159		lpfc_unblock_mgmt_io(phba);
1160	} else if (phba->work_hs & HS_CRIT_TEMP) {
1161		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1162		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1163		temp_event_data.event_code = LPFC_CRIT_TEMP;
1164		temp_event_data.data = (uint32_t)temperature;
1165
1166		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1167				"0406 Adapter maximum temperature exceeded "
1168				"(%ld), taking this port offline "
1169				"Data: x%x x%x x%x\n",
1170				temperature, phba->work_hs,
1171				phba->work_status[0], phba->work_status[1]);
1172
1173		shost = lpfc_shost_from_vport(phba->pport);
1174		fc_host_post_vendor_event(shost, fc_get_event_number(),
1175					  sizeof(temp_event_data),
1176					  (char *) &temp_event_data,
1177					  SCSI_NL_VID_TYPE_PCI
1178					  | PCI_VENDOR_ID_EMULEX);
1179
1180		spin_lock_irq(&phba->hbalock);
1181		phba->over_temp_state = HBA_OVER_TEMP;
1182		spin_unlock_irq(&phba->hbalock);
1183		lpfc_offline_eratt(phba);
1184
1185	} else {
1186		/* The if clause above forces this code path when the status
1187		 * failure is a value other than FFER6. Do not call the offline
1188		 * twice. This is the adapter hardware error path.
1189		 */
1190		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1191				"0457 Adapter Hardware Error "
1192				"Data: x%x x%x x%x\n",
1193				phba->work_hs,
1194				phba->work_status[0], phba->work_status[1]);
1195
1196		event_data = FC_REG_DUMP_EVENT;
1197		shost = lpfc_shost_from_vport(vport);
1198		fc_host_post_vendor_event(shost, fc_get_event_number(),
1199				sizeof(event_data), (char *) &event_data,
1200				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1201
1202		lpfc_offline_eratt(phba);
1203	}
1204	return;
1205}
1206
1207/**
1208 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1209 * @phba: pointer to lpfc hba data structure.
1210 *
1211 * This routine is invoked to handle the SLI4 HBA hardware error attention
1212 * conditions.
1213 **/
1214static void
1215lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1216{
1217	struct lpfc_vport *vport = phba->pport;
1218	uint32_t event_data;
1219	struct Scsi_Host *shost;
1220
1221	/* If the pci channel is offline, ignore possible errors, since
1222	 * we cannot communicate with the pci card anyway.
1223	 */
1224	if (pci_channel_offline(phba->pcidev))
1225		return;
1226	/* If resets are disabled then leave the HBA alone and return */
1227	if (!phba->cfg_enable_hba_reset)
1228		return;
1229
1230	/* Send an internal error event to mgmt application */
1231	lpfc_board_errevt_to_mgmt(phba);
1232
1233	/* For now, the actual action for SLI4 device handling is not
1234	 * specified yet, just treated it as adaptor hardware failure
1235	 */
1236	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1237			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1238			phba->work_status[0], phba->work_status[1]);
1239
1240	event_data = FC_REG_DUMP_EVENT;
1241	shost = lpfc_shost_from_vport(vport);
1242	fc_host_post_vendor_event(shost, fc_get_event_number(),
1243				  sizeof(event_data), (char *) &event_data,
1244				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1245
1246	lpfc_sli4_offline_eratt(phba);
1247}
1248
1249/**
1250 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1251 * @phba: pointer to lpfc HBA data structure.
1252 *
1253 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1254 * routine from the API jump table function pointer from the lpfc_hba struct.
1255 *
1256 * Return codes
1257 *   0 - sucess.
1258 *   Any other value - error.
1259 **/
1260void
1261lpfc_handle_eratt(struct lpfc_hba *phba)
1262{
1263	(*phba->lpfc_handle_eratt)(phba);
1264}
1265
1266/**
1267 * lpfc_handle_latt - The HBA link event handler
1268 * @phba: pointer to lpfc hba data structure.
1269 *
1270 * This routine is invoked from the worker thread to handle a HBA host
1271 * attention link event.
1272 **/
1273void
1274lpfc_handle_latt(struct lpfc_hba *phba)
1275{
1276	struct lpfc_vport *vport = phba->pport;
1277	struct lpfc_sli   *psli = &phba->sli;
1278	LPFC_MBOXQ_t *pmb;
1279	volatile uint32_t control;
1280	struct lpfc_dmabuf *mp;
1281	int rc = 0;
1282
1283	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1284	if (!pmb) {
1285		rc = 1;
1286		goto lpfc_handle_latt_err_exit;
1287	}
1288
1289	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1290	if (!mp) {
1291		rc = 2;
1292		goto lpfc_handle_latt_free_pmb;
1293	}
1294
1295	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1296	if (!mp->virt) {
1297		rc = 3;
1298		goto lpfc_handle_latt_free_mp;
1299	}
1300
1301	/* Cleanup any outstanding ELS commands */
1302	lpfc_els_flush_all_cmd(phba);
1303
1304	psli->slistat.link_event++;
1305	lpfc_read_la(phba, pmb, mp);
1306	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1307	pmb->vport = vport;
1308	/* Block ELS IOCBs until we have processed this mbox command */
1309	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1310	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1311	if (rc == MBX_NOT_FINISHED) {
1312		rc = 4;
1313		goto lpfc_handle_latt_free_mbuf;
1314	}
1315
1316	/* Clear Link Attention in HA REG */
1317	spin_lock_irq(&phba->hbalock);
1318	writel(HA_LATT, phba->HAregaddr);
1319	readl(phba->HAregaddr); /* flush */
1320	spin_unlock_irq(&phba->hbalock);
1321
1322	return;
1323
1324lpfc_handle_latt_free_mbuf:
1325	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1326	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1327lpfc_handle_latt_free_mp:
1328	kfree(mp);
1329lpfc_handle_latt_free_pmb:
1330	mempool_free(pmb, phba->mbox_mem_pool);
1331lpfc_handle_latt_err_exit:
1332	/* Enable Link attention interrupts */
1333	spin_lock_irq(&phba->hbalock);
1334	psli->sli_flag |= LPFC_PROCESS_LA;
1335	control = readl(phba->HCregaddr);
1336	control |= HC_LAINT_ENA;
1337	writel(control, phba->HCregaddr);
1338	readl(phba->HCregaddr); /* flush */
1339
1340	/* Clear Link Attention in HA REG */
1341	writel(HA_LATT, phba->HAregaddr);
1342	readl(phba->HAregaddr); /* flush */
1343	spin_unlock_irq(&phba->hbalock);
1344	lpfc_linkdown(phba);
1345	phba->link_state = LPFC_HBA_ERROR;
1346
1347	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1348		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1349
1350	return;
1351}
1352
1353/**
1354 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1355 * @phba: pointer to lpfc hba data structure.
1356 * @vpd: pointer to the vital product data.
1357 * @len: length of the vital product data in bytes.
1358 *
1359 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1360 * an array of characters. In this routine, the ModelName, ProgramType, and
1361 * ModelDesc, etc. fields of the phba data structure will be populated.
1362 *
1363 * Return codes
1364 *   0 - pointer to the VPD passed in is NULL
1365 *   1 - success
1366 **/
1367int
1368lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1369{
1370	uint8_t lenlo, lenhi;
1371	int Length;
1372	int i, j;
1373	int finished = 0;
1374	int index = 0;
1375
1376	if (!vpd)
1377		return 0;
1378
1379	/* Vital Product */
1380	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1381			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1382			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1383			(uint32_t) vpd[3]);
1384	while (!finished && (index < (len - 4))) {
1385		switch (vpd[index]) {
1386		case 0x82:
1387		case 0x91:
1388			index += 1;
1389			lenlo = vpd[index];
1390			index += 1;
1391			lenhi = vpd[index];
1392			index += 1;
1393			i = ((((unsigned short)lenhi) << 8) + lenlo);
1394			index += i;
1395			break;
1396		case 0x90:
1397			index += 1;
1398			lenlo = vpd[index];
1399			index += 1;
1400			lenhi = vpd[index];
1401			index += 1;
1402			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1403			if (Length > len - index)
1404				Length = len - index;
1405			while (Length > 0) {
1406			/* Look for Serial Number */
1407			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1408				index += 2;
1409				i = vpd[index];
1410				index += 1;
1411				j = 0;
1412				Length -= (3+i);
1413				while(i--) {
1414					phba->SerialNumber[j++] = vpd[index++];
1415					if (j == 31)
1416						break;
1417				}
1418				phba->SerialNumber[j] = 0;
1419				continue;
1420			}
1421			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1422				phba->vpd_flag |= VPD_MODEL_DESC;
1423				index += 2;
1424				i = vpd[index];
1425				index += 1;
1426				j = 0;
1427				Length -= (3+i);
1428				while(i--) {
1429					phba->ModelDesc[j++] = vpd[index++];
1430					if (j == 255)
1431						break;
1432				}
1433				phba->ModelDesc[j] = 0;
1434				continue;
1435			}
1436			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1437				phba->vpd_flag |= VPD_MODEL_NAME;
1438				index += 2;
1439				i = vpd[index];
1440				index += 1;
1441				j = 0;
1442				Length -= (3+i);
1443				while(i--) {
1444					phba->ModelName[j++] = vpd[index++];
1445					if (j == 79)
1446						break;
1447				}
1448				phba->ModelName[j] = 0;
1449				continue;
1450			}
1451			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1452				phba->vpd_flag |= VPD_PROGRAM_TYPE;
1453				index += 2;
1454				i = vpd[index];
1455				index += 1;
1456				j = 0;
1457				Length -= (3+i);
1458				while(i--) {
1459					phba->ProgramType[j++] = vpd[index++];
1460					if (j == 255)
1461						break;
1462				}
1463				phba->ProgramType[j] = 0;
1464				continue;
1465			}
1466			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1467				phba->vpd_flag |= VPD_PORT;
1468				index += 2;
1469				i = vpd[index];
1470				index += 1;
1471				j = 0;
1472				Length -= (3+i);
1473				while(i--) {
1474				phba->Port[j++] = vpd[index++];
1475				if (j == 19)
1476					break;
1477				}
1478				phba->Port[j] = 0;
1479				continue;
1480			}
1481			else {
1482				index += 2;
1483				i = vpd[index];
1484				index += 1;
1485				index += i;
1486				Length -= (3 + i);
1487			}
1488		}
1489		finished = 0;
1490		break;
1491		case 0x78:
1492			finished = 1;
1493			break;
1494		default:
1495			index ++;
1496			break;
1497		}
1498	}
1499
1500	return(1);
1501}
1502
1503/**
1504 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1505 * @phba: pointer to lpfc hba data structure.
1506 * @mdp: pointer to the data structure to hold the derived model name.
1507 * @descp: pointer to the data structure to hold the derived description.
1508 *
1509 * This routine retrieves HBA's description based on its registered PCI device
1510 * ID. The @descp passed into this function points to an array of 256 chars. It
1511 * shall be returned with the model name, maximum speed, and the host bus type.
1512 * The @mdp passed into this function points to an array of 80 chars. When the
1513 * function returns, the @mdp will be filled with the model name.
1514 **/
1515static void
1516lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1517{
1518	lpfc_vpd_t *vp;
1519	uint16_t dev_id = phba->pcidev->device;
1520	int max_speed;
1521	int GE = 0;
1522	int oneConnect = 0; /* default is not a oneConnect */
1523	struct {
1524		char * name;
1525		int    max_speed;
1526		char * bus;
1527	} m = {"<Unknown>", 0, ""};
1528
1529	if (mdp && mdp[0] != '\0'
1530		&& descp && descp[0] != '\0')
1531		return;
1532
1533	if (phba->lmt & LMT_10Gb)
1534		max_speed = 10;
1535	else if (phba->lmt & LMT_8Gb)
1536		max_speed = 8;
1537	else if (phba->lmt & LMT_4Gb)
1538		max_speed = 4;
1539	else if (phba->lmt & LMT_2Gb)
1540		max_speed = 2;
1541	else
1542		max_speed = 1;
1543
1544	vp = &phba->vpd;
1545
1546	switch (dev_id) {
1547	case PCI_DEVICE_ID_FIREFLY:
1548		m = (typeof(m)){"LP6000", max_speed, "PCI"};
1549		break;
1550	case PCI_DEVICE_ID_SUPERFLY:
1551		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1552			m = (typeof(m)){"LP7000", max_speed,  "PCI"};
1553		else
1554			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
1555		break;
1556	case PCI_DEVICE_ID_DRAGONFLY:
1557		m = (typeof(m)){"LP8000", max_speed, "PCI"};
1558		break;
1559	case PCI_DEVICE_ID_CENTAUR:
1560		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1561			m = (typeof(m)){"LP9002", max_speed, "PCI"};
1562		else
1563			m = (typeof(m)){"LP9000", max_speed, "PCI"};
1564		break;
1565	case PCI_DEVICE_ID_RFLY:
1566		m = (typeof(m)){"LP952", max_speed, "PCI"};
1567		break;
1568	case PCI_DEVICE_ID_PEGASUS:
1569		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
1570		break;
1571	case PCI_DEVICE_ID_THOR:
1572		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
1573		break;
1574	case PCI_DEVICE_ID_VIPER:
1575		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
1576		break;
1577	case PCI_DEVICE_ID_PFLY:
1578		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
1579		break;
1580	case PCI_DEVICE_ID_TFLY:
1581		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
1582		break;
1583	case PCI_DEVICE_ID_HELIOS:
1584		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
1585		break;
1586	case PCI_DEVICE_ID_HELIOS_SCSP:
1587		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
1588		break;
1589	case PCI_DEVICE_ID_HELIOS_DCSP:
1590		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
1591		break;
1592	case PCI_DEVICE_ID_NEPTUNE:
1593		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
1594		break;
1595	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1596		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
1597		break;
1598	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1599		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
1600		break;
1601	case PCI_DEVICE_ID_BMID:
1602		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
1603		break;
1604	case PCI_DEVICE_ID_BSMB:
1605		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
1606		break;
1607	case PCI_DEVICE_ID_ZEPHYR:
1608		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1609		break;
1610	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1611		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1612		break;
1613	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1614		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
1615		GE = 1;
1616		break;
1617	case PCI_DEVICE_ID_ZMID:
1618		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
1619		break;
1620	case PCI_DEVICE_ID_ZSMB:
1621		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
1622		break;
1623	case PCI_DEVICE_ID_LP101:
1624		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
1625		break;
1626	case PCI_DEVICE_ID_LP10000S:
1627		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
1628		break;
1629	case PCI_DEVICE_ID_LP11000S:
1630		m = (typeof(m)){"LP11000-S", max_speed,
1631			"PCI-X2"};
1632		break;
1633	case PCI_DEVICE_ID_LPE11000S:
1634		m = (typeof(m)){"LPe11000-S", max_speed,
1635			"PCIe"};
1636		break;
1637	case PCI_DEVICE_ID_SAT:
1638		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1639		break;
1640	case PCI_DEVICE_ID_SAT_MID:
1641		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1642		break;
1643	case PCI_DEVICE_ID_SAT_SMB:
1644		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1645		break;
1646	case PCI_DEVICE_ID_SAT_DCSP:
1647		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1648		break;
1649	case PCI_DEVICE_ID_SAT_SCSP:
1650		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1651		break;
1652	case PCI_DEVICE_ID_SAT_S:
1653		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1654		break;
1655	case PCI_DEVICE_ID_HORNET:
1656		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1657		GE = 1;
1658		break;
1659	case PCI_DEVICE_ID_PROTEUS_VF:
1660		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1661		break;
1662	case PCI_DEVICE_ID_PROTEUS_PF:
1663		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1664		break;
1665	case PCI_DEVICE_ID_PROTEUS_S:
1666		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1667		break;
1668	case PCI_DEVICE_ID_TIGERSHARK:
1669		oneConnect = 1;
1670		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1671		break;
1672	case PCI_DEVICE_ID_TS_BE3:
1673		oneConnect = 1;
1674		m = (typeof(m)) {"OCeXXXXX-F", max_speed, "PCIe"};
1675		break;
1676	default:
1677		m = (typeof(m)){ NULL };
1678		break;
1679	}
1680
1681	if (mdp && mdp[0] == '\0')
1682		snprintf(mdp, 79,"%s", m.name);
1683	/* oneConnect hba requires special processing, they are all initiators
1684	 * and we put the port number on the end
1685	 */
1686	if (descp && descp[0] == '\0') {
1687		if (oneConnect)
1688			snprintf(descp, 255,
1689				"Emulex OneConnect %s, FCoE Initiator, Port %s",
1690				m.name,
1691				phba->Port);
1692		else
1693			snprintf(descp, 255,
1694				"Emulex %s %d%s %s %s",
1695				m.name, m.max_speed,
1696				(GE) ? "GE" : "Gb",
1697				m.bus,
1698				(GE) ? "FCoE Adapter" :
1699					"Fibre Channel Adapter");
1700	}
1701}
1702
1703/**
1704 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1705 * @phba: pointer to lpfc hba data structure.
1706 * @pring: pointer to a IOCB ring.
1707 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1708 *
1709 * This routine posts a given number of IOCBs with the associated DMA buffer
1710 * descriptors specified by the cnt argument to the given IOCB ring.
1711 *
1712 * Return codes
1713 *   The number of IOCBs NOT able to be posted to the IOCB ring.
1714 **/
1715int
1716lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1717{
1718	IOCB_t *icmd;
1719	struct lpfc_iocbq *iocb;
1720	struct lpfc_dmabuf *mp1, *mp2;
1721
1722	cnt += pring->missbufcnt;
1723
1724	/* While there are buffers to post */
1725	while (cnt > 0) {
1726		/* Allocate buffer for  command iocb */
1727		iocb = lpfc_sli_get_iocbq(phba);
1728		if (iocb == NULL) {
1729			pring->missbufcnt = cnt;
1730			return cnt;
1731		}
1732		icmd = &iocb->iocb;
1733
1734		/* 2 buffers can be posted per command */
1735		/* Allocate buffer to post */
1736		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1737		if (mp1)
1738		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1739		if (!mp1 || !mp1->virt) {
1740			kfree(mp1);
1741			lpfc_sli_release_iocbq(phba, iocb);
1742			pring->missbufcnt = cnt;
1743			return cnt;
1744		}
1745
1746		INIT_LIST_HEAD(&mp1->list);
1747		/* Allocate buffer to post */
1748		if (cnt > 1) {
1749			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1750			if (mp2)
1751				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1752							    &mp2->phys);
1753			if (!mp2 || !mp2->virt) {
1754				kfree(mp2);
1755				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1756				kfree(mp1);
1757				lpfc_sli_release_iocbq(phba, iocb);
1758				pring->missbufcnt = cnt;
1759				return cnt;
1760			}
1761
1762			INIT_LIST_HEAD(&mp2->list);
1763		} else {
1764			mp2 = NULL;
1765		}
1766
1767		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1768		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1769		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1770		icmd->ulpBdeCount = 1;
1771		cnt--;
1772		if (mp2) {
1773			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1774			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1775			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1776			cnt--;
1777			icmd->ulpBdeCount = 2;
1778		}
1779
1780		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1781		icmd->ulpLe = 1;
1782
1783		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1784		    IOCB_ERROR) {
1785			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1786			kfree(mp1);
1787			cnt++;
1788			if (mp2) {
1789				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1790				kfree(mp2);
1791				cnt++;
1792			}
1793			lpfc_sli_release_iocbq(phba, iocb);
1794			pring->missbufcnt = cnt;
1795			return cnt;
1796		}
1797		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1798		if (mp2)
1799			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1800	}
1801	pring->missbufcnt = 0;
1802	return 0;
1803}
1804
1805/**
1806 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1807 * @phba: pointer to lpfc hba data structure.
1808 *
1809 * This routine posts initial receive IOCB buffers to the ELS ring. The
1810 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1811 * set to 64 IOCBs.
1812 *
1813 * Return codes
1814 *   0 - success (currently always success)
1815 **/
1816static int
1817lpfc_post_rcv_buf(struct lpfc_hba *phba)
1818{
1819	struct lpfc_sli *psli = &phba->sli;
1820
1821	/* Ring 0, ELS / CT buffers */
1822	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1823	/* Ring 2 - FCP no buffers needed */
1824
1825	return 0;
1826}
1827
1828#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1829
1830/**
1831 * lpfc_sha_init - Set up initial array of hash table entries
1832 * @HashResultPointer: pointer to an array as hash table.
1833 *
1834 * This routine sets up the initial values to the array of hash table entries
1835 * for the LC HBAs.
1836 **/
1837static void
1838lpfc_sha_init(uint32_t * HashResultPointer)
1839{
1840	HashResultPointer[0] = 0x67452301;
1841	HashResultPointer[1] = 0xEFCDAB89;
1842	HashResultPointer[2] = 0x98BADCFE;
1843	HashResultPointer[3] = 0x10325476;
1844	HashResultPointer[4] = 0xC3D2E1F0;
1845}
1846
1847/**
1848 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1849 * @HashResultPointer: pointer to an initial/result hash table.
1850 * @HashWorkingPointer: pointer to an working hash table.
1851 *
1852 * This routine iterates an initial hash table pointed by @HashResultPointer
1853 * with the values from the working hash table pointeed by @HashWorkingPointer.
1854 * The results are putting back to the initial hash table, returned through
1855 * the @HashResultPointer as the result hash table.
1856 **/
1857static void
1858lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1859{
1860	int t;
1861	uint32_t TEMP;
1862	uint32_t A, B, C, D, E;
1863	t = 16;
1864	do {
1865		HashWorkingPointer[t] =
1866		    S(1,
1867		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1868								     8] ^
1869		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1870	} while (++t <= 79);
1871	t = 0;
1872	A = HashResultPointer[0];
1873	B = HashResultPointer[1];
1874	C = HashResultPointer[2];
1875	D = HashResultPointer[3];
1876	E = HashResultPointer[4];
1877
1878	do {
1879		if (t < 20) {
1880			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1881		} else if (t < 40) {
1882			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1883		} else if (t < 60) {
1884			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1885		} else {
1886			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1887		}
1888		TEMP += S(5, A) + E + HashWorkingPointer[t];
1889		E = D;
1890		D = C;
1891		C = S(30, B);
1892		B = A;
1893		A = TEMP;
1894	} while (++t <= 79);
1895
1896	HashResultPointer[0] += A;
1897	HashResultPointer[1] += B;
1898	HashResultPointer[2] += C;
1899	HashResultPointer[3] += D;
1900	HashResultPointer[4] += E;
1901
1902}
1903
1904/**
1905 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1906 * @RandomChallenge: pointer to the entry of host challenge random number array.
1907 * @HashWorking: pointer to the entry of the working hash array.
1908 *
1909 * This routine calculates the working hash array referred by @HashWorking
1910 * from the challenge random numbers associated with the host, referred by
1911 * @RandomChallenge. The result is put into the entry of the working hash
1912 * array and returned by reference through @HashWorking.
1913 **/
1914static void
1915lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1916{
1917	*HashWorking = (*RandomChallenge ^ *HashWorking);
1918}
1919
1920/**
1921 * lpfc_hba_init - Perform special handling for LC HBA initialization
1922 * @phba: pointer to lpfc hba data structure.
1923 * @hbainit: pointer to an array of unsigned 32-bit integers.
1924 *
1925 * This routine performs the special handling for LC HBA initialization.
1926 **/
1927void
1928lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1929{
1930	int t;
1931	uint32_t *HashWorking;
1932	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1933
1934	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1935	if (!HashWorking)
1936		return;
1937
1938	HashWorking[0] = HashWorking[78] = *pwwnn++;
1939	HashWorking[1] = HashWorking[79] = *pwwnn;
1940
1941	for (t = 0; t < 7; t++)
1942		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1943
1944	lpfc_sha_init(hbainit);
1945	lpfc_sha_iterate(hbainit, HashWorking);
1946	kfree(HashWorking);
1947}
1948
1949/**
1950 * lpfc_cleanup - Performs vport cleanups before deleting a vport
1951 * @vport: pointer to a virtual N_Port data structure.
1952 *
1953 * This routine performs the necessary cleanups before deleting the @vport.
1954 * It invokes the discovery state machine to perform necessary state
1955 * transitions and to release the ndlps associated with the @vport. Note,
1956 * the physical port is treated as @vport 0.
1957 **/
1958void
1959lpfc_cleanup(struct lpfc_vport *vport)
1960{
1961	struct lpfc_hba   *phba = vport->phba;
1962	struct lpfc_nodelist *ndlp, *next_ndlp;
1963	int i = 0;
1964
1965	if (phba->link_state > LPFC_LINK_DOWN)
1966		lpfc_port_link_failure(vport);
1967
1968	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1969		if (!NLP_CHK_NODE_ACT(ndlp)) {
1970			ndlp = lpfc_enable_node(vport, ndlp,
1971						NLP_STE_UNUSED_NODE);
1972			if (!ndlp)
1973				continue;
1974			spin_lock_irq(&phba->ndlp_lock);
1975			NLP_SET_FREE_REQ(ndlp);
1976			spin_unlock_irq(&phba->ndlp_lock);
1977			/* Trigger the release of the ndlp memory */
1978			lpfc_nlp_put(ndlp);
1979			continue;
1980		}
1981		spin_lock_irq(&phba->ndlp_lock);
1982		if (NLP_CHK_FREE_REQ(ndlp)) {
1983			/* The ndlp should not be in memory free mode already */
1984			spin_unlock_irq(&phba->ndlp_lock);
1985			continue;
1986		} else
1987			/* Indicate request for freeing ndlp memory */
1988			NLP_SET_FREE_REQ(ndlp);
1989		spin_unlock_irq(&phba->ndlp_lock);
1990
1991		if (vport->port_type != LPFC_PHYSICAL_PORT &&
1992		    ndlp->nlp_DID == Fabric_DID) {
1993			/* Just free up ndlp with Fabric_DID for vports */
1994			lpfc_nlp_put(ndlp);
1995			continue;
1996		}
1997
1998		if (ndlp->nlp_type & NLP_FABRIC)
1999			lpfc_disc_state_machine(vport, ndlp, NULL,
2000					NLP_EVT_DEVICE_RECOVERY);
2001
2002		lpfc_disc_state_machine(vport, ndlp, NULL,
2003					     NLP_EVT_DEVICE_RM);
2004
2005	}
2006
2007	/* At this point, ALL ndlp's should be gone
2008	 * because of the previous NLP_EVT_DEVICE_RM.
2009	 * Lets wait for this to happen, if needed.
2010	 */
2011	while (!list_empty(&vport->fc_nodes)) {
2012		if (i++ > 3000) {
2013			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2014				"0233 Nodelist not empty\n");
2015			list_for_each_entry_safe(ndlp, next_ndlp,
2016						&vport->fc_nodes, nlp_listp) {
2017				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2018						LOG_NODE,
2019						"0282 did:x%x ndlp:x%p "
2020						"usgmap:x%x refcnt:%d\n",
2021						ndlp->nlp_DID, (void *)ndlp,
2022						ndlp->nlp_usg_map,
2023						atomic_read(
2024							&ndlp->kref.refcount));
2025			}
2026			break;
2027		}
2028
2029		/* Wait for any activity on ndlps to settle */
2030		msleep(10);
2031	}
2032}
2033
2034/**
2035 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2036 * @vport: pointer to a virtual N_Port data structure.
2037 *
2038 * This routine stops all the timers associated with a @vport. This function
2039 * is invoked before disabling or deleting a @vport. Note that the physical
2040 * port is treated as @vport 0.
2041 **/
2042void
2043lpfc_stop_vport_timers(struct lpfc_vport *vport)
2044{
2045	del_timer_sync(&vport->els_tmofunc);
2046	del_timer_sync(&vport->fc_fdmitmo);
2047	lpfc_can_disctmo(vport);
2048	return;
2049}
2050
2051/**
2052 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2053 * @phba: pointer to lpfc hba data structure.
2054 *
2055 * This routine stops all the timers associated with a HBA. This function is
2056 * invoked before either putting a HBA offline or unloading the driver.
2057 **/
2058void
2059lpfc_stop_hba_timers(struct lpfc_hba *phba)
2060{
2061	lpfc_stop_vport_timers(phba->pport);
2062	del_timer_sync(&phba->sli.mbox_tmo);
2063	del_timer_sync(&phba->fabric_block_timer);
2064	del_timer_sync(&phba->eratt_poll);
2065	del_timer_sync(&phba->hb_tmofunc);
2066	phba->hb_outstanding = 0;
2067
2068	switch (phba->pci_dev_grp) {
2069	case LPFC_PCI_DEV_LP:
2070		/* Stop any LightPulse device specific driver timers */
2071		del_timer_sync(&phba->fcp_poll_timer);
2072		break;
2073	case LPFC_PCI_DEV_OC:
2074		/* Stop any OneConnect device sepcific driver timers */
2075		break;
2076	default:
2077		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2078				"0297 Invalid device group (x%x)\n",
2079				phba->pci_dev_grp);
2080		break;
2081	}
2082	return;
2083}
2084
2085/**
2086 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2087 * @phba: pointer to lpfc hba data structure.
2088 *
2089 * This routine marks a HBA's management interface as blocked. Once the HBA's
2090 * management interface is marked as blocked, all the user space access to
2091 * the HBA, whether they are from sysfs interface or libdfc interface will
2092 * all be blocked. The HBA is set to block the management interface when the
2093 * driver prepares the HBA interface for online or offline.
2094 **/
2095static void
2096lpfc_block_mgmt_io(struct lpfc_hba * phba)
2097{
2098	unsigned long iflag;
2099
2100	spin_lock_irqsave(&phba->hbalock, iflag);
2101	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2102	spin_unlock_irqrestore(&phba->hbalock, iflag);
2103}
2104
2105/**
2106 * lpfc_online - Initialize and bring a HBA online
2107 * @phba: pointer to lpfc hba data structure.
2108 *
2109 * This routine initializes the HBA and brings a HBA online. During this
2110 * process, the management interface is blocked to prevent user space access
2111 * to the HBA interfering with the driver initialization.
2112 *
2113 * Return codes
2114 *   0 - successful
2115 *   1 - failed
2116 **/
2117int
2118lpfc_online(struct lpfc_hba *phba)
2119{
2120	struct lpfc_vport *vport;
2121	struct lpfc_vport **vports;
2122	int i;
2123
2124	if (!phba)
2125		return 0;
2126	vport = phba->pport;
2127
2128	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2129		return 0;
2130
2131	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2132			"0458 Bring Adapter online\n");
2133
2134	lpfc_block_mgmt_io(phba);
2135
2136	if (!lpfc_sli_queue_setup(phba)) {
2137		lpfc_unblock_mgmt_io(phba);
2138		return 1;
2139	}
2140
2141	if (phba->sli_rev == LPFC_SLI_REV4) {
2142		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2143			lpfc_unblock_mgmt_io(phba);
2144			return 1;
2145		}
2146	} else {
2147		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2148			lpfc_unblock_mgmt_io(phba);
2149			return 1;
2150		}
2151	}
2152
2153	vports = lpfc_create_vport_work_array(phba);
2154	if (vports != NULL)
2155		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2156			struct Scsi_Host *shost;
2157			shost = lpfc_shost_from_vport(vports[i]);
2158			spin_lock_irq(shost->host_lock);
2159			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2160			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2161				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2162			if (phba->sli_rev == LPFC_SLI_REV4)
2163				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2164			spin_unlock_irq(shost->host_lock);
2165		}
2166		lpfc_destroy_vport_work_array(phba, vports);
2167
2168	lpfc_unblock_mgmt_io(phba);
2169	return 0;
2170}
2171
2172/**
2173 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2174 * @phba: pointer to lpfc hba data structure.
2175 *
2176 * This routine marks a HBA's management interface as not blocked. Once the
2177 * HBA's management interface is marked as not blocked, all the user space
2178 * access to the HBA, whether they are from sysfs interface or libdfc
2179 * interface will be allowed. The HBA is set to block the management interface
2180 * when the driver prepares the HBA interface for online or offline and then
2181 * set to unblock the management interface afterwards.
2182 **/
2183void
2184lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2185{
2186	unsigned long iflag;
2187
2188	spin_lock_irqsave(&phba->hbalock, iflag);
2189	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2190	spin_unlock_irqrestore(&phba->hbalock, iflag);
2191}
2192
2193/**
2194 * lpfc_offline_prep - Prepare a HBA to be brought offline
2195 * @phba: pointer to lpfc hba data structure.
2196 *
2197 * This routine is invoked to prepare a HBA to be brought offline. It performs
2198 * unregistration login to all the nodes on all vports and flushes the mailbox
2199 * queue to make it ready to be brought offline.
2200 **/
2201void
2202lpfc_offline_prep(struct lpfc_hba * phba)
2203{
2204	struct lpfc_vport *vport = phba->pport;
2205	struct lpfc_nodelist  *ndlp, *next_ndlp;
2206	struct lpfc_vport **vports;
2207	int i;
2208
2209	if (vport->fc_flag & FC_OFFLINE_MODE)
2210		return;
2211
2212	lpfc_block_mgmt_io(phba);
2213
2214	lpfc_linkdown(phba);
2215
2216	/* Issue an unreg_login to all nodes on all vports */
2217	vports = lpfc_create_vport_work_array(phba);
2218	if (vports != NULL) {
2219		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2220			struct Scsi_Host *shost;
2221
2222			if (vports[i]->load_flag & FC_UNLOADING)
2223				continue;
2224			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
2225			shost =	lpfc_shost_from_vport(vports[i]);
2226			list_for_each_entry_safe(ndlp, next_ndlp,
2227						 &vports[i]->fc_nodes,
2228						 nlp_listp) {
2229				if (!NLP_CHK_NODE_ACT(ndlp))
2230					continue;
2231				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2232					continue;
2233				if (ndlp->nlp_type & NLP_FABRIC) {
2234					lpfc_disc_state_machine(vports[i], ndlp,
2235						NULL, NLP_EVT_DEVICE_RECOVERY);
2236					lpfc_disc_state_machine(vports[i], ndlp,
2237						NULL, NLP_EVT_DEVICE_RM);
2238				}
2239				spin_lock_irq(shost->host_lock);
2240				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2241				spin_unlock_irq(shost->host_lock);
2242				lpfc_unreg_rpi(vports[i], ndlp);
2243			}
2244		}
2245	}
2246	lpfc_destroy_vport_work_array(phba, vports);
2247
2248	lpfc_sli_mbox_sys_shutdown(phba);
2249}
2250
2251/**
2252 * lpfc_offline - Bring a HBA offline
2253 * @phba: pointer to lpfc hba data structure.
2254 *
2255 * This routine actually brings a HBA offline. It stops all the timers
2256 * associated with the HBA, brings down the SLI layer, and eventually
2257 * marks the HBA as in offline state for the upper layer protocol.
2258 **/
2259void
2260lpfc_offline(struct lpfc_hba *phba)
2261{
2262	struct Scsi_Host  *shost;
2263	struct lpfc_vport **vports;
2264	int i;
2265
2266	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2267		return;
2268
2269	/* stop port and all timers associated with this hba */
2270	lpfc_stop_port(phba);
2271	vports = lpfc_create_vport_work_array(phba);
2272	if (vports != NULL)
2273		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2274			lpfc_stop_vport_timers(vports[i]);
2275	lpfc_destroy_vport_work_array(phba, vports);
2276	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2277			"0460 Bring Adapter offline\n");
2278	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2279	   now.  */
2280	lpfc_sli_hba_down(phba);
2281	spin_lock_irq(&phba->hbalock);
2282	phba->work_ha = 0;
2283	spin_unlock_irq(&phba->hbalock);
2284	vports = lpfc_create_vport_work_array(phba);
2285	if (vports != NULL)
2286		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2287			shost = lpfc_shost_from_vport(vports[i]);
2288			spin_lock_irq(shost->host_lock);
2289			vports[i]->work_port_events = 0;
2290			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2291			spin_unlock_irq(shost->host_lock);
2292		}
2293	lpfc_destroy_vport_work_array(phba, vports);
2294}
2295
2296/**
2297 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2298 * @phba: pointer to lpfc hba data structure.
2299 *
2300 * This routine is to free all the SCSI buffers and IOCBs from the driver
2301 * list back to kernel. It is called from lpfc_pci_remove_one to free
2302 * the internal resources before the device is removed from the system.
2303 *
2304 * Return codes
2305 *   0 - successful (for now, it always returns 0)
2306 **/
2307static int
2308lpfc_scsi_free(struct lpfc_hba *phba)
2309{
2310	struct lpfc_scsi_buf *sb, *sb_next;
2311	struct lpfc_iocbq *io, *io_next;
2312
2313	spin_lock_irq(&phba->hbalock);
2314	/* Release all the lpfc_scsi_bufs maintained by this host. */
2315	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2316		list_del(&sb->list);
2317		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2318			      sb->dma_handle);
2319		kfree(sb);
2320		phba->total_scsi_bufs--;
2321	}
2322
2323	/* Release all the lpfc_iocbq entries maintained by this host. */
2324	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2325		list_del(&io->list);
2326		kfree(io);
2327		phba->total_iocbq_bufs--;
2328	}
2329
2330	spin_unlock_irq(&phba->hbalock);
2331
2332	return 0;
2333}
2334
2335/**
2336 * lpfc_create_port - Create an FC port
2337 * @phba: pointer to lpfc hba data structure.
2338 * @instance: a unique integer ID to this FC port.
2339 * @dev: pointer to the device data structure.
2340 *
2341 * This routine creates a FC port for the upper layer protocol. The FC port
2342 * can be created on top of either a physical port or a virtual port provided
2343 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2344 * and associates the FC port created before adding the shost into the SCSI
2345 * layer.
2346 *
2347 * Return codes
2348 *   @vport - pointer to the virtual N_Port data structure.
2349 *   NULL - port create failed.
2350 **/
2351struct lpfc_vport *
2352lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2353{
2354	struct lpfc_vport *vport;
2355	struct Scsi_Host  *shost;
2356	int error = 0;
2357
2358	if (dev != &phba->pcidev->dev)
2359		shost = scsi_host_alloc(&lpfc_vport_template,
2360					sizeof(struct lpfc_vport));
2361	else
2362		shost = scsi_host_alloc(&lpfc_template,
2363					sizeof(struct lpfc_vport));
2364	if (!shost)
2365		goto out;
2366
2367	vport = (struct lpfc_vport *) shost->hostdata;
2368	vport->phba = phba;
2369	vport->load_flag |= FC_LOADING;
2370	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2371	vport->fc_rscn_flush = 0;
2372
2373	lpfc_get_vport_cfgparam(vport);
2374	shost->unique_id = instance;
2375	shost->max_id = LPFC_MAX_TARGET;
2376	shost->max_lun = vport->cfg_max_luns;
2377	shost->this_id = -1;
2378	shost->max_cmd_len = 16;
2379	if (phba->sli_rev == LPFC_SLI_REV4) {
2380		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2381		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2382	}
2383
2384	/*
2385	 * Set initial can_queue value since 0 is no longer supported and
2386	 * scsi_add_host will fail. This will be adjusted later based on the
2387	 * max xri value determined in hba setup.
2388	 */
2389	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2390	if (dev != &phba->pcidev->dev) {
2391		shost->transportt = lpfc_vport_transport_template;
2392		vport->port_type = LPFC_NPIV_PORT;
2393	} else {
2394		shost->transportt = lpfc_transport_template;
2395		vport->port_type = LPFC_PHYSICAL_PORT;
2396	}
2397
2398	/* Initialize all internally managed lists. */
2399	INIT_LIST_HEAD(&vport->fc_nodes);
2400	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2401	spin_lock_init(&vport->work_port_lock);
2402
2403	init_timer(&vport->fc_disctmo);
2404	vport->fc_disctmo.function = lpfc_disc_timeout;
2405	vport->fc_disctmo.data = (unsigned long)vport;
2406
2407	init_timer(&vport->fc_fdmitmo);
2408	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2409	vport->fc_fdmitmo.data = (unsigned long)vport;
2410
2411	init_timer(&vport->els_tmofunc);
2412	vport->els_tmofunc.function = lpfc_els_timeout;
2413	vport->els_tmofunc.data = (unsigned long)vport;
2414
2415	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2416	if (error)
2417		goto out_put_shost;
2418
2419	spin_lock_irq(&phba->hbalock);
2420	list_add_tail(&vport->listentry, &phba->port_list);
2421	spin_unlock_irq(&phba->hbalock);
2422	return vport;
2423
2424out_put_shost:
2425	scsi_host_put(shost);
2426out:
2427	return NULL;
2428}
2429
2430/**
2431 * destroy_port -  destroy an FC port
2432 * @vport: pointer to an lpfc virtual N_Port data structure.
2433 *
2434 * This routine destroys a FC port from the upper layer protocol. All the
2435 * resources associated with the port are released.
2436 **/
2437void
2438destroy_port(struct lpfc_vport *vport)
2439{
2440	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2441	struct lpfc_hba  *phba = vport->phba;
2442
2443	lpfc_debugfs_terminate(vport);
2444	fc_remove_host(shost);
2445	scsi_remove_host(shost);
2446
2447	spin_lock_irq(&phba->hbalock);
2448	list_del_init(&vport->listentry);
2449	spin_unlock_irq(&phba->hbalock);
2450
2451	lpfc_cleanup(vport);
2452	return;
2453}
2454
2455/**
2456 * lpfc_get_instance - Get a unique integer ID
2457 *
2458 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2459 * uses the kernel idr facility to perform the task.
2460 *
2461 * Return codes:
2462 *   instance - a unique integer ID allocated as the new instance.
2463 *   -1 - lpfc get instance failed.
2464 **/
2465int
2466lpfc_get_instance(void)
2467{
2468	int instance = 0;
2469
2470	/* Assign an unused number */
2471	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2472		return -1;
2473	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2474		return -1;
2475	return instance;
2476}
2477
2478/**
2479 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2480 * @shost: pointer to SCSI host data structure.
2481 * @time: elapsed time of the scan in jiffies.
2482 *
2483 * This routine is called by the SCSI layer with a SCSI host to determine
2484 * whether the scan host is finished.
2485 *
2486 * Note: there is no scan_start function as adapter initialization will have
2487 * asynchronously kicked off the link initialization.
2488 *
2489 * Return codes
2490 *   0 - SCSI host scan is not over yet.
2491 *   1 - SCSI host scan is over.
2492 **/
2493int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2494{
2495	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2496	struct lpfc_hba   *phba = vport->phba;
2497	int stat = 0;
2498
2499	spin_lock_irq(shost->host_lock);
2500
2501	if (vport->load_flag & FC_UNLOADING) {
2502		stat = 1;
2503		goto finished;
2504	}
2505	if (time >= 30 * HZ) {
2506		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2507				"0461 Scanning longer than 30 "
2508				"seconds.  Continuing initialization\n");
2509		stat = 1;
2510		goto finished;
2511	}
2512	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2513		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2514				"0465 Link down longer than 15 "
2515				"seconds.  Continuing initialization\n");
2516		stat = 1;
2517		goto finished;
2518	}
2519
2520	if (vport->port_state != LPFC_VPORT_READY)
2521		goto finished;
2522	if (vport->num_disc_nodes || vport->fc_prli_sent)
2523		goto finished;
2524	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2525		goto finished;
2526	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2527		goto finished;
2528
2529	stat = 1;
2530
2531finished:
2532	spin_unlock_irq(shost->host_lock);
2533	return stat;
2534}
2535
2536/**
2537 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2538 * @shost: pointer to SCSI host data structure.
2539 *
2540 * This routine initializes a given SCSI host attributes on a FC port. The
2541 * SCSI host can be either on top of a physical port or a virtual port.
2542 **/
2543void lpfc_host_attrib_init(struct Scsi_Host *shost)
2544{
2545	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2546	struct lpfc_hba   *phba = vport->phba;
2547	/*
2548	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
2549	 */
2550
2551	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2552	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2553	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2554
2555	memset(fc_host_supported_fc4s(shost), 0,
2556	       sizeof(fc_host_supported_fc4s(shost)));
2557	fc_host_supported_fc4s(shost)[2] = 1;
2558	fc_host_supported_fc4s(shost)[7] = 1;
2559
2560	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2561				 sizeof fc_host_symbolic_name(shost));
2562
2563	fc_host_supported_speeds(shost) = 0;
2564	if (phba->lmt & LMT_10Gb)
2565		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2566	if (phba->lmt & LMT_8Gb)
2567		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2568	if (phba->lmt & LMT_4Gb)
2569		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2570	if (phba->lmt & LMT_2Gb)
2571		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2572	if (phba->lmt & LMT_1Gb)
2573		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2574
2575	fc_host_maxframe_size(shost) =
2576		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2577		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2578
2579	/* This value is also unchanging */
2580	memset(fc_host_active_fc4s(shost), 0,
2581	       sizeof(fc_host_active_fc4s(shost)));
2582	fc_host_active_fc4s(shost)[2] = 1;
2583	fc_host_active_fc4s(shost)[7] = 1;
2584
2585	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2586	spin_lock_irq(shost->host_lock);
2587	vport->load_flag &= ~FC_LOADING;
2588	spin_unlock_irq(shost->host_lock);
2589}
2590
2591/**
2592 * lpfc_stop_port_s3 - Stop SLI3 device port
2593 * @phba: pointer to lpfc hba data structure.
2594 *
2595 * This routine is invoked to stop an SLI3 device port, it stops the device
2596 * from generating interrupts and stops the device driver's timers for the
2597 * device.
2598 **/
2599static void
2600lpfc_stop_port_s3(struct lpfc_hba *phba)
2601{
2602	/* Clear all interrupt enable conditions */
2603	writel(0, phba->HCregaddr);
2604	readl(phba->HCregaddr); /* flush */
2605	/* Clear all pending interrupts */
2606	writel(0xffffffff, phba->HAregaddr);
2607	readl(phba->HAregaddr); /* flush */
2608
2609	/* Reset some HBA SLI setup states */
2610	lpfc_stop_hba_timers(phba);
2611	phba->pport->work_port_events = 0;
2612}
2613
2614/**
2615 * lpfc_stop_port_s4 - Stop SLI4 device port
2616 * @phba: pointer to lpfc hba data structure.
2617 *
2618 * This routine is invoked to stop an SLI4 device port, it stops the device
2619 * from generating interrupts and stops the device driver's timers for the
2620 * device.
2621 **/
2622static void
2623lpfc_stop_port_s4(struct lpfc_hba *phba)
2624{
2625	/* Reset some HBA SLI4 setup states */
2626	lpfc_stop_hba_timers(phba);
2627	phba->pport->work_port_events = 0;
2628	phba->sli4_hba.intr_enable = 0;
2629	/* Hard clear it for now, shall have more graceful way to wait later */
2630	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2631}
2632
2633/**
2634 * lpfc_stop_port - Wrapper function for stopping hba port
2635 * @phba: Pointer to HBA context object.
2636 *
2637 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2638 * the API jump table function pointer from the lpfc_hba struct.
2639 **/
2640void
2641lpfc_stop_port(struct lpfc_hba *phba)
2642{
2643	phba->lpfc_stop_port(phba);
2644}
2645
2646/**
2647 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port.
2648 * @phba: pointer to lpfc hba data structure.
2649 *
2650 * This routine is invoked to remove the driver default fcf record from
2651 * the port.  This routine currently acts on FCF Index 0.
2652 *
2653 **/
2654void
2655lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2656{
2657	int rc = 0;
2658	LPFC_MBOXQ_t *mboxq;
2659	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2660	uint32_t mbox_tmo, req_len;
2661	uint32_t shdr_status, shdr_add_status;
2662
2663	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2664	if (!mboxq) {
2665		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2666			"2020 Failed to allocate mbox for ADD_FCF cmd\n");
2667		return;
2668	}
2669
2670	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2671		  sizeof(struct lpfc_sli4_cfg_mhdr);
2672	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2673			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2674			      req_len, LPFC_SLI4_MBX_EMBED);
2675	/*
2676	 * In phase 1, there is a single FCF index, 0.  In phase2, the driver
2677	 * supports multiple FCF indices.
2678	 */
2679	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2680	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2681	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2682	       phba->fcf.fcf_indx);
2683
2684	if (!phba->sli4_hba.intr_enable)
2685		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2686	else {
2687		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2688		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2689	}
2690	/* The IOCTL status is embedded in the mailbox subheader. */
2691	shdr_status = bf_get(lpfc_mbox_hdr_status,
2692			     &del_fcf_record->header.cfg_shdr.response);
2693	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2694				 &del_fcf_record->header.cfg_shdr.response);
2695	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2696		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2697				"2516 DEL FCF of default FCF Index failed "
2698				"mbx status x%x, status x%x add_status x%x\n",
2699				rc, shdr_status, shdr_add_status);
2700	}
2701	if (rc != MBX_TIMEOUT)
2702		mempool_free(mboxq, phba->mbox_mem_pool);
2703}
2704
2705/**
2706 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2707 * @phba: pointer to lpfc hba data structure.
2708 *
2709 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2710 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2711 * was successful and the firmware supports FCoE. Any other return indicates
2712 * a error. It is assumed that this function will be called before interrupts
2713 * are enabled.
2714 **/
2715static int
2716lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2717{
2718	int rc = 0;
2719	LPFC_MBOXQ_t *mboxq;
2720	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2721	uint32_t length;
2722	uint32_t shdr_status, shdr_add_status;
2723
2724	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2725	if (!mboxq) {
2726		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2727				"2621 Failed to allocate mbox for "
2728				"query firmware config cmd\n");
2729		return -ENOMEM;
2730	}
2731	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2732	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2733		  sizeof(struct lpfc_sli4_cfg_mhdr));
2734	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2735			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2736			 length, LPFC_SLI4_MBX_EMBED);
2737	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2738	/* The IOCTL status is embedded in the mailbox subheader. */
2739	shdr_status = bf_get(lpfc_mbox_hdr_status,
2740			     &query_fw_cfg->header.cfg_shdr.response);
2741	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2742				 &query_fw_cfg->header.cfg_shdr.response);
2743	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2744		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2745				"2622 Query Firmware Config failed "
2746				"mbx status x%x, status x%x add_status x%x\n",
2747				rc, shdr_status, shdr_add_status);
2748		return -EINVAL;
2749	}
2750	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
2751		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2752				"2623 FCoE Function not supported by firmware. "
2753				"Function mode = %08x\n",
2754				query_fw_cfg->function_mode);
2755		return -EINVAL;
2756	}
2757	if (rc != MBX_TIMEOUT)
2758		mempool_free(mboxq, phba->mbox_mem_pool);
2759	return 0;
2760}
2761
2762/**
2763 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2764 * @phba: pointer to lpfc hba data structure.
2765 * @acqe_link: pointer to the async link completion queue entry.
2766 *
2767 * This routine is to parse the SLI4 link-attention link fault code and
2768 * translate it into the base driver's read link attention mailbox command
2769 * status.
2770 *
2771 * Return: Link-attention status in terms of base driver's coding.
2772 **/
2773static uint16_t
2774lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2775			   struct lpfc_acqe_link *acqe_link)
2776{
2777	uint16_t latt_fault;
2778
2779	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2780	case LPFC_ASYNC_LINK_FAULT_NONE:
2781	case LPFC_ASYNC_LINK_FAULT_LOCAL:
2782	case LPFC_ASYNC_LINK_FAULT_REMOTE:
2783		latt_fault = 0;
2784		break;
2785	default:
2786		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2787				"0398 Invalid link fault code: x%x\n",
2788				bf_get(lpfc_acqe_link_fault, acqe_link));
2789		latt_fault = MBXERR_ERROR;
2790		break;
2791	}
2792	return latt_fault;
2793}
2794
2795/**
2796 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2797 * @phba: pointer to lpfc hba data structure.
2798 * @acqe_link: pointer to the async link completion queue entry.
2799 *
2800 * This routine is to parse the SLI4 link attention type and translate it
2801 * into the base driver's link attention type coding.
2802 *
2803 * Return: Link attention type in terms of base driver's coding.
2804 **/
2805static uint8_t
2806lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2807			  struct lpfc_acqe_link *acqe_link)
2808{
2809	uint8_t att_type;
2810
2811	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2812	case LPFC_ASYNC_LINK_STATUS_DOWN:
2813	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2814		att_type = AT_LINK_DOWN;
2815		break;
2816	case LPFC_ASYNC_LINK_STATUS_UP:
2817		/* Ignore physical link up events - wait for logical link up */
2818		att_type = AT_RESERVED;
2819		break;
2820	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2821		att_type = AT_LINK_UP;
2822		break;
2823	default:
2824		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2825				"0399 Invalid link attention type: x%x\n",
2826				bf_get(lpfc_acqe_link_status, acqe_link));
2827		att_type = AT_RESERVED;
2828		break;
2829	}
2830	return att_type;
2831}
2832
2833/**
2834 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2835 * @phba: pointer to lpfc hba data structure.
2836 * @acqe_link: pointer to the async link completion queue entry.
2837 *
2838 * This routine is to parse the SLI4 link-attention link speed and translate
2839 * it into the base driver's link-attention link speed coding.
2840 *
2841 * Return: Link-attention link speed in terms of base driver's coding.
2842 **/
2843static uint8_t
2844lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2845				struct lpfc_acqe_link *acqe_link)
2846{
2847	uint8_t link_speed;
2848
2849	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2850	case LPFC_ASYNC_LINK_SPEED_ZERO:
2851		link_speed = LA_UNKNW_LINK;
2852		break;
2853	case LPFC_ASYNC_LINK_SPEED_10MBPS:
2854		link_speed = LA_UNKNW_LINK;
2855		break;
2856	case LPFC_ASYNC_LINK_SPEED_100MBPS:
2857		link_speed = LA_UNKNW_LINK;
2858		break;
2859	case LPFC_ASYNC_LINK_SPEED_1GBPS:
2860		link_speed = LA_1GHZ_LINK;
2861		break;
2862	case LPFC_ASYNC_LINK_SPEED_10GBPS:
2863		link_speed = LA_10GHZ_LINK;
2864		break;
2865	default:
2866		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2867				"0483 Invalid link-attention link speed: x%x\n",
2868				bf_get(lpfc_acqe_link_speed, acqe_link));
2869		link_speed = LA_UNKNW_LINK;
2870		break;
2871	}
2872	return link_speed;
2873}
2874
2875/**
2876 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2877 * @phba: pointer to lpfc hba data structure.
2878 * @acqe_link: pointer to the async link completion queue entry.
2879 *
2880 * This routine is to handle the SLI4 asynchronous link event.
2881 **/
2882static void
2883lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2884			 struct lpfc_acqe_link *acqe_link)
2885{
2886	struct lpfc_dmabuf *mp;
2887	LPFC_MBOXQ_t *pmb;
2888	MAILBOX_t *mb;
2889	READ_LA_VAR *la;
2890	uint8_t att_type;
2891
2892	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2893	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2894		return;
2895	phba->fcoe_eventtag = acqe_link->event_tag;
2896	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2897	if (!pmb) {
2898		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2899				"0395 The mboxq allocation failed\n");
2900		return;
2901	}
2902	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2903	if (!mp) {
2904		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2905				"0396 The lpfc_dmabuf allocation failed\n");
2906		goto out_free_pmb;
2907	}
2908	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2909	if (!mp->virt) {
2910		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2911				"0397 The mbuf allocation failed\n");
2912		goto out_free_dmabuf;
2913	}
2914
2915	/* Cleanup any outstanding ELS commands */
2916	lpfc_els_flush_all_cmd(phba);
2917
2918	/* Block ELS IOCBs until we have done process link event */
2919	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2920
2921	/* Update link event statistics */
2922	phba->sli.slistat.link_event++;
2923
2924	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2925	lpfc_read_la(phba, pmb, mp);
2926	pmb->vport = phba->pport;
2927
2928	/* Parse and translate status field */
2929	mb = &pmb->u.mb;
2930	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2931
2932	/* Parse and translate link attention fields */
2933	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2934	la->eventTag = acqe_link->event_tag;
2935	la->attType = att_type;
2936	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2937
2938	/* Fake the the following irrelvant fields */
2939	la->topology = TOPOLOGY_PT_PT;
2940	la->granted_AL_PA = 0;
2941	la->il = 0;
2942	la->pb = 0;
2943	la->fa = 0;
2944	la->mm = 0;
2945
2946	/* Keep the link status for extra SLI4 state machine reference */
2947	phba->sli4_hba.link_state.speed =
2948				bf_get(lpfc_acqe_link_speed, acqe_link);
2949	phba->sli4_hba.link_state.duplex =
2950				bf_get(lpfc_acqe_link_duplex, acqe_link);
2951	phba->sli4_hba.link_state.status =
2952				bf_get(lpfc_acqe_link_status, acqe_link);
2953	phba->sli4_hba.link_state.physical =
2954				bf_get(lpfc_acqe_link_physical, acqe_link);
2955	phba->sli4_hba.link_state.fault =
2956				bf_get(lpfc_acqe_link_fault, acqe_link);
2957
2958	/* Invoke the lpfc_handle_latt mailbox command callback function */
2959	lpfc_mbx_cmpl_read_la(phba, pmb);
2960
2961	return;
2962
2963out_free_dmabuf:
2964	kfree(mp);
2965out_free_pmb:
2966	mempool_free(pmb, phba->mbox_mem_pool);
2967}
2968
2969/**
2970 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2971 * @phba: pointer to lpfc hba data structure.
2972 * @acqe_link: pointer to the async fcoe completion queue entry.
2973 *
2974 * This routine is to handle the SLI4 asynchronous fcoe event.
2975 **/
2976static void
2977lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2978			 struct lpfc_acqe_fcoe *acqe_fcoe)
2979{
2980	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2981	int rc;
2982	struct lpfc_vport *vport;
2983	struct lpfc_nodelist *ndlp;
2984	struct Scsi_Host  *shost;
2985
2986	phba->fc_eventTag = acqe_fcoe->event_tag;
2987	phba->fcoe_eventtag = acqe_fcoe->event_tag;
2988	switch (event_type) {
2989	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2990		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2991			"2546 New FCF found index 0x%x tag 0x%x\n",
2992			acqe_fcoe->index,
2993			acqe_fcoe->event_tag);
2994		/*
2995		 * If the current FCF is in discovered state, or
2996		 * FCF discovery is in progress do nothing.
2997		 */
2998		spin_lock_irq(&phba->hbalock);
2999		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
3000		   (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3001			spin_unlock_irq(&phba->hbalock);
3002			break;
3003		}
3004		spin_unlock_irq(&phba->hbalock);
3005
3006		/* Read the FCF table and re-discover SAN. */
3007		rc = lpfc_sli4_read_fcf_record(phba,
3008			LPFC_FCOE_FCF_GET_FIRST);
3009		if (rc)
3010			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3011				"2547 Read FCF record failed 0x%x\n",
3012				rc);
3013		break;
3014
3015	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3016		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3017			"2548 FCF Table full count 0x%x tag 0x%x\n",
3018			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3019			acqe_fcoe->event_tag);
3020		break;
3021
3022	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3023		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3024			"2549 FCF disconnected fron network index 0x%x"
3025			" tag 0x%x\n", acqe_fcoe->index,
3026			acqe_fcoe->event_tag);
3027		/* If the event is not for currently used fcf do nothing */
3028		if (phba->fcf.fcf_indx != acqe_fcoe->index)
3029			break;
3030		/*
3031		 * Currently, driver support only one FCF - so treat this as
3032		 * a link down.
3033		 */
3034		lpfc_linkdown(phba);
3035		/* Unregister FCF if no devices connected to it */
3036		lpfc_unregister_unused_fcf(phba);
3037		break;
3038	case LPFC_FCOE_EVENT_TYPE_CVL:
3039		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3040			"2718 Clear Virtual Link Received for VPI 0x%x"
3041			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3042		vport = lpfc_find_vport_by_vpid(phba,
3043				acqe_fcoe->index /*- phba->vpi_base*/);
3044		if (!vport)
3045			break;
3046		ndlp = lpfc_findnode_did(vport, Fabric_DID);
3047		if (!ndlp)
3048			break;
3049		shost = lpfc_shost_from_vport(vport);
3050		lpfc_linkdown_port(vport);
3051		if (vport->port_type != LPFC_NPIV_PORT) {
3052			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3053			spin_lock_irq(shost->host_lock);
3054			ndlp->nlp_flag |= NLP_DELAY_TMO;
3055			spin_unlock_irq(shost->host_lock);
3056			ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
3057			vport->port_state = LPFC_FLOGI;
3058		}
3059		break;
3060	default:
3061		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3062			"0288 Unknown FCoE event type 0x%x event tag "
3063			"0x%x\n", event_type, acqe_fcoe->event_tag);
3064		break;
3065	}
3066}
3067
3068/**
3069 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3070 * @phba: pointer to lpfc hba data structure.
3071 * @acqe_link: pointer to the async dcbx completion queue entry.
3072 *
3073 * This routine is to handle the SLI4 asynchronous dcbx event.
3074 **/
3075static void
3076lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3077			 struct lpfc_acqe_dcbx *acqe_dcbx)
3078{
3079	phba->fc_eventTag = acqe_dcbx->event_tag;
3080	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3081			"0290 The SLI4 DCBX asynchronous event is not "
3082			"handled yet\n");
3083}
3084
3085/**
3086 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3087 * @phba: pointer to lpfc hba data structure.
3088 *
3089 * This routine is invoked by the worker thread to process all the pending
3090 * SLI4 asynchronous events.
3091 **/
3092void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3093{
3094	struct lpfc_cq_event *cq_event;
3095
3096	/* First, declare the async event has been handled */
3097	spin_lock_irq(&phba->hbalock);
3098	phba->hba_flag &= ~ASYNC_EVENT;
3099	spin_unlock_irq(&phba->hbalock);
3100	/* Now, handle all the async events */
3101	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3102		/* Get the first event from the head of the event queue */
3103		spin_lock_irq(&phba->hbalock);
3104		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3105				 cq_event, struct lpfc_cq_event, list);
3106		spin_unlock_irq(&phba->hbalock);
3107		/* Process the asynchronous event */
3108		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3109		case LPFC_TRAILER_CODE_LINK:
3110			lpfc_sli4_async_link_evt(phba,
3111						 &cq_event->cqe.acqe_link);
3112			break;
3113		case LPFC_TRAILER_CODE_FCOE:
3114			lpfc_sli4_async_fcoe_evt(phba,
3115						 &cq_event->cqe.acqe_fcoe);
3116			break;
3117		case LPFC_TRAILER_CODE_DCBX:
3118			lpfc_sli4_async_dcbx_evt(phba,
3119						 &cq_event->cqe.acqe_dcbx);
3120			break;
3121		default:
3122			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3123					"1804 Invalid asynchrous event code: "
3124					"x%x\n", bf_get(lpfc_trailer_code,
3125					&cq_event->cqe.mcqe_cmpl));
3126			break;
3127		}
3128		/* Free the completion event processed to the free pool */
3129		lpfc_sli4_cq_event_release(phba, cq_event);
3130	}
3131}
3132
3133/**
3134 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3135 * @phba: pointer to lpfc hba data structure.
3136 * @dev_grp: The HBA PCI-Device group number.
3137 *
3138 * This routine is invoked to set up the per HBA PCI-Device group function
3139 * API jump table entries.
3140 *
3141 * Return: 0 if success, otherwise -ENODEV
3142 **/
3143int
3144lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3145{
3146	int rc;
3147
3148	/* Set up lpfc PCI-device group */
3149	phba->pci_dev_grp = dev_grp;
3150
3151	/* The LPFC_PCI_DEV_OC uses SLI4 */
3152	if (dev_grp == LPFC_PCI_DEV_OC)
3153		phba->sli_rev = LPFC_SLI_REV4;
3154
3155	/* Set up device INIT API function jump table */
3156	rc = lpfc_init_api_table_setup(phba, dev_grp);
3157	if (rc)
3158		return -ENODEV;
3159	/* Set up SCSI API function jump table */
3160	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3161	if (rc)
3162		return -ENODEV;
3163	/* Set up SLI API function jump table */
3164	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3165	if (rc)
3166		return -ENODEV;
3167	/* Set up MBOX API function jump table */
3168	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3169	if (rc)
3170		return -ENODEV;
3171
3172	return 0;
3173}
3174
3175/**
3176 * lpfc_log_intr_mode - Log the active interrupt mode
3177 * @phba: pointer to lpfc hba data structure.
3178 * @intr_mode: active interrupt mode adopted.
3179 *
3180 * This routine it invoked to log the currently used active interrupt mode
3181 * to the device.
3182 **/
3183static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3184{
3185	switch (intr_mode) {
3186	case 0:
3187		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3188				"0470 Enable INTx interrupt mode.\n");
3189		break;
3190	case 1:
3191		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3192				"0481 Enabled MSI interrupt mode.\n");
3193		break;
3194	case 2:
3195		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3196				"0480 Enabled MSI-X interrupt mode.\n");
3197		break;
3198	default:
3199		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3200				"0482 Illegal interrupt mode.\n");
3201		break;
3202	}
3203	return;
3204}
3205
3206/**
3207 * lpfc_enable_pci_dev - Enable a generic PCI device.
3208 * @phba: pointer to lpfc hba data structure.
3209 *
3210 * This routine is invoked to enable the PCI device that is common to all
3211 * PCI devices.
3212 *
3213 * Return codes
3214 * 	0 - sucessful
3215 * 	other values - error
3216 **/
3217static int
3218lpfc_enable_pci_dev(struct lpfc_hba *phba)
3219{
3220	struct pci_dev *pdev;
3221	int bars;
3222
3223	/* Obtain PCI device reference */
3224	if (!phba->pcidev)
3225		goto out_error;
3226	else
3227		pdev = phba->pcidev;
3228	/* Select PCI BARs */
3229	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3230	/* Enable PCI device */
3231	if (pci_enable_device_mem(pdev))
3232		goto out_error;
3233	/* Request PCI resource for the device */
3234	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3235		goto out_disable_device;
3236	/* Set up device as PCI master and save state for EEH */
3237	pci_set_master(pdev);
3238	pci_try_set_mwi(pdev);
3239	pci_save_state(pdev);
3240
3241	return 0;
3242
3243out_disable_device:
3244	pci_disable_device(pdev);
3245out_error:
3246	return -ENODEV;
3247}
3248
3249/**
3250 * lpfc_disable_pci_dev - Disable a generic PCI device.
3251 * @phba: pointer to lpfc hba data structure.
3252 *
3253 * This routine is invoked to disable the PCI device that is common to all
3254 * PCI devices.
3255 **/
3256static void
3257lpfc_disable_pci_dev(struct lpfc_hba *phba)
3258{
3259	struct pci_dev *pdev;
3260	int bars;
3261
3262	/* Obtain PCI device reference */
3263	if (!phba->pcidev)
3264		return;
3265	else
3266		pdev = phba->pcidev;
3267	/* Select PCI BARs */
3268	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3269	/* Release PCI resource and disable PCI device */
3270	pci_release_selected_regions(pdev, bars);
3271	pci_disable_device(pdev);
3272	/* Null out PCI private reference to driver */
3273	pci_set_drvdata(pdev, NULL);
3274
3275	return;
3276}
3277
3278/**
3279 * lpfc_reset_hba - Reset a hba
3280 * @phba: pointer to lpfc hba data structure.
3281 *
3282 * This routine is invoked to reset a hba device. It brings the HBA
3283 * offline, performs a board restart, and then brings the board back
3284 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3285 * on outstanding mailbox commands.
3286 **/
3287void
3288lpfc_reset_hba(struct lpfc_hba *phba)
3289{
3290	/* If resets are disabled then set error state and return. */
3291	if (!phba->cfg_enable_hba_reset) {
3292		phba->link_state = LPFC_HBA_ERROR;
3293		return;
3294	}
3295	lpfc_offline_prep(phba);
3296	lpfc_offline(phba);
3297	lpfc_sli_brdrestart(phba);
3298	lpfc_online(phba);
3299	lpfc_unblock_mgmt_io(phba);
3300}
3301
3302/**
3303 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3304 * @phba: pointer to lpfc hba data structure.
3305 *
3306 * This routine is invoked to set up the driver internal resources specific to
3307 * support the SLI-3 HBA device it attached to.
3308 *
3309 * Return codes
3310 * 	0 - sucessful
3311 * 	other values - error
3312 **/
3313static int
3314lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3315{
3316	struct lpfc_sli *psli;
3317
3318	/*
3319	 * Initialize timers used by driver
3320	 */
3321
3322	/* Heartbeat timer */
3323	init_timer(&phba->hb_tmofunc);
3324	phba->hb_tmofunc.function = lpfc_hb_timeout;
3325	phba->hb_tmofunc.data = (unsigned long)phba;
3326
3327	psli = &phba->sli;
3328	/* MBOX heartbeat timer */
3329	init_timer(&psli->mbox_tmo);
3330	psli->mbox_tmo.function = lpfc_mbox_timeout;
3331	psli->mbox_tmo.data = (unsigned long) phba;
3332	/* FCP polling mode timer */
3333	init_timer(&phba->fcp_poll_timer);
3334	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3335	phba->fcp_poll_timer.data = (unsigned long) phba;
3336	/* Fabric block timer */
3337	init_timer(&phba->fabric_block_timer);
3338	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3339	phba->fabric_block_timer.data = (unsigned long) phba;
3340	/* EA polling mode timer */
3341	init_timer(&phba->eratt_poll);
3342	phba->eratt_poll.function = lpfc_poll_eratt;
3343	phba->eratt_poll.data = (unsigned long) phba;
3344
3345	/* Host attention work mask setup */
3346	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3347	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3348
3349	/* Get all the module params for configuring this host */
3350	lpfc_get_cfgparam(phba);
3351	/*
3352	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3353	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3354	 * 2 segments are added since the IOCB needs a command and response bde.
3355	 */
3356	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3357		sizeof(struct fcp_rsp) +
3358			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3359
3360	if (phba->cfg_enable_bg) {
3361		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3362		phba->cfg_sg_dma_buf_size +=
3363			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3364	}
3365
3366	/* Also reinitialize the host templates with new values. */
3367	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3368	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3369
3370	phba->max_vpi = LPFC_MAX_VPI;
3371	/* This will be set to correct value after config_port mbox */
3372	phba->max_vports = 0;
3373
3374	/*
3375	 * Initialize the SLI Layer to run with lpfc HBAs.
3376	 */
3377	lpfc_sli_setup(phba);
3378	lpfc_sli_queue_setup(phba);
3379
3380	/* Allocate device driver memory */
3381	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3382		return -ENOMEM;
3383
3384	return 0;
3385}
3386
3387/**
3388 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3389 * @phba: pointer to lpfc hba data structure.
3390 *
3391 * This routine is invoked to unset the driver internal resources set up
3392 * specific for supporting the SLI-3 HBA device it attached to.
3393 **/
3394static void
3395lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3396{
3397	/* Free device driver memory allocated */
3398	lpfc_mem_free_all(phba);
3399
3400	return;
3401}
3402
3403/**
3404 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3405 * @phba: pointer to lpfc hba data structure.
3406 *
3407 * This routine is invoked to set up the driver internal resources specific to
3408 * support the SLI-4 HBA device it attached to.
3409 *
3410 * Return codes
3411 * 	0 - sucessful
3412 * 	other values - error
3413 **/
3414static int
3415lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3416{
3417	struct lpfc_sli *psli;
3418	int rc;
3419	int i, hbq_count;
3420
3421	/* Before proceed, wait for POST done and device ready */
3422	rc = lpfc_sli4_post_status_check(phba);
3423	if (rc)
3424		return -ENODEV;
3425
3426	/*
3427	 * Initialize timers used by driver
3428	 */
3429
3430	/* Heartbeat timer */
3431	init_timer(&phba->hb_tmofunc);
3432	phba->hb_tmofunc.function = lpfc_hb_timeout;
3433	phba->hb_tmofunc.data = (unsigned long)phba;
3434
3435	psli = &phba->sli;
3436	/* MBOX heartbeat timer */
3437	init_timer(&psli->mbox_tmo);
3438	psli->mbox_tmo.function = lpfc_mbox_timeout;
3439	psli->mbox_tmo.data = (unsigned long) phba;
3440	/* Fabric block timer */
3441	init_timer(&phba->fabric_block_timer);
3442	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3443	phba->fabric_block_timer.data = (unsigned long) phba;
3444	/* EA polling mode timer */
3445	init_timer(&phba->eratt_poll);
3446	phba->eratt_poll.function = lpfc_poll_eratt;
3447	phba->eratt_poll.data = (unsigned long) phba;
3448	/*
3449	 * We need to do a READ_CONFIG mailbox command here before
3450	 * calling lpfc_get_cfgparam. For VFs this will report the
3451	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3452	 * All of the resources allocated
3453	 * for this Port are tied to these values.
3454	 */
3455	/* Get all the module params for configuring this host */
3456	lpfc_get_cfgparam(phba);
3457	phba->max_vpi = LPFC_MAX_VPI;
3458	/* This will be set to correct value after the read_config mbox */
3459	phba->max_vports = 0;
3460
3461	/* Program the default value of vlan_id and fc_map */
3462	phba->valid_vlan = 0;
3463	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3464	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3465	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3466
3467	/*
3468	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3469	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3470	 * 2 segments are added since the IOCB needs a command and response bde.
3471	 * To insure that the scsi sgl does not cross a 4k page boundary only
3472	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3473	 * Table of sgl sizes and seg_cnt:
3474	 * sgl size, 	sg_seg_cnt	total seg
3475	 * 1k		50		52
3476	 * 2k		114		116
3477	 * 4k		242		244
3478	 * 8k		498		500
3479	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3480	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3481	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3482	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3483	 */
3484	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3485		phba->cfg_sg_seg_cnt = 50;
3486	else if (phba->cfg_sg_seg_cnt <= 114)
3487		phba->cfg_sg_seg_cnt = 114;
3488	else if (phba->cfg_sg_seg_cnt <= 242)
3489		phba->cfg_sg_seg_cnt = 242;
3490	else
3491		phba->cfg_sg_seg_cnt = 498;
3492
3493	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3494					+ sizeof(struct fcp_rsp);
3495	phba->cfg_sg_dma_buf_size +=
3496		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3497
3498	/* Initialize buffer queue management fields */
3499	hbq_count = lpfc_sli_hbq_count();
3500	for (i = 0; i < hbq_count; ++i)
3501		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3502	INIT_LIST_HEAD(&phba->rb_pend_list);
3503	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3504	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3505
3506	/*
3507	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3508	 */
3509	/* Initialize the Abort scsi buffer list used by driver */
3510	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3511	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3512	/* This abort list used by worker thread */
3513	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3514
3515	/*
3516	 * Initialize dirver internal slow-path work queues
3517	 */
3518
3519	/* Driver internel slow-path CQ Event pool */
3520	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3521	/* Response IOCB work queue list */
3522	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3523	/* Asynchronous event CQ Event work queue list */
3524	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3525	/* Fast-path XRI aborted CQ Event work queue list */
3526	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3527	/* Slow-path XRI aborted CQ Event work queue list */
3528	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3529	/* Receive queue CQ Event work queue list */
3530	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3531
3532	/* Initialize the driver internal SLI layer lists. */
3533	lpfc_sli_setup(phba);
3534	lpfc_sli_queue_setup(phba);
3535
3536	/* Allocate device driver memory */
3537	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3538	if (rc)
3539		return -ENOMEM;
3540
3541	/* Create the bootstrap mailbox command */
3542	rc = lpfc_create_bootstrap_mbox(phba);
3543	if (unlikely(rc))
3544		goto out_free_mem;
3545
3546	/* Set up the host's endian order with the device. */
3547	rc = lpfc_setup_endian_order(phba);
3548	if (unlikely(rc))
3549		goto out_free_bsmbx;
3550
3551	rc = lpfc_sli4_fw_cfg_check(phba);
3552	if (unlikely(rc))
3553		goto out_free_bsmbx;
3554
3555	/* Set up the hba's configuration parameters. */
3556	rc = lpfc_sli4_read_config(phba);
3557	if (unlikely(rc))
3558		goto out_free_bsmbx;
3559
3560	/* Perform a function reset */
3561	rc = lpfc_pci_function_reset(phba);
3562	if (unlikely(rc))
3563		goto out_free_bsmbx;
3564
3565	/* Create all the SLI4 queues */
3566	rc = lpfc_sli4_queue_create(phba);
3567	if (rc)
3568		goto out_free_bsmbx;
3569
3570	/* Create driver internal CQE event pool */
3571	rc = lpfc_sli4_cq_event_pool_create(phba);
3572	if (rc)
3573		goto out_destroy_queue;
3574
3575	/* Initialize and populate the iocb list per host */
3576	rc = lpfc_init_sgl_list(phba);
3577	if (rc) {
3578		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3579				"1400 Failed to initialize sgl list.\n");
3580		goto out_destroy_cq_event_pool;
3581	}
3582	rc = lpfc_init_active_sgl_array(phba);
3583	if (rc) {
3584		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3585				"1430 Failed to initialize sgl list.\n");
3586		goto out_free_sgl_list;
3587	}
3588
3589	rc = lpfc_sli4_init_rpi_hdrs(phba);
3590	if (rc) {
3591		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3592				"1432 Failed to initialize rpi headers.\n");
3593		goto out_free_active_sgl;
3594	}
3595
3596	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3597				    phba->cfg_fcp_eq_count), GFP_KERNEL);
3598	if (!phba->sli4_hba.fcp_eq_hdl) {
3599		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3600				"2572 Failed allocate memory for fast-path "
3601				"per-EQ handle array\n");
3602		goto out_remove_rpi_hdrs;
3603	}
3604
3605	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3606				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3607	if (!phba->sli4_hba.msix_entries) {
3608		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3609				"2573 Failed allocate memory for msi-x "
3610				"interrupt vector entries\n");
3611		goto out_free_fcp_eq_hdl;
3612	}
3613
3614	return rc;
3615
3616out_free_fcp_eq_hdl:
3617	kfree(phba->sli4_hba.fcp_eq_hdl);
3618out_remove_rpi_hdrs:
3619	lpfc_sli4_remove_rpi_hdrs(phba);
3620out_free_active_sgl:
3621	lpfc_free_active_sgl(phba);
3622out_free_sgl_list:
3623	lpfc_free_sgl_list(phba);
3624out_destroy_cq_event_pool:
3625	lpfc_sli4_cq_event_pool_destroy(phba);
3626out_destroy_queue:
3627	lpfc_sli4_queue_destroy(phba);
3628out_free_bsmbx:
3629	lpfc_destroy_bootstrap_mbox(phba);
3630out_free_mem:
3631	lpfc_mem_free(phba);
3632	return rc;
3633}
3634
3635/**
3636 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3637 * @phba: pointer to lpfc hba data structure.
3638 *
3639 * This routine is invoked to unset the driver internal resources set up
3640 * specific for supporting the SLI-4 HBA device it attached to.
3641 **/
3642static void
3643lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3644{
3645	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3646
3647	/* unregister default FCFI from the HBA */
3648	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3649
3650	/* Free the default FCR table */
3651	lpfc_sli_remove_dflt_fcf(phba);
3652
3653	/* Free memory allocated for msi-x interrupt vector entries */
3654	kfree(phba->sli4_hba.msix_entries);
3655
3656	/* Free memory allocated for fast-path work queue handles */
3657	kfree(phba->sli4_hba.fcp_eq_hdl);
3658
3659	/* Free the allocated rpi headers. */
3660	lpfc_sli4_remove_rpi_hdrs(phba);
3661	lpfc_sli4_remove_rpis(phba);
3662
3663	/* Free the ELS sgl list */
3664	lpfc_free_active_sgl(phba);
3665	lpfc_free_sgl_list(phba);
3666
3667	/* Free the SCSI sgl management array */
3668	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3669
3670	/* Free the SLI4 queues */
3671	lpfc_sli4_queue_destroy(phba);
3672
3673	/* Free the completion queue EQ event pool */
3674	lpfc_sli4_cq_event_release_all(phba);
3675	lpfc_sli4_cq_event_pool_destroy(phba);
3676
3677	/* Reset SLI4 HBA FCoE function */
3678	lpfc_pci_function_reset(phba);
3679
3680	/* Free the bsmbx region. */
3681	lpfc_destroy_bootstrap_mbox(phba);
3682
3683	/* Free the SLI Layer memory with SLI4 HBAs */
3684	lpfc_mem_free_all(phba);
3685
3686	/* Free the current connect table */
3687	list_for_each_entry_safe(conn_entry, next_conn_entry,
3688		&phba->fcf_conn_rec_list, list) {
3689		list_del_init(&conn_entry->list);
3690		kfree(conn_entry);
3691	}
3692
3693	return;
3694}
3695
3696/**
3697 * lpfc_init_api_table_setup - Set up init api fucntion jump table
3698 * @phba: The hba struct for which this call is being executed.
3699 * @dev_grp: The HBA PCI-Device group number.
3700 *
3701 * This routine sets up the device INIT interface API function jump table
3702 * in @phba struct.
3703 *
3704 * Returns: 0 - success, -ENODEV - failure.
3705 **/
3706int
3707lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3708{
3709	switch (dev_grp) {
3710	case LPFC_PCI_DEV_LP:
3711		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3712		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3713		phba->lpfc_stop_port = lpfc_stop_port_s3;
3714		break;
3715	case LPFC_PCI_DEV_OC:
3716		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3717		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3718		phba->lpfc_stop_port = lpfc_stop_port_s4;
3719		break;
3720	default:
3721		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3722				"1431 Invalid HBA PCI-device group: 0x%x\n",
3723				dev_grp);
3724		return -ENODEV;
3725		break;
3726	}
3727	return 0;
3728}
3729
3730/**
3731 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
3732 * @phba: pointer to lpfc hba data structure.
3733 *
3734 * This routine is invoked to set up the driver internal resources before the
3735 * device specific resource setup to support the HBA device it attached to.
3736 *
3737 * Return codes
3738 *	0 - sucessful
3739 *	other values - error
3740 **/
3741static int
3742lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3743{
3744	/*
3745	 * Driver resources common to all SLI revisions
3746	 */
3747	atomic_set(&phba->fast_event_count, 0);
3748	spin_lock_init(&phba->hbalock);
3749
3750	/* Initialize ndlp management spinlock */
3751	spin_lock_init(&phba->ndlp_lock);
3752
3753	INIT_LIST_HEAD(&phba->port_list);
3754	INIT_LIST_HEAD(&phba->work_list);
3755	init_waitqueue_head(&phba->wait_4_mlo_m_q);
3756
3757	/* Initialize the wait queue head for the kernel thread */
3758	init_waitqueue_head(&phba->work_waitq);
3759
3760	/* Initialize the scsi buffer list used by driver for scsi IO */
3761	spin_lock_init(&phba->scsi_buf_list_lock);
3762	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3763
3764	/* Initialize the fabric iocb list */
3765	INIT_LIST_HEAD(&phba->fabric_iocb_list);
3766
3767	/* Initialize list to save ELS buffers */
3768	INIT_LIST_HEAD(&phba->elsbuf);
3769
3770	/* Initialize FCF connection rec list */
3771	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3772
3773	return 0;
3774}
3775
3776/**
3777 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3778 * @phba: pointer to lpfc hba data structure.
3779 *
3780 * This routine is invoked to set up the driver internal resources after the
3781 * device specific resource setup to support the HBA device it attached to.
3782 *
3783 * Return codes
3784 * 	0 - sucessful
3785 * 	other values - error
3786 **/
3787static int
3788lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3789{
3790	int error;
3791
3792	/* Startup the kernel thread for this host adapter. */
3793	phba->worker_thread = kthread_run(lpfc_do_work, phba,
3794					  "lpfc_worker_%d", phba->brd_no);
3795	if (IS_ERR(phba->worker_thread)) {
3796		error = PTR_ERR(phba->worker_thread);
3797		return error;
3798	}
3799
3800	return 0;
3801}
3802
3803/**
3804 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3805 * @phba: pointer to lpfc hba data structure.
3806 *
3807 * This routine is invoked to unset the driver internal resources set up after
3808 * the device specific resource setup for supporting the HBA device it
3809 * attached to.
3810 **/
3811static void
3812lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3813{
3814	/* Stop kernel worker thread */
3815	kthread_stop(phba->worker_thread);
3816}
3817
3818/**
3819 * lpfc_free_iocb_list - Free iocb list.
3820 * @phba: pointer to lpfc hba data structure.
3821 *
3822 * This routine is invoked to free the driver's IOCB list and memory.
3823 **/
3824static void
3825lpfc_free_iocb_list(struct lpfc_hba *phba)
3826{
3827	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3828
3829	spin_lock_irq(&phba->hbalock);
3830	list_for_each_entry_safe(iocbq_entry, iocbq_next,
3831				 &phba->lpfc_iocb_list, list) {
3832		list_del(&iocbq_entry->list);
3833		kfree(iocbq_entry);
3834		phba->total_iocbq_bufs--;
3835	}
3836	spin_unlock_irq(&phba->hbalock);
3837
3838	return;
3839}
3840
3841/**
3842 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3843 * @phba: pointer to lpfc hba data structure.
3844 *
3845 * This routine is invoked to allocate and initizlize the driver's IOCB
3846 * list and set up the IOCB tag array accordingly.
3847 *
3848 * Return codes
3849 *	0 - sucessful
3850 *	other values - error
3851 **/
3852static int
3853lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3854{
3855	struct lpfc_iocbq *iocbq_entry = NULL;
3856	uint16_t iotag;
3857	int i;
3858
3859	/* Initialize and populate the iocb list per host.  */
3860	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3861	for (i = 0; i < iocb_count; i++) {
3862		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3863		if (iocbq_entry == NULL) {
3864			printk(KERN_ERR "%s: only allocated %d iocbs of "
3865				"expected %d count. Unloading driver.\n",
3866				__func__, i, LPFC_IOCB_LIST_CNT);
3867			goto out_free_iocbq;
3868		}
3869
3870		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3871		if (iotag == 0) {
3872			kfree(iocbq_entry);
3873			printk(KERN_ERR "%s: failed to allocate IOTAG. "
3874				"Unloading driver.\n", __func__);
3875			goto out_free_iocbq;
3876		}
3877		iocbq_entry->sli4_xritag = NO_XRI;
3878
3879		spin_lock_irq(&phba->hbalock);
3880		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3881		phba->total_iocbq_bufs++;
3882		spin_unlock_irq(&phba->hbalock);
3883	}
3884
3885	return 0;
3886
3887out_free_iocbq:
3888	lpfc_free_iocb_list(phba);
3889
3890	return -ENOMEM;
3891}
3892
3893/**
3894 * lpfc_free_sgl_list - Free sgl list.
3895 * @phba: pointer to lpfc hba data structure.
3896 *
3897 * This routine is invoked to free the driver's sgl list and memory.
3898 **/
3899static void
3900lpfc_free_sgl_list(struct lpfc_hba *phba)
3901{
3902	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3903	LIST_HEAD(sglq_list);
3904	int rc = 0;
3905
3906	spin_lock_irq(&phba->hbalock);
3907	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3908	spin_unlock_irq(&phba->hbalock);
3909
3910	list_for_each_entry_safe(sglq_entry, sglq_next,
3911				 &sglq_list, list) {
3912		list_del(&sglq_entry->list);
3913		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3914		kfree(sglq_entry);
3915		phba->sli4_hba.total_sglq_bufs--;
3916	}
3917	rc = lpfc_sli4_remove_all_sgl_pages(phba);
3918	if (rc) {
3919		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3920			"2005 Unable to deregister pages from HBA: %x", rc);
3921	}
3922	kfree(phba->sli4_hba.lpfc_els_sgl_array);
3923}
3924
3925/**
3926 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3927 * @phba: pointer to lpfc hba data structure.
3928 *
3929 * This routine is invoked to allocate the driver's active sgl memory.
3930 * This array will hold the sglq_entry's for active IOs.
3931 **/
3932static int
3933lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3934{
3935	int size;
3936	size = sizeof(struct lpfc_sglq *);
3937	size *= phba->sli4_hba.max_cfg_param.max_xri;
3938
3939	phba->sli4_hba.lpfc_sglq_active_list =
3940		kzalloc(size, GFP_KERNEL);
3941	if (!phba->sli4_hba.lpfc_sglq_active_list)
3942		return -ENOMEM;
3943	return 0;
3944}
3945
3946/**
3947 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3948 * @phba: pointer to lpfc hba data structure.
3949 *
3950 * This routine is invoked to walk through the array of active sglq entries
3951 * and free all of the resources.
3952 * This is just a place holder for now.
3953 **/
3954static void
3955lpfc_free_active_sgl(struct lpfc_hba *phba)
3956{
3957	kfree(phba->sli4_hba.lpfc_sglq_active_list);
3958}
3959
3960/**
3961 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3962 * @phba: pointer to lpfc hba data structure.
3963 *
3964 * This routine is invoked to allocate and initizlize the driver's sgl
3965 * list and set up the sgl xritag tag array accordingly.
3966 *
3967 * Return codes
3968 *	0 - sucessful
3969 *	other values - error
3970 **/
3971static int
3972lpfc_init_sgl_list(struct lpfc_hba *phba)
3973{
3974	struct lpfc_sglq *sglq_entry = NULL;
3975	int i;
3976	int els_xri_cnt;
3977
3978	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3979	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3980				"2400 lpfc_init_sgl_list els %d.\n",
3981				els_xri_cnt);
3982	/* Initialize and populate the sglq list per host/VF. */
3983	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3984	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3985
3986	/* Sanity check on XRI management */
3987	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3988		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3989				"2562 No room left for SCSI XRI allocation: "
3990				"max_xri=%d, els_xri=%d\n",
3991				phba->sli4_hba.max_cfg_param.max_xri,
3992				els_xri_cnt);
3993		return -ENOMEM;
3994	}
3995
3996	/* Allocate memory for the ELS XRI management array */
3997	phba->sli4_hba.lpfc_els_sgl_array =
3998			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3999			GFP_KERNEL);
4000
4001	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4002		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4003				"2401 Failed to allocate memory for ELS "
4004				"XRI management array of size %d.\n",
4005				els_xri_cnt);
4006		return -ENOMEM;
4007	}
4008
4009	/* Keep the SCSI XRI into the XRI management array */
4010	phba->sli4_hba.scsi_xri_max =
4011			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4012	phba->sli4_hba.scsi_xri_cnt = 0;
4013
4014	phba->sli4_hba.lpfc_scsi_psb_array =
4015			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4016			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4017
4018	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4019		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4020				"2563 Failed to allocate memory for SCSI "
4021				"XRI management array of size %d.\n",
4022				phba->sli4_hba.scsi_xri_max);
4023		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4024		return -ENOMEM;
4025	}
4026
4027	for (i = 0; i < els_xri_cnt; i++) {
4028		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4029		if (sglq_entry == NULL) {
4030			printk(KERN_ERR "%s: only allocated %d sgls of "
4031				"expected %d count. Unloading driver.\n",
4032				__func__, i, els_xri_cnt);
4033			goto out_free_mem;
4034		}
4035
4036		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4037		if (sglq_entry->sli4_xritag == NO_XRI) {
4038			kfree(sglq_entry);
4039			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4040				"Unloading driver.\n", __func__);
4041			goto out_free_mem;
4042		}
4043		sglq_entry->buff_type = GEN_BUFF_TYPE;
4044		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4045		if (sglq_entry->virt == NULL) {
4046			kfree(sglq_entry);
4047			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4048				"Unloading driver.\n", __func__);
4049			goto out_free_mem;
4050		}
4051		sglq_entry->sgl = sglq_entry->virt;
4052		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4053
4054		/* The list order is used by later block SGL registraton */
4055		spin_lock_irq(&phba->hbalock);
4056		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4057		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4058		phba->sli4_hba.total_sglq_bufs++;
4059		spin_unlock_irq(&phba->hbalock);
4060	}
4061	return 0;
4062
4063out_free_mem:
4064	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4065	lpfc_free_sgl_list(phba);
4066	return -ENOMEM;
4067}
4068
4069/**
4070 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4071 * @phba: pointer to lpfc hba data structure.
4072 *
4073 * This routine is invoked to post rpi header templates to the
4074 * HBA consistent with the SLI-4 interface spec.  This routine
4075 * posts a PAGE_SIZE memory region to the port to hold up to
4076 * PAGE_SIZE modulo 64 rpi context headers.
4077 * No locks are held here because this is an initialization routine
4078 * called only from probe or lpfc_online when interrupts are not
4079 * enabled and the driver is reinitializing the device.
4080 *
4081 * Return codes
4082 * 	0 - sucessful
4083 * 	ENOMEM - No availble memory
4084 *      EIO - The mailbox failed to complete successfully.
4085 **/
4086int
4087lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4088{
4089	int rc = 0;
4090	int longs;
4091	uint16_t rpi_count;
4092	struct lpfc_rpi_hdr *rpi_hdr;
4093
4094	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4095
4096	/*
4097	 * Provision an rpi bitmask range for discovery. The total count
4098	 * is the difference between max and base + 1.
4099	 */
4100	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4101		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4102
4103	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4104	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4105					   GFP_KERNEL);
4106	if (!phba->sli4_hba.rpi_bmask)
4107		return -ENOMEM;
4108
4109	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4110	if (!rpi_hdr) {
4111		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4112				"0391 Error during rpi post operation\n");
4113		lpfc_sli4_remove_rpis(phba);
4114		rc = -ENODEV;
4115	}
4116
4117	return rc;
4118}
4119
4120/**
4121 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4122 * @phba: pointer to lpfc hba data structure.
4123 *
4124 * This routine is invoked to allocate a single 4KB memory region to
4125 * support rpis and stores them in the phba.  This single region
4126 * provides support for up to 64 rpis.  The region is used globally
4127 * by the device.
4128 *
4129 * Returns:
4130 *   A valid rpi hdr on success.
4131 *   A NULL pointer on any failure.
4132 **/
4133struct lpfc_rpi_hdr *
4134lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4135{
4136	uint16_t rpi_limit, curr_rpi_range;
4137	struct lpfc_dmabuf *dmabuf;
4138	struct lpfc_rpi_hdr *rpi_hdr;
4139
4140	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4141		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4142
4143	spin_lock_irq(&phba->hbalock);
4144	curr_rpi_range = phba->sli4_hba.next_rpi;
4145	spin_unlock_irq(&phba->hbalock);
4146
4147	/*
4148	 * The port has a limited number of rpis. The increment here
4149	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4150	 * and to allow the full max_rpi range per port.
4151	 */
4152	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4153		return NULL;
4154
4155	/*
4156	 * First allocate the protocol header region for the port.  The
4157	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4158	 */
4159	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4160	if (!dmabuf)
4161		return NULL;
4162
4163	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4164					  LPFC_HDR_TEMPLATE_SIZE,
4165					  &dmabuf->phys,
4166					  GFP_KERNEL);
4167	if (!dmabuf->virt) {
4168		rpi_hdr = NULL;
4169		goto err_free_dmabuf;
4170	}
4171
4172	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4173	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4174		rpi_hdr = NULL;
4175		goto err_free_coherent;
4176	}
4177
4178	/* Save the rpi header data for cleanup later. */
4179	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4180	if (!rpi_hdr)
4181		goto err_free_coherent;
4182
4183	rpi_hdr->dmabuf = dmabuf;
4184	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4185	rpi_hdr->page_count = 1;
4186	spin_lock_irq(&phba->hbalock);
4187	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4188	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4189
4190	/*
4191	 * The next_rpi stores the next module-64 rpi value to post
4192	 * in any subsequent rpi memory region postings.
4193	 */
4194	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4195	spin_unlock_irq(&phba->hbalock);
4196	return rpi_hdr;
4197
4198 err_free_coherent:
4199	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4200			  dmabuf->virt, dmabuf->phys);
4201 err_free_dmabuf:
4202	kfree(dmabuf);
4203	return NULL;
4204}
4205
4206/**
4207 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4208 * @phba: pointer to lpfc hba data structure.
4209 *
4210 * This routine is invoked to remove all memory resources allocated
4211 * to support rpis. This routine presumes the caller has released all
4212 * rpis consumed by fabric or port logins and is prepared to have
4213 * the header pages removed.
4214 **/
4215void
4216lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4217{
4218	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4219
4220	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4221				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4222		list_del(&rpi_hdr->list);
4223		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4224				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4225		kfree(rpi_hdr->dmabuf);
4226		kfree(rpi_hdr);
4227	}
4228
4229	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4230	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4231}
4232
4233/**
4234 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4235 * @pdev: pointer to pci device data structure.
4236 *
4237 * This routine is invoked to allocate the driver hba data structure for an
4238 * HBA device. If the allocation is successful, the phba reference to the
4239 * PCI device data structure is set.
4240 *
4241 * Return codes
4242 *      pointer to @phba - sucessful
4243 *      NULL - error
4244 **/
4245static struct lpfc_hba *
4246lpfc_hba_alloc(struct pci_dev *pdev)
4247{
4248	struct lpfc_hba *phba;
4249
4250	/* Allocate memory for HBA structure */
4251	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4252	if (!phba) {
4253		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4254		return NULL;
4255	}
4256
4257	/* Set reference to PCI device in HBA structure */
4258	phba->pcidev = pdev;
4259
4260	/* Assign an unused board number */
4261	phba->brd_no = lpfc_get_instance();
4262	if (phba->brd_no < 0) {
4263		kfree(phba);
4264		return NULL;
4265	}
4266
4267	mutex_init(&phba->ct_event_mutex);
4268	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4269
4270	return phba;
4271}
4272
4273/**
4274 * lpfc_hba_free - Free driver hba data structure with a device.
4275 * @phba: pointer to lpfc hba data structure.
4276 *
4277 * This routine is invoked to free the driver hba data structure with an
4278 * HBA device.
4279 **/
4280static void
4281lpfc_hba_free(struct lpfc_hba *phba)
4282{
4283	/* Release the driver assigned board number */
4284	idr_remove(&lpfc_hba_index, phba->brd_no);
4285
4286	kfree(phba);
4287	return;
4288}
4289
4290/**
4291 * lpfc_create_shost - Create hba physical port with associated scsi host.
4292 * @phba: pointer to lpfc hba data structure.
4293 *
4294 * This routine is invoked to create HBA physical port and associate a SCSI
4295 * host with it.
4296 *
4297 * Return codes
4298 *      0 - sucessful
4299 *      other values - error
4300 **/
4301static int
4302lpfc_create_shost(struct lpfc_hba *phba)
4303{
4304	struct lpfc_vport *vport;
4305	struct Scsi_Host  *shost;
4306
4307	/* Initialize HBA FC structure */
4308	phba->fc_edtov = FF_DEF_EDTOV;
4309	phba->fc_ratov = FF_DEF_RATOV;
4310	phba->fc_altov = FF_DEF_ALTOV;
4311	phba->fc_arbtov = FF_DEF_ARBTOV;
4312
4313	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4314	if (!vport)
4315		return -ENODEV;
4316
4317	shost = lpfc_shost_from_vport(vport);
4318	phba->pport = vport;
4319	lpfc_debugfs_initialize(vport);
4320	/* Put reference to SCSI host to driver's device private data */
4321	pci_set_drvdata(phba->pcidev, shost);
4322
4323	return 0;
4324}
4325
4326/**
4327 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4328 * @phba: pointer to lpfc hba data structure.
4329 *
4330 * This routine is invoked to destroy HBA physical port and the associated
4331 * SCSI host.
4332 **/
4333static void
4334lpfc_destroy_shost(struct lpfc_hba *phba)
4335{
4336	struct lpfc_vport *vport = phba->pport;
4337
4338	/* Destroy physical port that associated with the SCSI host */
4339	destroy_port(vport);
4340
4341	return;
4342}
4343
4344/**
4345 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4346 * @phba: pointer to lpfc hba data structure.
4347 * @shost: the shost to be used to detect Block guard settings.
4348 *
4349 * This routine sets up the local Block guard protocol settings for @shost.
4350 * This routine also allocates memory for debugging bg buffers.
4351 **/
4352static void
4353lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4354{
4355	int pagecnt = 10;
4356	if (lpfc_prot_mask && lpfc_prot_guard) {
4357		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4358				"1478 Registering BlockGuard with the "
4359				"SCSI layer\n");
4360		scsi_host_set_prot(shost, lpfc_prot_mask);
4361		scsi_host_set_guard(shost, lpfc_prot_guard);
4362	}
4363	if (!_dump_buf_data) {
4364		while (pagecnt) {
4365			spin_lock_init(&_dump_buf_lock);
4366			_dump_buf_data =
4367				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4368			if (_dump_buf_data) {
4369				printk(KERN_ERR "BLKGRD allocated %d pages for "
4370				       "_dump_buf_data at 0x%p\n",
4371				       (1 << pagecnt), _dump_buf_data);
4372				_dump_buf_data_order = pagecnt;
4373				memset(_dump_buf_data, 0,
4374				       ((1 << PAGE_SHIFT) << pagecnt));
4375				break;
4376			} else
4377				--pagecnt;
4378		}
4379		if (!_dump_buf_data_order)
4380			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4381			       "memory for hexdump\n");
4382	} else
4383		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4384		       "\n", _dump_buf_data);
4385	if (!_dump_buf_dif) {
4386		while (pagecnt) {
4387			_dump_buf_dif =
4388				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4389			if (_dump_buf_dif) {
4390				printk(KERN_ERR "BLKGRD allocated %d pages for "
4391				       "_dump_buf_dif at 0x%p\n",
4392				       (1 << pagecnt), _dump_buf_dif);
4393				_dump_buf_dif_order = pagecnt;
4394				memset(_dump_buf_dif, 0,
4395				       ((1 << PAGE_SHIFT) << pagecnt));
4396				break;
4397			} else
4398				--pagecnt;
4399		}
4400		if (!_dump_buf_dif_order)
4401			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4402			       "memory for hexdump\n");
4403	} else
4404		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4405		       _dump_buf_dif);
4406}
4407
4408/**
4409 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4410 * @phba: pointer to lpfc hba data structure.
4411 *
4412 * This routine is invoked to perform all the necessary post initialization
4413 * setup for the device.
4414 **/
4415static void
4416lpfc_post_init_setup(struct lpfc_hba *phba)
4417{
4418	struct Scsi_Host  *shost;
4419	struct lpfc_adapter_event_header adapter_event;
4420
4421	/* Get the default values for Model Name and Description */
4422	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4423
4424	/*
4425	 * hba setup may have changed the hba_queue_depth so we need to
4426	 * adjust the value of can_queue.
4427	 */
4428	shost = pci_get_drvdata(phba->pcidev);
4429	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4430	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4431		lpfc_setup_bg(phba, shost);
4432
4433	lpfc_host_attrib_init(shost);
4434
4435	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4436		spin_lock_irq(shost->host_lock);
4437		lpfc_poll_start_timer(phba);
4438		spin_unlock_irq(shost->host_lock);
4439	}
4440
4441	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4442			"0428 Perform SCSI scan\n");
4443	/* Send board arrival event to upper layer */
4444	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4445	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4446	fc_host_post_vendor_event(shost, fc_get_event_number(),
4447				  sizeof(adapter_event),
4448				  (char *) &adapter_event,
4449				  LPFC_NL_VENDOR_ID);
4450	return;
4451}
4452
4453/**
4454 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4455 * @phba: pointer to lpfc hba data structure.
4456 *
4457 * This routine is invoked to set up the PCI device memory space for device
4458 * with SLI-3 interface spec.
4459 *
4460 * Return codes
4461 * 	0 - sucessful
4462 * 	other values - error
4463 **/
4464static int
4465lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4466{
4467	struct pci_dev *pdev;
4468	unsigned long bar0map_len, bar2map_len;
4469	int i, hbq_count;
4470	void *ptr;
4471	int error = -ENODEV;
4472
4473	/* Obtain PCI device reference */
4474	if (!phba->pcidev)
4475		return error;
4476	else
4477		pdev = phba->pcidev;
4478
4479	/* Set the device DMA mask size */
4480	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4481		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4482			return error;
4483
4484	/* Get the bus address of Bar0 and Bar2 and the number of bytes
4485	 * required by each mapping.
4486	 */
4487	phba->pci_bar0_map = pci_resource_start(pdev, 0);
4488	bar0map_len = pci_resource_len(pdev, 0);
4489
4490	phba->pci_bar2_map = pci_resource_start(pdev, 2);
4491	bar2map_len = pci_resource_len(pdev, 2);
4492
4493	/* Map HBA SLIM to a kernel virtual address. */
4494	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4495	if (!phba->slim_memmap_p) {
4496		dev_printk(KERN_ERR, &pdev->dev,
4497			   "ioremap failed for SLIM memory.\n");
4498		goto out;
4499	}
4500
4501	/* Map HBA Control Registers to a kernel virtual address. */
4502	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4503	if (!phba->ctrl_regs_memmap_p) {
4504		dev_printk(KERN_ERR, &pdev->dev,
4505			   "ioremap failed for HBA control registers.\n");
4506		goto out_iounmap_slim;
4507	}
4508
4509	/* Allocate memory for SLI-2 structures */
4510	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4511					       SLI2_SLIM_SIZE,
4512					       &phba->slim2p.phys,
4513					       GFP_KERNEL);
4514	if (!phba->slim2p.virt)
4515		goto out_iounmap;
4516
4517	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4518	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4519	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4520	phba->IOCBs = (phba->slim2p.virt +
4521		       offsetof(struct lpfc_sli2_slim, IOCBs));
4522
4523	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4524						 lpfc_sli_hbq_size(),
4525						 &phba->hbqslimp.phys,
4526						 GFP_KERNEL);
4527	if (!phba->hbqslimp.virt)
4528		goto out_free_slim;
4529
4530	hbq_count = lpfc_sli_hbq_count();
4531	ptr = phba->hbqslimp.virt;
4532	for (i = 0; i < hbq_count; ++i) {
4533		phba->hbqs[i].hbq_virt = ptr;
4534		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4535		ptr += (lpfc_hbq_defs[i]->entry_count *
4536			sizeof(struct lpfc_hbq_entry));
4537	}
4538	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4539	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4540
4541	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4542
4543	INIT_LIST_HEAD(&phba->rb_pend_list);
4544
4545	phba->MBslimaddr = phba->slim_memmap_p;
4546	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4547	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4548	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4549	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4550
4551	return 0;
4552
4553out_free_slim:
4554	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4555			  phba->slim2p.virt, phba->slim2p.phys);
4556out_iounmap:
4557	iounmap(phba->ctrl_regs_memmap_p);
4558out_iounmap_slim:
4559	iounmap(phba->slim_memmap_p);
4560out:
4561	return error;
4562}
4563
4564/**
4565 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4566 * @phba: pointer to lpfc hba data structure.
4567 *
4568 * This routine is invoked to unset the PCI device memory space for device
4569 * with SLI-3 interface spec.
4570 **/
4571static void
4572lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4573{
4574	struct pci_dev *pdev;
4575
4576	/* Obtain PCI device reference */
4577	if (!phba->pcidev)
4578		return;
4579	else
4580		pdev = phba->pcidev;
4581
4582	/* Free coherent DMA memory allocated */
4583	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4584			  phba->hbqslimp.virt, phba->hbqslimp.phys);
4585	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4586			  phba->slim2p.virt, phba->slim2p.phys);
4587
4588	/* I/O memory unmap */
4589	iounmap(phba->ctrl_regs_memmap_p);
4590	iounmap(phba->slim_memmap_p);
4591
4592	return;
4593}
4594
4595/**
4596 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4597 * @phba: pointer to lpfc hba data structure.
4598 *
4599 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4600 * done and check status.
4601 *
4602 * Return 0 if successful, otherwise -ENODEV.
4603 **/
4604int
4605lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4606{
4607	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4608	uint32_t onlnreg0, onlnreg1;
4609	int i, port_error = -ENODEV;
4610
4611	if (!phba->sli4_hba.STAregaddr)
4612		return -ENODEV;
4613
4614	/* Wait up to 30 seconds for the SLI Port POST done and ready */
4615	for (i = 0; i < 3000; i++) {
4616		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4617		/* Encounter fatal POST error, break out */
4618		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4619			port_error = -ENODEV;
4620			break;
4621		}
4622		if (LPFC_POST_STAGE_ARMFW_READY ==
4623		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4624			port_error = 0;
4625			break;
4626		}
4627		msleep(10);
4628	}
4629
4630	if (port_error)
4631		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4632			"1408 Failure HBA POST Status: sta_reg=0x%x, "
4633			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4634			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
4635			bf_get(lpfc_hst_state_perr, &sta_reg),
4636			bf_get(lpfc_hst_state_sfi, &sta_reg),
4637			bf_get(lpfc_hst_state_nip, &sta_reg),
4638			bf_get(lpfc_hst_state_ipc, &sta_reg),
4639			bf_get(lpfc_hst_state_xrom, &sta_reg),
4640			bf_get(lpfc_hst_state_dl, &sta_reg),
4641			bf_get(lpfc_hst_state_port_status, &sta_reg));
4642
4643	/* Log device information */
4644	scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
4645	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4646			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4647			"FeatureL1=0x%x, FeatureL2=0x%x\n",
4648			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4649			bf_get(lpfc_scratchpad_slirev, &scratchpad),
4650			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4651			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4652
4653	/* With uncoverable error, log the error message and return error */
4654	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4655	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4656	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4657		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4658		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4659		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4660			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4661					"1422 HBA Unrecoverable error: "
4662					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4663					"online0_reg=0x%x, online1_reg=0x%x\n",
4664					uerrlo_reg.word0, uerrhi_reg.word0,
4665					onlnreg0, onlnreg1);
4666		}
4667		return -ENODEV;
4668	}
4669
4670	return port_error;
4671}
4672
4673/**
4674 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4675 * @phba: pointer to lpfc hba data structure.
4676 *
4677 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4678 * memory map.
4679 **/
4680static void
4681lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4682{
4683	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4684					LPFC_UERR_STATUS_LO;
4685	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4686					LPFC_UERR_STATUS_HI;
4687	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4688					LPFC_ONLINE0;
4689	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4690					LPFC_ONLINE1;
4691	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4692					LPFC_SCRATCHPAD;
4693}
4694
4695/**
4696 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4697 * @phba: pointer to lpfc hba data structure.
4698 *
4699 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4700 * memory map.
4701 **/
4702static void
4703lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4704{
4705
4706	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4707				    LPFC_HST_STATE;
4708	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4709				    LPFC_HST_ISR0;
4710	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4711				    LPFC_HST_IMR0;
4712	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4713				     LPFC_HST_ISCR0;
4714	return;
4715}
4716
4717/**
4718 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4719 * @phba: pointer to lpfc hba data structure.
4720 * @vf: virtual function number
4721 *
4722 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4723 * based on the given viftual function number, @vf.
4724 *
4725 * Return 0 if successful, otherwise -ENODEV.
4726 **/
4727static int
4728lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4729{
4730	if (vf > LPFC_VIR_FUNC_MAX)
4731		return -ENODEV;
4732
4733	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4734				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4735	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4736				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4737	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4738				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4739	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4740				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4741	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4742				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4743	return 0;
4744}
4745
4746/**
4747 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4748 * @phba: pointer to lpfc hba data structure.
4749 *
4750 * This routine is invoked to create the bootstrap mailbox
4751 * region consistent with the SLI-4 interface spec.  This
4752 * routine allocates all memory necessary to communicate
4753 * mailbox commands to the port and sets up all alignment
4754 * needs.  No locks are expected to be held when calling
4755 * this routine.
4756 *
4757 * Return codes
4758 * 	0 - sucessful
4759 * 	ENOMEM - could not allocated memory.
4760 **/
4761static int
4762lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4763{
4764	uint32_t bmbx_size;
4765	struct lpfc_dmabuf *dmabuf;
4766	struct dma_address *dma_address;
4767	uint32_t pa_addr;
4768	uint64_t phys_addr;
4769
4770	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4771	if (!dmabuf)
4772		return -ENOMEM;
4773
4774	/*
4775	 * The bootstrap mailbox region is comprised of 2 parts
4776	 * plus an alignment restriction of 16 bytes.
4777	 */
4778	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4779	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4780					  bmbx_size,
4781					  &dmabuf->phys,
4782					  GFP_KERNEL);
4783	if (!dmabuf->virt) {
4784		kfree(dmabuf);
4785		return -ENOMEM;
4786	}
4787	memset(dmabuf->virt, 0, bmbx_size);
4788
4789	/*
4790	 * Initialize the bootstrap mailbox pointers now so that the register
4791	 * operations are simple later.  The mailbox dma address is required
4792	 * to be 16-byte aligned.  Also align the virtual memory as each
4793	 * maibox is copied into the bmbx mailbox region before issuing the
4794	 * command to the port.
4795	 */
4796	phba->sli4_hba.bmbx.dmabuf = dmabuf;
4797	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4798
4799	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4800					      LPFC_ALIGN_16_BYTE);
4801	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4802					      LPFC_ALIGN_16_BYTE);
4803
4804	/*
4805	 * Set the high and low physical addresses now.  The SLI4 alignment
4806	 * requirement is 16 bytes and the mailbox is posted to the port
4807	 * as two 30-bit addresses.  The other data is a bit marking whether
4808	 * the 30-bit address is the high or low address.
4809	 * Upcast bmbx aphys to 64bits so shift instruction compiles
4810	 * clean on 32 bit machines.
4811	 */
4812	dma_address = &phba->sli4_hba.bmbx.dma_address;
4813	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4814	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4815	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4816					   LPFC_BMBX_BIT1_ADDR_HI);
4817
4818	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4819	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4820					   LPFC_BMBX_BIT1_ADDR_LO);
4821	return 0;
4822}
4823
4824/**
4825 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4826 * @phba: pointer to lpfc hba data structure.
4827 *
4828 * This routine is invoked to teardown the bootstrap mailbox
4829 * region and release all host resources. This routine requires
4830 * the caller to ensure all mailbox commands recovered, no
4831 * additional mailbox comands are sent, and interrupts are disabled
4832 * before calling this routine.
4833 *
4834 **/
4835static void
4836lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4837{
4838	dma_free_coherent(&phba->pcidev->dev,
4839			  phba->sli4_hba.bmbx.bmbx_size,
4840			  phba->sli4_hba.bmbx.dmabuf->virt,
4841			  phba->sli4_hba.bmbx.dmabuf->phys);
4842
4843	kfree(phba->sli4_hba.bmbx.dmabuf);
4844	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4845}
4846
4847/**
4848 * lpfc_sli4_read_config - Get the config parameters.
4849 * @phba: pointer to lpfc hba data structure.
4850 *
4851 * This routine is invoked to read the configuration parameters from the HBA.
4852 * The configuration parameters are used to set the base and maximum values
4853 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
4854 * allocation for the port.
4855 *
4856 * Return codes
4857 * 	0 - sucessful
4858 * 	ENOMEM - No availble memory
4859 *      EIO - The mailbox failed to complete successfully.
4860 **/
4861static int
4862lpfc_sli4_read_config(struct lpfc_hba *phba)
4863{
4864	LPFC_MBOXQ_t *pmb;
4865	struct lpfc_mbx_read_config *rd_config;
4866	uint32_t rc = 0;
4867
4868	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4869	if (!pmb) {
4870		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4871				"2011 Unable to allocate memory for issuing "
4872				"SLI_CONFIG_SPECIAL mailbox command\n");
4873		return -ENOMEM;
4874	}
4875
4876	lpfc_read_config(phba, pmb);
4877
4878	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4879	if (rc != MBX_SUCCESS) {
4880		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4881			"2012 Mailbox failed , mbxCmd x%x "
4882			"READ_CONFIG, mbxStatus x%x\n",
4883			bf_get(lpfc_mqe_command, &pmb->u.mqe),
4884			bf_get(lpfc_mqe_status, &pmb->u.mqe));
4885		rc = -EIO;
4886	} else {
4887		rd_config = &pmb->u.mqe.un.rd_config;
4888		phba->sli4_hba.max_cfg_param.max_xri =
4889			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4890		phba->sli4_hba.max_cfg_param.xri_base =
4891			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4892		phba->sli4_hba.max_cfg_param.max_vpi =
4893			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4894		phba->sli4_hba.max_cfg_param.vpi_base =
4895			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4896		phba->sli4_hba.max_cfg_param.max_rpi =
4897			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4898		phba->sli4_hba.max_cfg_param.rpi_base =
4899			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4900		phba->sli4_hba.max_cfg_param.max_vfi =
4901			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4902		phba->sli4_hba.max_cfg_param.vfi_base =
4903			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4904		phba->sli4_hba.max_cfg_param.max_fcfi =
4905			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4906		phba->sli4_hba.max_cfg_param.fcfi_base =
4907			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4908		phba->sli4_hba.max_cfg_param.max_eq =
4909			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4910		phba->sli4_hba.max_cfg_param.max_rq =
4911			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4912		phba->sli4_hba.max_cfg_param.max_wq =
4913			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4914		phba->sli4_hba.max_cfg_param.max_cq =
4915			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4916		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4917		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4918		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4919		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4920		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4921		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4922		phba->max_vports = phba->max_vpi;
4923		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4924				"2003 cfg params XRI(B:%d M:%d), "
4925				"VPI(B:%d M:%d) "
4926				"VFI(B:%d M:%d) "
4927				"RPI(B:%d M:%d) "
4928				"FCFI(B:%d M:%d)\n",
4929				phba->sli4_hba.max_cfg_param.xri_base,
4930				phba->sli4_hba.max_cfg_param.max_xri,
4931				phba->sli4_hba.max_cfg_param.vpi_base,
4932				phba->sli4_hba.max_cfg_param.max_vpi,
4933				phba->sli4_hba.max_cfg_param.vfi_base,
4934				phba->sli4_hba.max_cfg_param.max_vfi,
4935				phba->sli4_hba.max_cfg_param.rpi_base,
4936				phba->sli4_hba.max_cfg_param.max_rpi,
4937				phba->sli4_hba.max_cfg_param.fcfi_base,
4938				phba->sli4_hba.max_cfg_param.max_fcfi);
4939	}
4940	mempool_free(pmb, phba->mbox_mem_pool);
4941
4942	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
4943	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4944		phba->cfg_hba_queue_depth =
4945				phba->sli4_hba.max_cfg_param.max_xri;
4946	return rc;
4947}
4948
4949/**
4950 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
4951 * @phba: pointer to lpfc hba data structure.
4952 *
4953 * This routine is invoked to setup the host-side endian order to the
4954 * HBA consistent with the SLI-4 interface spec.
4955 *
4956 * Return codes
4957 * 	0 - sucessful
4958 * 	ENOMEM - No availble memory
4959 *      EIO - The mailbox failed to complete successfully.
4960 **/
4961static int
4962lpfc_setup_endian_order(struct lpfc_hba *phba)
4963{
4964	LPFC_MBOXQ_t *mboxq;
4965	uint32_t rc = 0;
4966	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4967				      HOST_ENDIAN_HIGH_WORD1};
4968
4969	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4970	if (!mboxq) {
4971		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4972				"0492 Unable to allocate memory for issuing "
4973				"SLI_CONFIG_SPECIAL mailbox command\n");
4974		return -ENOMEM;
4975	}
4976
4977	/*
4978	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4979	 * words to contain special data values and no other data.
4980	 */
4981	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4982	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4983	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4984	if (rc != MBX_SUCCESS) {
4985		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4986				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
4987				"status x%x\n",
4988				rc);
4989		rc = -EIO;
4990	}
4991
4992	mempool_free(mboxq, phba->mbox_mem_pool);
4993	return rc;
4994}
4995
4996/**
4997 * lpfc_sli4_queue_create - Create all the SLI4 queues
4998 * @phba: pointer to lpfc hba data structure.
4999 *
5000 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5001 * operation. For each SLI4 queue type, the parameters such as queue entry
5002 * count (queue depth) shall be taken from the module parameter. For now,
5003 * we just use some constant number as place holder.
5004 *
5005 * Return codes
5006 *      0 - sucessful
5007 *      ENOMEM - No availble memory
5008 *      EIO - The mailbox failed to complete successfully.
5009 **/
5010static int
5011lpfc_sli4_queue_create(struct lpfc_hba *phba)
5012{
5013	struct lpfc_queue *qdesc;
5014	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5015	int cfg_fcp_wq_count;
5016	int cfg_fcp_eq_count;
5017
5018	/*
5019	 * Sanity check for confiugred queue parameters against the run-time
5020	 * device parameters
5021	 */
5022
5023	/* Sanity check on FCP fast-path WQ parameters */
5024	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5025	if (cfg_fcp_wq_count >
5026	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5027		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5028				   LPFC_SP_WQN_DEF;
5029		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5030			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5031					"2581 Not enough WQs (%d) from "
5032					"the pci function for supporting "
5033					"FCP WQs (%d)\n",
5034					phba->sli4_hba.max_cfg_param.max_wq,
5035					phba->cfg_fcp_wq_count);
5036			goto out_error;
5037		}
5038		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5039				"2582 Not enough WQs (%d) from the pci "
5040				"function for supporting the requested "
5041				"FCP WQs (%d), the actual FCP WQs can "
5042				"be supported: %d\n",
5043				phba->sli4_hba.max_cfg_param.max_wq,
5044				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5045	}
5046	/* The actual number of FCP work queues adopted */
5047	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5048
5049	/* Sanity check on FCP fast-path EQ parameters */
5050	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5051	if (cfg_fcp_eq_count >
5052	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5053		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5054				   LPFC_SP_EQN_DEF;
5055		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5056			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5057					"2574 Not enough EQs (%d) from the "
5058					"pci function for supporting FCP "
5059					"EQs (%d)\n",
5060					phba->sli4_hba.max_cfg_param.max_eq,
5061					phba->cfg_fcp_eq_count);
5062			goto out_error;
5063		}
5064		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5065				"2575 Not enough EQs (%d) from the pci "
5066				"function for supporting the requested "
5067				"FCP EQs (%d), the actual FCP EQs can "
5068				"be supported: %d\n",
5069				phba->sli4_hba.max_cfg_param.max_eq,
5070				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5071	}
5072	/* It does not make sense to have more EQs than WQs */
5073	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5074		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5075				"2593 The number of FCP EQs (%d) is more "
5076				"than the number of FCP WQs (%d), take "
5077				"the number of FCP EQs same as than of "
5078				"WQs (%d)\n", cfg_fcp_eq_count,
5079				phba->cfg_fcp_wq_count,
5080				phba->cfg_fcp_wq_count);
5081		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5082	}
5083	/* The actual number of FCP event queues adopted */
5084	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5085	/* The overall number of event queues used */
5086	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5087
5088	/*
5089	 * Create Event Queues (EQs)
5090	 */
5091
5092	/* Get EQ depth from module parameter, fake the default for now */
5093	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5094	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5095
5096	/* Create slow path event queue */
5097	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5098				      phba->sli4_hba.eq_ecount);
5099	if (!qdesc) {
5100		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5101				"0496 Failed allocate slow-path EQ\n");
5102		goto out_error;
5103	}
5104	phba->sli4_hba.sp_eq = qdesc;
5105
5106	/* Create fast-path FCP Event Queue(s) */
5107	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5108			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5109	if (!phba->sli4_hba.fp_eq) {
5110		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5111				"2576 Failed allocate memory for fast-path "
5112				"EQ record array\n");
5113		goto out_free_sp_eq;
5114	}
5115	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5116		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5117					      phba->sli4_hba.eq_ecount);
5118		if (!qdesc) {
5119			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5120					"0497 Failed allocate fast-path EQ\n");
5121			goto out_free_fp_eq;
5122		}
5123		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5124	}
5125
5126	/*
5127	 * Create Complete Queues (CQs)
5128	 */
5129
5130	/* Get CQ depth from module parameter, fake the default for now */
5131	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5132	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5133
5134	/* Create slow-path Mailbox Command Complete Queue */
5135	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5136				      phba->sli4_hba.cq_ecount);
5137	if (!qdesc) {
5138		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5139				"0500 Failed allocate slow-path mailbox CQ\n");
5140		goto out_free_fp_eq;
5141	}
5142	phba->sli4_hba.mbx_cq = qdesc;
5143
5144	/* Create slow-path ELS Complete Queue */
5145	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5146				      phba->sli4_hba.cq_ecount);
5147	if (!qdesc) {
5148		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5149				"0501 Failed allocate slow-path ELS CQ\n");
5150		goto out_free_mbx_cq;
5151	}
5152	phba->sli4_hba.els_cq = qdesc;
5153
5154
5155	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5156	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5157				phba->cfg_fcp_eq_count), GFP_KERNEL);
5158	if (!phba->sli4_hba.fcp_cq) {
5159		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5160				"2577 Failed allocate memory for fast-path "
5161				"CQ record array\n");
5162		goto out_free_els_cq;
5163	}
5164	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5165		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5166					      phba->sli4_hba.cq_ecount);
5167		if (!qdesc) {
5168			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5169					"0499 Failed allocate fast-path FCP "
5170					"CQ (%d)\n", fcp_cqidx);
5171			goto out_free_fcp_cq;
5172		}
5173		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5174	}
5175
5176	/* Create Mailbox Command Queue */
5177	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5178	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5179
5180	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5181				      phba->sli4_hba.mq_ecount);
5182	if (!qdesc) {
5183		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5184				"0505 Failed allocate slow-path MQ\n");
5185		goto out_free_fcp_cq;
5186	}
5187	phba->sli4_hba.mbx_wq = qdesc;
5188
5189	/*
5190	 * Create all the Work Queues (WQs)
5191	 */
5192	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5193	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5194
5195	/* Create slow-path ELS Work Queue */
5196	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5197				      phba->sli4_hba.wq_ecount);
5198	if (!qdesc) {
5199		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5200				"0504 Failed allocate slow-path ELS WQ\n");
5201		goto out_free_mbx_wq;
5202	}
5203	phba->sli4_hba.els_wq = qdesc;
5204
5205	/* Create fast-path FCP Work Queue(s) */
5206	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5207				phba->cfg_fcp_wq_count), GFP_KERNEL);
5208	if (!phba->sli4_hba.fcp_wq) {
5209		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5210				"2578 Failed allocate memory for fast-path "
5211				"WQ record array\n");
5212		goto out_free_els_wq;
5213	}
5214	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5215		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5216					      phba->sli4_hba.wq_ecount);
5217		if (!qdesc) {
5218			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5219					"0503 Failed allocate fast-path FCP "
5220					"WQ (%d)\n", fcp_wqidx);
5221			goto out_free_fcp_wq;
5222		}
5223		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5224	}
5225
5226	/*
5227	 * Create Receive Queue (RQ)
5228	 */
5229	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5230	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5231
5232	/* Create Receive Queue for header */
5233	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5234				      phba->sli4_hba.rq_ecount);
5235	if (!qdesc) {
5236		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5237				"0506 Failed allocate receive HRQ\n");
5238		goto out_free_fcp_wq;
5239	}
5240	phba->sli4_hba.hdr_rq = qdesc;
5241
5242	/* Create Receive Queue for data */
5243	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5244				      phba->sli4_hba.rq_ecount);
5245	if (!qdesc) {
5246		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5247				"0507 Failed allocate receive DRQ\n");
5248		goto out_free_hdr_rq;
5249	}
5250	phba->sli4_hba.dat_rq = qdesc;
5251
5252	return 0;
5253
5254out_free_hdr_rq:
5255	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5256	phba->sli4_hba.hdr_rq = NULL;
5257out_free_fcp_wq:
5258	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5259		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5260		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5261	}
5262	kfree(phba->sli4_hba.fcp_wq);
5263out_free_els_wq:
5264	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5265	phba->sli4_hba.els_wq = NULL;
5266out_free_mbx_wq:
5267	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5268	phba->sli4_hba.mbx_wq = NULL;
5269out_free_fcp_cq:
5270	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5271		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5272		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5273	}
5274	kfree(phba->sli4_hba.fcp_cq);
5275out_free_els_cq:
5276	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5277	phba->sli4_hba.els_cq = NULL;
5278out_free_mbx_cq:
5279	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5280	phba->sli4_hba.mbx_cq = NULL;
5281out_free_fp_eq:
5282	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5283		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5284		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5285	}
5286	kfree(phba->sli4_hba.fp_eq);
5287out_free_sp_eq:
5288	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5289	phba->sli4_hba.sp_eq = NULL;
5290out_error:
5291	return -ENOMEM;
5292}
5293
5294/**
5295 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5296 * @phba: pointer to lpfc hba data structure.
5297 *
5298 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5299 * operation.
5300 *
5301 * Return codes
5302 *      0 - sucessful
5303 *      ENOMEM - No availble memory
5304 *      EIO - The mailbox failed to complete successfully.
5305 **/
5306static void
5307lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5308{
5309	int fcp_qidx;
5310
5311	/* Release mailbox command work queue */
5312	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5313	phba->sli4_hba.mbx_wq = NULL;
5314
5315	/* Release ELS work queue */
5316	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5317	phba->sli4_hba.els_wq = NULL;
5318
5319	/* Release FCP work queue */
5320	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5321		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5322	kfree(phba->sli4_hba.fcp_wq);
5323	phba->sli4_hba.fcp_wq = NULL;
5324
5325	/* Release unsolicited receive queue */
5326	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5327	phba->sli4_hba.hdr_rq = NULL;
5328	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5329	phba->sli4_hba.dat_rq = NULL;
5330
5331	/* Release ELS complete queue */
5332	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5333	phba->sli4_hba.els_cq = NULL;
5334
5335	/* Release mailbox command complete queue */
5336	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5337	phba->sli4_hba.mbx_cq = NULL;
5338
5339	/* Release FCP response complete queue */
5340	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5341		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5342	kfree(phba->sli4_hba.fcp_cq);
5343	phba->sli4_hba.fcp_cq = NULL;
5344
5345	/* Release fast-path event queue */
5346	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5347		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5348	kfree(phba->sli4_hba.fp_eq);
5349	phba->sli4_hba.fp_eq = NULL;
5350
5351	/* Release slow-path event queue */
5352	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5353	phba->sli4_hba.sp_eq = NULL;
5354
5355	return;
5356}
5357
5358/**
5359 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5360 * @phba: pointer to lpfc hba data structure.
5361 *
5362 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5363 * operation.
5364 *
5365 * Return codes
5366 *      0 - sucessful
5367 *      ENOMEM - No availble memory
5368 *      EIO - The mailbox failed to complete successfully.
5369 **/
5370int
5371lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5372{
5373	int rc = -ENOMEM;
5374	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5375	int fcp_cq_index = 0;
5376
5377	/*
5378	 * Set up Event Queues (EQs)
5379	 */
5380
5381	/* Set up slow-path event queue */
5382	if (!phba->sli4_hba.sp_eq) {
5383		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5384				"0520 Slow-path EQ not allocated\n");
5385		goto out_error;
5386	}
5387	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5388			    LPFC_SP_DEF_IMAX);
5389	if (rc) {
5390		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391				"0521 Failed setup of slow-path EQ: "
5392				"rc = 0x%x\n", rc);
5393		goto out_error;
5394	}
5395	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396			"2583 Slow-path EQ setup: queue-id=%d\n",
5397			phba->sli4_hba.sp_eq->queue_id);
5398
5399	/* Set up fast-path event queue */
5400	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5401		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5402			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5403					"0522 Fast-path EQ (%d) not "
5404					"allocated\n", fcp_eqidx);
5405			goto out_destroy_fp_eq;
5406		}
5407		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5408				    phba->cfg_fcp_imax);
5409		if (rc) {
5410			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411					"0523 Failed setup of fast-path EQ "
5412					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5413			goto out_destroy_fp_eq;
5414		}
5415		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5416				"2584 Fast-path EQ setup: "
5417				"queue[%d]-id=%d\n", fcp_eqidx,
5418				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5419	}
5420
5421	/*
5422	 * Set up Complete Queues (CQs)
5423	 */
5424
5425	/* Set up slow-path MBOX Complete Queue as the first CQ */
5426	if (!phba->sli4_hba.mbx_cq) {
5427		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5428				"0528 Mailbox CQ not allocated\n");
5429		goto out_destroy_fp_eq;
5430	}
5431	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5432			    LPFC_MCQ, LPFC_MBOX);
5433	if (rc) {
5434		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5435				"0529 Failed setup of slow-path mailbox CQ: "
5436				"rc = 0x%x\n", rc);
5437		goto out_destroy_fp_eq;
5438	}
5439	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5440			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5441			phba->sli4_hba.mbx_cq->queue_id,
5442			phba->sli4_hba.sp_eq->queue_id);
5443
5444	/* Set up slow-path ELS Complete Queue */
5445	if (!phba->sli4_hba.els_cq) {
5446		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5447				"0530 ELS CQ not allocated\n");
5448		goto out_destroy_mbx_cq;
5449	}
5450	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5451			    LPFC_WCQ, LPFC_ELS);
5452	if (rc) {
5453		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5454				"0531 Failed setup of slow-path ELS CQ: "
5455				"rc = 0x%x\n", rc);
5456		goto out_destroy_mbx_cq;
5457	}
5458	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5459			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5460			phba->sli4_hba.els_cq->queue_id,
5461			phba->sli4_hba.sp_eq->queue_id);
5462
5463	/* Set up fast-path FCP Response Complete Queue */
5464	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5465		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5466			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5467					"0526 Fast-path FCP CQ (%d) not "
5468					"allocated\n", fcp_cqidx);
5469			goto out_destroy_fcp_cq;
5470		}
5471		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5472				    phba->sli4_hba.fp_eq[fcp_cqidx],
5473				    LPFC_WCQ, LPFC_FCP);
5474		if (rc) {
5475			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5476					"0527 Failed setup of fast-path FCP "
5477					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5478			goto out_destroy_fcp_cq;
5479		}
5480		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5481				"2588 FCP CQ setup: cq[%d]-id=%d, "
5482				"parent eq[%d]-id=%d\n",
5483				fcp_cqidx,
5484				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5485				fcp_cqidx,
5486				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5487	}
5488
5489	/*
5490	 * Set up all the Work Queues (WQs)
5491	 */
5492
5493	/* Set up Mailbox Command Queue */
5494	if (!phba->sli4_hba.mbx_wq) {
5495		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5496				"0538 Slow-path MQ not allocated\n");
5497		goto out_destroy_fcp_cq;
5498	}
5499	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5500			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
5501	if (rc) {
5502		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5503				"0539 Failed setup of slow-path MQ: "
5504				"rc = 0x%x\n", rc);
5505		goto out_destroy_fcp_cq;
5506	}
5507	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5508			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5509			phba->sli4_hba.mbx_wq->queue_id,
5510			phba->sli4_hba.mbx_cq->queue_id);
5511
5512	/* Set up slow-path ELS Work Queue */
5513	if (!phba->sli4_hba.els_wq) {
5514		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5515				"0536 Slow-path ELS WQ not allocated\n");
5516		goto out_destroy_mbx_wq;
5517	}
5518	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5519			    phba->sli4_hba.els_cq, LPFC_ELS);
5520	if (rc) {
5521		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5522				"0537 Failed setup of slow-path ELS WQ: "
5523				"rc = 0x%x\n", rc);
5524		goto out_destroy_mbx_wq;
5525	}
5526	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5527			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5528			phba->sli4_hba.els_wq->queue_id,
5529			phba->sli4_hba.els_cq->queue_id);
5530
5531	/* Set up fast-path FCP Work Queue */
5532	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5533		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5534			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5535					"0534 Fast-path FCP WQ (%d) not "
5536					"allocated\n", fcp_wqidx);
5537			goto out_destroy_fcp_wq;
5538		}
5539		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5540				    phba->sli4_hba.fcp_cq[fcp_cq_index],
5541				    LPFC_FCP);
5542		if (rc) {
5543			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5544					"0535 Failed setup of fast-path FCP "
5545					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5546			goto out_destroy_fcp_wq;
5547		}
5548		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5549				"2591 FCP WQ setup: wq[%d]-id=%d, "
5550				"parent cq[%d]-id=%d\n",
5551				fcp_wqidx,
5552				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5553				fcp_cq_index,
5554				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5555		/* Round robin FCP Work Queue's Completion Queue assignment */
5556		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5557	}
5558
5559	/*
5560	 * Create Receive Queue (RQ)
5561	 */
5562	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5563		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5564				"0540 Receive Queue not allocated\n");
5565		goto out_destroy_fcp_wq;
5566	}
5567	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5568			    phba->sli4_hba.els_cq, LPFC_USOL);
5569	if (rc) {
5570		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5571				"0541 Failed setup of Receive Queue: "
5572				"rc = 0x%x\n", rc);
5573		goto out_destroy_fcp_wq;
5574	}
5575	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5576			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5577			"parent cq-id=%d\n",
5578			phba->sli4_hba.hdr_rq->queue_id,
5579			phba->sli4_hba.dat_rq->queue_id,
5580			phba->sli4_hba.els_cq->queue_id);
5581	return 0;
5582
5583out_destroy_fcp_wq:
5584	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5585		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5586	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5587out_destroy_mbx_wq:
5588	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5589out_destroy_fcp_cq:
5590	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5591		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5592	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5593out_destroy_mbx_cq:
5594	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5595out_destroy_fp_eq:
5596	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5597		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5598	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5599out_error:
5600	return rc;
5601}
5602
5603/**
5604 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5605 * @phba: pointer to lpfc hba data structure.
5606 *
5607 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5608 * operation.
5609 *
5610 * Return codes
5611 *      0 - sucessful
5612 *      ENOMEM - No availble memory
5613 *      EIO - The mailbox failed to complete successfully.
5614 **/
5615void
5616lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5617{
5618	int fcp_qidx;
5619
5620	/* Unset mailbox command work queue */
5621	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5622	/* Unset ELS work queue */
5623	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5624	/* Unset unsolicited receive queue */
5625	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5626	/* Unset FCP work queue */
5627	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5628		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5629	/* Unset mailbox command complete queue */
5630	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5631	/* Unset ELS complete queue */
5632	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5633	/* Unset FCP response complete queue */
5634	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5635		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5636	/* Unset fast-path event queue */
5637	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5638		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5639	/* Unset slow-path event queue */
5640	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5641}
5642
5643/**
5644 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5645 * @phba: pointer to lpfc hba data structure.
5646 *
5647 * This routine is invoked to allocate and set up a pool of completion queue
5648 * events. The body of the completion queue event is a completion queue entry
5649 * CQE. For now, this pool is used for the interrupt service routine to queue
5650 * the following HBA completion queue events for the worker thread to process:
5651 *   - Mailbox asynchronous events
5652 *   - Receive queue completion unsolicited events
5653 * Later, this can be used for all the slow-path events.
5654 *
5655 * Return codes
5656 *      0 - sucessful
5657 *      -ENOMEM - No availble memory
5658 **/
5659static int
5660lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5661{
5662	struct lpfc_cq_event *cq_event;
5663	int i;
5664
5665	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5666		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5667		if (!cq_event)
5668			goto out_pool_create_fail;
5669		list_add_tail(&cq_event->list,
5670			      &phba->sli4_hba.sp_cqe_event_pool);
5671	}
5672	return 0;
5673
5674out_pool_create_fail:
5675	lpfc_sli4_cq_event_pool_destroy(phba);
5676	return -ENOMEM;
5677}
5678
5679/**
5680 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5681 * @phba: pointer to lpfc hba data structure.
5682 *
5683 * This routine is invoked to free the pool of completion queue events at
5684 * driver unload time. Note that, it is the responsibility of the driver
5685 * cleanup routine to free all the outstanding completion-queue events
5686 * allocated from this pool back into the pool before invoking this routine
5687 * to destroy the pool.
5688 **/
5689static void
5690lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5691{
5692	struct lpfc_cq_event *cq_event, *next_cq_event;
5693
5694	list_for_each_entry_safe(cq_event, next_cq_event,
5695				 &phba->sli4_hba.sp_cqe_event_pool, list) {
5696		list_del(&cq_event->list);
5697		kfree(cq_event);
5698	}
5699}
5700
5701/**
5702 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5703 * @phba: pointer to lpfc hba data structure.
5704 *
5705 * This routine is the lock free version of the API invoked to allocate a
5706 * completion-queue event from the free pool.
5707 *
5708 * Return: Pointer to the newly allocated completion-queue event if successful
5709 *         NULL otherwise.
5710 **/
5711struct lpfc_cq_event *
5712__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5713{
5714	struct lpfc_cq_event *cq_event = NULL;
5715
5716	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5717			 struct lpfc_cq_event, list);
5718	return cq_event;
5719}
5720
5721/**
5722 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5723 * @phba: pointer to lpfc hba data structure.
5724 *
5725 * This routine is the lock version of the API invoked to allocate a
5726 * completion-queue event from the free pool.
5727 *
5728 * Return: Pointer to the newly allocated completion-queue event if successful
5729 *         NULL otherwise.
5730 **/
5731struct lpfc_cq_event *
5732lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5733{
5734	struct lpfc_cq_event *cq_event;
5735	unsigned long iflags;
5736
5737	spin_lock_irqsave(&phba->hbalock, iflags);
5738	cq_event = __lpfc_sli4_cq_event_alloc(phba);
5739	spin_unlock_irqrestore(&phba->hbalock, iflags);
5740	return cq_event;
5741}
5742
5743/**
5744 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5745 * @phba: pointer to lpfc hba data structure.
5746 * @cq_event: pointer to the completion queue event to be freed.
5747 *
5748 * This routine is the lock free version of the API invoked to release a
5749 * completion-queue event back into the free pool.
5750 **/
5751void
5752__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5753			     struct lpfc_cq_event *cq_event)
5754{
5755	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5756}
5757
5758/**
5759 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5760 * @phba: pointer to lpfc hba data structure.
5761 * @cq_event: pointer to the completion queue event to be freed.
5762 *
5763 * This routine is the lock version of the API invoked to release a
5764 * completion-queue event back into the free pool.
5765 **/
5766void
5767lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5768			   struct lpfc_cq_event *cq_event)
5769{
5770	unsigned long iflags;
5771	spin_lock_irqsave(&phba->hbalock, iflags);
5772	__lpfc_sli4_cq_event_release(phba, cq_event);
5773	spin_unlock_irqrestore(&phba->hbalock, iflags);
5774}
5775
5776/**
5777 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5778 * @phba: pointer to lpfc hba data structure.
5779 *
5780 * This routine is to free all the pending completion-queue events to the
5781 * back into the free pool for device reset.
5782 **/
5783static void
5784lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5785{
5786	LIST_HEAD(cqelist);
5787	struct lpfc_cq_event *cqe;
5788	unsigned long iflags;
5789
5790	/* Retrieve all the pending WCQEs from pending WCQE lists */
5791	spin_lock_irqsave(&phba->hbalock, iflags);
5792	/* Pending FCP XRI abort events */
5793	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5794			 &cqelist);
5795	/* Pending ELS XRI abort events */
5796	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5797			 &cqelist);
5798	/* Pending asynnc events */
5799	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5800			 &cqelist);
5801	spin_unlock_irqrestore(&phba->hbalock, iflags);
5802
5803	while (!list_empty(&cqelist)) {
5804		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5805		lpfc_sli4_cq_event_release(phba, cqe);
5806	}
5807}
5808
5809/**
5810 * lpfc_pci_function_reset - Reset pci function.
5811 * @phba: pointer to lpfc hba data structure.
5812 *
5813 * This routine is invoked to request a PCI function reset. It will destroys
5814 * all resources assigned to the PCI function which originates this request.
5815 *
5816 * Return codes
5817 *      0 - sucessful
5818 *      ENOMEM - No availble memory
5819 *      EIO - The mailbox failed to complete successfully.
5820 **/
5821int
5822lpfc_pci_function_reset(struct lpfc_hba *phba)
5823{
5824	LPFC_MBOXQ_t *mboxq;
5825	uint32_t rc = 0;
5826	uint32_t shdr_status, shdr_add_status;
5827	union lpfc_sli4_cfg_shdr *shdr;
5828
5829	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5830	if (!mboxq) {
5831		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5832				"0494 Unable to allocate memory for issuing "
5833				"SLI_FUNCTION_RESET mailbox command\n");
5834		return -ENOMEM;
5835	}
5836
5837	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5838	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5839			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5840			 LPFC_SLI4_MBX_EMBED);
5841	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5842	shdr = (union lpfc_sli4_cfg_shdr *)
5843		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5844	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5845	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5846	if (rc != MBX_TIMEOUT)
5847		mempool_free(mboxq, phba->mbox_mem_pool);
5848	if (shdr_status || shdr_add_status || rc) {
5849		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5850				"0495 SLI_FUNCTION_RESET mailbox failed with "
5851				"status x%x add_status x%x, mbx status x%x\n",
5852				shdr_status, shdr_add_status, rc);
5853		rc = -ENXIO;
5854	}
5855	return rc;
5856}
5857
5858/**
5859 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5860 * @phba: pointer to lpfc hba data structure.
5861 * @cnt: number of nop mailbox commands to send.
5862 *
5863 * This routine is invoked to send a number @cnt of NOP mailbox command and
5864 * wait for each command to complete.
5865 *
5866 * Return: the number of NOP mailbox command completed.
5867 **/
5868static int
5869lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5870{
5871	LPFC_MBOXQ_t *mboxq;
5872	int length, cmdsent;
5873	uint32_t mbox_tmo;
5874	uint32_t rc = 0;
5875	uint32_t shdr_status, shdr_add_status;
5876	union lpfc_sli4_cfg_shdr *shdr;
5877
5878	if (cnt == 0) {
5879		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5880				"2518 Requested to send 0 NOP mailbox cmd\n");
5881		return cnt;
5882	}
5883
5884	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5885	if (!mboxq) {
5886		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5887				"2519 Unable to allocate memory for issuing "
5888				"NOP mailbox command\n");
5889		return 0;
5890	}
5891
5892	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5893	length = (sizeof(struct lpfc_mbx_nop) -
5894		  sizeof(struct lpfc_sli4_cfg_mhdr));
5895	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5896			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5897
5898	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5899	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5900		if (!phba->sli4_hba.intr_enable)
5901			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5902		else
5903			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5904		if (rc == MBX_TIMEOUT)
5905			break;
5906		/* Check return status */
5907		shdr = (union lpfc_sli4_cfg_shdr *)
5908			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5909		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5910		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5911					 &shdr->response);
5912		if (shdr_status || shdr_add_status || rc) {
5913			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5914					"2520 NOP mailbox command failed "
5915					"status x%x add_status x%x mbx "
5916					"status x%x\n", shdr_status,
5917					shdr_add_status, rc);
5918			break;
5919		}
5920	}
5921
5922	if (rc != MBX_TIMEOUT)
5923		mempool_free(mboxq, phba->mbox_mem_pool);
5924
5925	return cmdsent;
5926}
5927
5928/**
5929 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
5930 * @phba: pointer to lpfc hba data structure.
5931 * @fcfi: fcf index.
5932 *
5933 * This routine is invoked to unregister a FCFI from device.
5934 **/
5935void
5936lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5937{
5938	LPFC_MBOXQ_t *mbox;
5939	uint32_t mbox_tmo;
5940	int rc;
5941	unsigned long flags;
5942
5943	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5944
5945	if (!mbox)
5946		return;
5947
5948	lpfc_unreg_fcfi(mbox, fcfi);
5949
5950	if (!phba->sli4_hba.intr_enable)
5951		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5952	else {
5953		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5954		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5955	}
5956	if (rc != MBX_TIMEOUT)
5957		mempool_free(mbox, phba->mbox_mem_pool);
5958	if (rc != MBX_SUCCESS)
5959		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5960				"2517 Unregister FCFI command failed "
5961				"status %d, mbxStatus x%x\n", rc,
5962				bf_get(lpfc_mqe_status, &mbox->u.mqe));
5963	else {
5964		spin_lock_irqsave(&phba->hbalock, flags);
5965		/* Mark the FCFI is no longer registered */
5966		phba->fcf.fcf_flag &=
5967			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5968		spin_unlock_irqrestore(&phba->hbalock, flags);
5969	}
5970}
5971
5972/**
5973 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5974 * @phba: pointer to lpfc hba data structure.
5975 *
5976 * This routine is invoked to set up the PCI device memory space for device
5977 * with SLI-4 interface spec.
5978 *
5979 * Return codes
5980 * 	0 - sucessful
5981 * 	other values - error
5982 **/
5983static int
5984lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5985{
5986	struct pci_dev *pdev;
5987	unsigned long bar0map_len, bar1map_len, bar2map_len;
5988	int error = -ENODEV;
5989
5990	/* Obtain PCI device reference */
5991	if (!phba->pcidev)
5992		return error;
5993	else
5994		pdev = phba->pcidev;
5995
5996	/* Set the device DMA mask size */
5997	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5998		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5999			return error;
6000
6001	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6002	 * number of bytes required by each mapping. They are actually
6003	 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
6004	 */
6005	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
6006	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
6007
6008	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
6009	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
6010
6011	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
6012	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
6013
6014	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6015	phba->sli4_hba.conf_regs_memmap_p =
6016				ioremap(phba->pci_bar0_map, bar0map_len);
6017	if (!phba->sli4_hba.conf_regs_memmap_p) {
6018		dev_printk(KERN_ERR, &pdev->dev,
6019			   "ioremap failed for SLI4 PCI config registers.\n");
6020		goto out;
6021	}
6022
6023	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6024	phba->sli4_hba.ctrl_regs_memmap_p =
6025				ioremap(phba->pci_bar1_map, bar1map_len);
6026	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6027		dev_printk(KERN_ERR, &pdev->dev,
6028			   "ioremap failed for SLI4 HBA control registers.\n");
6029		goto out_iounmap_conf;
6030	}
6031
6032	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6033	phba->sli4_hba.drbl_regs_memmap_p =
6034				ioremap(phba->pci_bar2_map, bar2map_len);
6035	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6036		dev_printk(KERN_ERR, &pdev->dev,
6037			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6038		goto out_iounmap_ctrl;
6039	}
6040
6041	/* Set up BAR0 PCI config space register memory map */
6042	lpfc_sli4_bar0_register_memmap(phba);
6043
6044	/* Set up BAR1 register memory map */
6045	lpfc_sli4_bar1_register_memmap(phba);
6046
6047	/* Set up BAR2 register memory map */
6048	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6049	if (error)
6050		goto out_iounmap_all;
6051
6052	return 0;
6053
6054out_iounmap_all:
6055	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6056out_iounmap_ctrl:
6057	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6058out_iounmap_conf:
6059	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6060out:
6061	return error;
6062}
6063
6064/**
6065 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6066 * @phba: pointer to lpfc hba data structure.
6067 *
6068 * This routine is invoked to unset the PCI device memory space for device
6069 * with SLI-4 interface spec.
6070 **/
6071static void
6072lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6073{
6074	struct pci_dev *pdev;
6075
6076	/* Obtain PCI device reference */
6077	if (!phba->pcidev)
6078		return;
6079	else
6080		pdev = phba->pcidev;
6081
6082	/* Free coherent DMA memory allocated */
6083
6084	/* Unmap I/O memory space */
6085	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6086	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6087	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6088
6089	return;
6090}
6091
6092/**
6093 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6094 * @phba: pointer to lpfc hba data structure.
6095 *
6096 * This routine is invoked to enable the MSI-X interrupt vectors to device
6097 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6098 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6099 * invoked, enables either all or nothing, depending on the current
6100 * availability of PCI vector resources. The device driver is responsible
6101 * for calling the individual request_irq() to register each MSI-X vector
6102 * with a interrupt handler, which is done in this function. Note that
6103 * later when device is unloading, the driver should always call free_irq()
6104 * on all MSI-X vectors it has done request_irq() on before calling
6105 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
6106 * will be left with MSI-X enabled and leaks its vectors.
6107 *
6108 * Return codes
6109 *   0 - sucessful
6110 *   other values - error
6111 **/
6112static int
6113lpfc_sli_enable_msix(struct lpfc_hba *phba)
6114{
6115	int rc, i;
6116	LPFC_MBOXQ_t *pmb;
6117
6118	/* Set up MSI-X multi-message vectors */
6119	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6120		phba->msix_entries[i].entry = i;
6121
6122	/* Configure MSI-X capability structure */
6123	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6124				ARRAY_SIZE(phba->msix_entries));
6125	if (rc) {
6126		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6127				"0420 PCI enable MSI-X failed (%d)\n", rc);
6128		goto msi_fail_out;
6129	}
6130	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6131		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6132				"0477 MSI-X entry[%d]: vector=x%x "
6133				"message=%d\n", i,
6134				phba->msix_entries[i].vector,
6135				phba->msix_entries[i].entry);
6136	/*
6137	 * Assign MSI-X vectors to interrupt handlers
6138	 */
6139
6140	/* vector-0 is associated to slow-path handler */
6141	rc = request_irq(phba->msix_entries[0].vector,
6142			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6143			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6144	if (rc) {
6145		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6146				"0421 MSI-X slow-path request_irq failed "
6147				"(%d)\n", rc);
6148		goto msi_fail_out;
6149	}
6150
6151	/* vector-1 is associated to fast-path handler */
6152	rc = request_irq(phba->msix_entries[1].vector,
6153			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6154			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6155
6156	if (rc) {
6157		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6158				"0429 MSI-X fast-path request_irq failed "
6159				"(%d)\n", rc);
6160		goto irq_fail_out;
6161	}
6162
6163	/*
6164	 * Configure HBA MSI-X attention conditions to messages
6165	 */
6166	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6167
6168	if (!pmb) {
6169		rc = -ENOMEM;
6170		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6171				"0474 Unable to allocate memory for issuing "
6172				"MBOX_CONFIG_MSI command\n");
6173		goto mem_fail_out;
6174	}
6175	rc = lpfc_config_msi(phba, pmb);
6176	if (rc)
6177		goto mbx_fail_out;
6178	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6179	if (rc != MBX_SUCCESS) {
6180		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6181				"0351 Config MSI mailbox command failed, "
6182				"mbxCmd x%x, mbxStatus x%x\n",
6183				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6184		goto mbx_fail_out;
6185	}
6186
6187	/* Free memory allocated for mailbox command */
6188	mempool_free(pmb, phba->mbox_mem_pool);
6189	return rc;
6190
6191mbx_fail_out:
6192	/* Free memory allocated for mailbox command */
6193	mempool_free(pmb, phba->mbox_mem_pool);
6194
6195mem_fail_out:
6196	/* free the irq already requested */
6197	free_irq(phba->msix_entries[1].vector, phba);
6198
6199irq_fail_out:
6200	/* free the irq already requested */
6201	free_irq(phba->msix_entries[0].vector, phba);
6202
6203msi_fail_out:
6204	/* Unconfigure MSI-X capability structure */
6205	pci_disable_msix(phba->pcidev);
6206	return rc;
6207}
6208
6209/**
6210 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6211 * @phba: pointer to lpfc hba data structure.
6212 *
6213 * This routine is invoked to release the MSI-X vectors and then disable the
6214 * MSI-X interrupt mode to device with SLI-3 interface spec.
6215 **/
6216static void
6217lpfc_sli_disable_msix(struct lpfc_hba *phba)
6218{
6219	int i;
6220
6221	/* Free up MSI-X multi-message vectors */
6222	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6223		free_irq(phba->msix_entries[i].vector, phba);
6224	/* Disable MSI-X */
6225	pci_disable_msix(phba->pcidev);
6226
6227	return;
6228}
6229
6230/**
6231 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6232 * @phba: pointer to lpfc hba data structure.
6233 *
6234 * This routine is invoked to enable the MSI interrupt mode to device with
6235 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6236 * enable the MSI vector. The device driver is responsible for calling the
6237 * request_irq() to register MSI vector with a interrupt the handler, which
6238 * is done in this function.
6239 *
6240 * Return codes
6241 * 	0 - sucessful
6242 * 	other values - error
6243 */
6244static int
6245lpfc_sli_enable_msi(struct lpfc_hba *phba)
6246{
6247	int rc;
6248
6249	rc = pci_enable_msi(phba->pcidev);
6250	if (!rc)
6251		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6252				"0462 PCI enable MSI mode success.\n");
6253	else {
6254		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6255				"0471 PCI enable MSI mode failed (%d)\n", rc);
6256		return rc;
6257	}
6258
6259	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6260			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6261	if (rc) {
6262		pci_disable_msi(phba->pcidev);
6263		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6264				"0478 MSI request_irq failed (%d)\n", rc);
6265	}
6266	return rc;
6267}
6268
6269/**
6270 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6271 * @phba: pointer to lpfc hba data structure.
6272 *
6273 * This routine is invoked to disable the MSI interrupt mode to device with
6274 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6275 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6276 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6277 * its vector.
6278 */
6279static void
6280lpfc_sli_disable_msi(struct lpfc_hba *phba)
6281{
6282	free_irq(phba->pcidev->irq, phba);
6283	pci_disable_msi(phba->pcidev);
6284	return;
6285}
6286
6287/**
6288 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6289 * @phba: pointer to lpfc hba data structure.
6290 *
6291 * This routine is invoked to enable device interrupt and associate driver's
6292 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6293 * spec. Depends on the interrupt mode configured to the driver, the driver
6294 * will try to fallback from the configured interrupt mode to an interrupt
6295 * mode which is supported by the platform, kernel, and device in the order
6296 * of:
6297 * MSI-X -> MSI -> IRQ.
6298 *
6299 * Return codes
6300 *   0 - sucessful
6301 *   other values - error
6302 **/
6303static uint32_t
6304lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6305{
6306	uint32_t intr_mode = LPFC_INTR_ERROR;
6307	int retval;
6308
6309	if (cfg_mode == 2) {
6310		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6311		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6312		if (!retval) {
6313			/* Now, try to enable MSI-X interrupt mode */
6314			retval = lpfc_sli_enable_msix(phba);
6315			if (!retval) {
6316				/* Indicate initialization to MSI-X mode */
6317				phba->intr_type = MSIX;
6318				intr_mode = 2;
6319			}
6320		}
6321	}
6322
6323	/* Fallback to MSI if MSI-X initialization failed */
6324	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6325		retval = lpfc_sli_enable_msi(phba);
6326		if (!retval) {
6327			/* Indicate initialization to MSI mode */
6328			phba->intr_type = MSI;
6329			intr_mode = 1;
6330		}
6331	}
6332
6333	/* Fallback to INTx if both MSI-X/MSI initalization failed */
6334	if (phba->intr_type == NONE) {
6335		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6336				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6337		if (!retval) {
6338			/* Indicate initialization to INTx mode */
6339			phba->intr_type = INTx;
6340			intr_mode = 0;
6341		}
6342	}
6343	return intr_mode;
6344}
6345
6346/**
6347 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6348 * @phba: pointer to lpfc hba data structure.
6349 *
6350 * This routine is invoked to disable device interrupt and disassociate the
6351 * driver's interrupt handler(s) from interrupt vector(s) to device with
6352 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6353 * release the interrupt vector(s) for the message signaled interrupt.
6354 **/
6355static void
6356lpfc_sli_disable_intr(struct lpfc_hba *phba)
6357{
6358	/* Disable the currently initialized interrupt mode */
6359	if (phba->intr_type == MSIX)
6360		lpfc_sli_disable_msix(phba);
6361	else if (phba->intr_type == MSI)
6362		lpfc_sli_disable_msi(phba);
6363	else if (phba->intr_type == INTx)
6364		free_irq(phba->pcidev->irq, phba);
6365
6366	/* Reset interrupt management states */
6367	phba->intr_type = NONE;
6368	phba->sli.slistat.sli_intr = 0;
6369
6370	return;
6371}
6372
6373/**
6374 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6375 * @phba: pointer to lpfc hba data structure.
6376 *
6377 * This routine is invoked to enable the MSI-X interrupt vectors to device
6378 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6379 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6380 * enables either all or nothing, depending on the current availability of
6381 * PCI vector resources. The device driver is responsible for calling the
6382 * individual request_irq() to register each MSI-X vector with a interrupt
6383 * handler, which is done in this function. Note that later when device is
6384 * unloading, the driver should always call free_irq() on all MSI-X vectors
6385 * it has done request_irq() on before calling pci_disable_msix(). Failure
6386 * to do so results in a BUG_ON() and a device will be left with MSI-X
6387 * enabled and leaks its vectors.
6388 *
6389 * Return codes
6390 * 0 - sucessful
6391 * other values - error
6392 **/
6393static int
6394lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6395{
6396	int rc, index;
6397
6398	/* Set up MSI-X multi-message vectors */
6399	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6400		phba->sli4_hba.msix_entries[index].entry = index;
6401
6402	/* Configure MSI-X capability structure */
6403	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6404			     phba->sli4_hba.cfg_eqn);
6405	if (rc) {
6406		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6407				"0484 PCI enable MSI-X failed (%d)\n", rc);
6408		goto msi_fail_out;
6409	}
6410	/* Log MSI-X vector assignment */
6411	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6412		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6413				"0489 MSI-X entry[%d]: vector=x%x "
6414				"message=%d\n", index,
6415				phba->sli4_hba.msix_entries[index].vector,
6416				phba->sli4_hba.msix_entries[index].entry);
6417	/*
6418	 * Assign MSI-X vectors to interrupt handlers
6419	 */
6420
6421	/* The first vector must associated to slow-path handler for MQ */
6422	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6423			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6424			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6425	if (rc) {
6426		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6427				"0485 MSI-X slow-path request_irq failed "
6428				"(%d)\n", rc);
6429		goto msi_fail_out;
6430	}
6431
6432	/* The rest of the vector(s) are associated to fast-path handler(s) */
6433	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6434		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6435		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6436		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6437				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6438				 LPFC_FP_DRIVER_HANDLER_NAME,
6439				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6440		if (rc) {
6441			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442					"0486 MSI-X fast-path (%d) "
6443					"request_irq failed (%d)\n", index, rc);
6444			goto cfg_fail_out;
6445		}
6446	}
6447
6448	return rc;
6449
6450cfg_fail_out:
6451	/* free the irq already requested */
6452	for (--index; index >= 1; index--)
6453		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6454			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6455
6456	/* free the irq already requested */
6457	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6458
6459msi_fail_out:
6460	/* Unconfigure MSI-X capability structure */
6461	pci_disable_msix(phba->pcidev);
6462	return rc;
6463}
6464
6465/**
6466 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6467 * @phba: pointer to lpfc hba data structure.
6468 *
6469 * This routine is invoked to release the MSI-X vectors and then disable the
6470 * MSI-X interrupt mode to device with SLI-4 interface spec.
6471 **/
6472static void
6473lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6474{
6475	int index;
6476
6477	/* Free up MSI-X multi-message vectors */
6478	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6479
6480	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6481		free_irq(phba->sli4_hba.msix_entries[index].vector,
6482			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6483	/* Disable MSI-X */
6484	pci_disable_msix(phba->pcidev);
6485
6486	return;
6487}
6488
6489/**
6490 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6491 * @phba: pointer to lpfc hba data structure.
6492 *
6493 * This routine is invoked to enable the MSI interrupt mode to device with
6494 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6495 * to enable the MSI vector. The device driver is responsible for calling
6496 * the request_irq() to register MSI vector with a interrupt the handler,
6497 * which is done in this function.
6498 *
6499 * Return codes
6500 * 	0 - sucessful
6501 * 	other values - error
6502 **/
6503static int
6504lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6505{
6506	int rc, index;
6507
6508	rc = pci_enable_msi(phba->pcidev);
6509	if (!rc)
6510		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6511				"0487 PCI enable MSI mode success.\n");
6512	else {
6513		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6514				"0488 PCI enable MSI mode failed (%d)\n", rc);
6515		return rc;
6516	}
6517
6518	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6519			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6520	if (rc) {
6521		pci_disable_msi(phba->pcidev);
6522		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6523				"0490 MSI request_irq failed (%d)\n", rc);
6524	}
6525
6526	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6527		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6528		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6529	}
6530
6531	return rc;
6532}
6533
6534/**
6535 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6536 * @phba: pointer to lpfc hba data structure.
6537 *
6538 * This routine is invoked to disable the MSI interrupt mode to device with
6539 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6540 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6541 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6542 * its vector.
6543 **/
6544static void
6545lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6546{
6547	free_irq(phba->pcidev->irq, phba);
6548	pci_disable_msi(phba->pcidev);
6549	return;
6550}
6551
6552/**
6553 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6554 * @phba: pointer to lpfc hba data structure.
6555 *
6556 * This routine is invoked to enable device interrupt and associate driver's
6557 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
6558 * interface spec. Depends on the interrupt mode configured to the driver,
6559 * the driver will try to fallback from the configured interrupt mode to an
6560 * interrupt mode which is supported by the platform, kernel, and device in
6561 * the order of:
6562 * MSI-X -> MSI -> IRQ.
6563 *
6564 * Return codes
6565 * 	0 - sucessful
6566 * 	other values - error
6567 **/
6568static uint32_t
6569lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6570{
6571	uint32_t intr_mode = LPFC_INTR_ERROR;
6572	int retval, index;
6573
6574	if (cfg_mode == 2) {
6575		/* Preparation before conf_msi mbox cmd */
6576		retval = 0;
6577		if (!retval) {
6578			/* Now, try to enable MSI-X interrupt mode */
6579			retval = lpfc_sli4_enable_msix(phba);
6580			if (!retval) {
6581				/* Indicate initialization to MSI-X mode */
6582				phba->intr_type = MSIX;
6583				intr_mode = 2;
6584			}
6585		}
6586	}
6587
6588	/* Fallback to MSI if MSI-X initialization failed */
6589	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6590		retval = lpfc_sli4_enable_msi(phba);
6591		if (!retval) {
6592			/* Indicate initialization to MSI mode */
6593			phba->intr_type = MSI;
6594			intr_mode = 1;
6595		}
6596	}
6597
6598	/* Fallback to INTx if both MSI-X/MSI initalization failed */
6599	if (phba->intr_type == NONE) {
6600		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6601				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6602		if (!retval) {
6603			/* Indicate initialization to INTx mode */
6604			phba->intr_type = INTx;
6605			intr_mode = 0;
6606			for (index = 0; index < phba->cfg_fcp_eq_count;
6607			     index++) {
6608				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6609				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6610			}
6611		}
6612	}
6613	return intr_mode;
6614}
6615
6616/**
6617 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6618 * @phba: pointer to lpfc hba data structure.
6619 *
6620 * This routine is invoked to disable device interrupt and disassociate
6621 * the driver's interrupt handler(s) from interrupt vector(s) to device
6622 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6623 * will release the interrupt vector(s) for the message signaled interrupt.
6624 **/
6625static void
6626lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6627{
6628	/* Disable the currently initialized interrupt mode */
6629	if (phba->intr_type == MSIX)
6630		lpfc_sli4_disable_msix(phba);
6631	else if (phba->intr_type == MSI)
6632		lpfc_sli4_disable_msi(phba);
6633	else if (phba->intr_type == INTx)
6634		free_irq(phba->pcidev->irq, phba);
6635
6636	/* Reset interrupt management states */
6637	phba->intr_type = NONE;
6638	phba->sli.slistat.sli_intr = 0;
6639
6640	return;
6641}
6642
6643/**
6644 * lpfc_unset_hba - Unset SLI3 hba device initialization
6645 * @phba: pointer to lpfc hba data structure.
6646 *
6647 * This routine is invoked to unset the HBA device initialization steps to
6648 * a device with SLI-3 interface spec.
6649 **/
6650static void
6651lpfc_unset_hba(struct lpfc_hba *phba)
6652{
6653	struct lpfc_vport *vport = phba->pport;
6654	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6655
6656	spin_lock_irq(shost->host_lock);
6657	vport->load_flag |= FC_UNLOADING;
6658	spin_unlock_irq(shost->host_lock);
6659
6660	lpfc_stop_hba_timers(phba);
6661
6662	phba->pport->work_port_events = 0;
6663
6664	lpfc_sli_hba_down(phba);
6665
6666	lpfc_sli_brdrestart(phba);
6667
6668	lpfc_sli_disable_intr(phba);
6669
6670	return;
6671}
6672
6673/**
6674 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
6675 * @phba: pointer to lpfc hba data structure.
6676 *
6677 * This routine is invoked to unset the HBA device initialization steps to
6678 * a device with SLI-4 interface spec.
6679 **/
6680static void
6681lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6682{
6683	struct lpfc_vport *vport = phba->pport;
6684	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6685
6686	spin_lock_irq(shost->host_lock);
6687	vport->load_flag |= FC_UNLOADING;
6688	spin_unlock_irq(shost->host_lock);
6689
6690	phba->pport->work_port_events = 0;
6691
6692	lpfc_sli4_hba_down(phba);
6693
6694	lpfc_sli4_disable_intr(phba);
6695
6696	return;
6697}
6698
6699/**
6700 * lpfc_sli4_hba_unset - Unset the fcoe hba
6701 * @phba: Pointer to HBA context object.
6702 *
6703 * This function is called in the SLI4 code path to reset the HBA's FCoE
6704 * function. The caller is not required to hold any lock. This routine
6705 * issues PCI function reset mailbox command to reset the FCoE function.
6706 * At the end of the function, it calls lpfc_hba_down_post function to
6707 * free any pending commands.
6708 **/
6709static void
6710lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6711{
6712	int wait_cnt = 0;
6713	LPFC_MBOXQ_t *mboxq;
6714
6715	lpfc_stop_hba_timers(phba);
6716	phba->sli4_hba.intr_enable = 0;
6717
6718	/*
6719	 * Gracefully wait out the potential current outstanding asynchronous
6720	 * mailbox command.
6721	 */
6722
6723	/* First, block any pending async mailbox command from posted */
6724	spin_lock_irq(&phba->hbalock);
6725	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6726	spin_unlock_irq(&phba->hbalock);
6727	/* Now, trying to wait it out if we can */
6728	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6729		msleep(10);
6730		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6731			break;
6732	}
6733	/* Forcefully release the outstanding mailbox command if timed out */
6734	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6735		spin_lock_irq(&phba->hbalock);
6736		mboxq = phba->sli.mbox_active;
6737		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6738		__lpfc_mbox_cmpl_put(phba, mboxq);
6739		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6740		phba->sli.mbox_active = NULL;
6741		spin_unlock_irq(&phba->hbalock);
6742	}
6743
6744	/* Tear down the queues in the HBA */
6745	lpfc_sli4_queue_unset(phba);
6746
6747	/* Disable PCI subsystem interrupt */
6748	lpfc_sli4_disable_intr(phba);
6749
6750	/* Stop kthread signal shall trigger work_done one more time */
6751	kthread_stop(phba->worker_thread);
6752
6753	/* Stop the SLI4 device port */
6754	phba->pport->work_port_events = 0;
6755}
6756
6757/**
6758 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6759 * @pdev: pointer to PCI device
6760 * @pid: pointer to PCI device identifier
6761 *
6762 * This routine is to be called to attach a device with SLI-3 interface spec
6763 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6764 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6765 * information of the device and driver to see if the driver state that it can
6766 * support this kind of device. If the match is successful, the driver core
6767 * invokes this routine. If this routine determines it can claim the HBA, it
6768 * does all the initialization that it needs to do to handle the HBA properly.
6769 *
6770 * Return code
6771 * 	0 - driver can claim the device
6772 * 	negative value - driver can not claim the device
6773 **/
6774static int __devinit
6775lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6776{
6777	struct lpfc_hba   *phba;
6778	struct lpfc_vport *vport = NULL;
6779	struct Scsi_Host  *shost = NULL;
6780	int error;
6781	uint32_t cfg_mode, intr_mode;
6782
6783	/* Allocate memory for HBA structure */
6784	phba = lpfc_hba_alloc(pdev);
6785	if (!phba)
6786		return -ENOMEM;
6787
6788	/* Perform generic PCI device enabling operation */
6789	error = lpfc_enable_pci_dev(phba);
6790	if (error) {
6791		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6792				"1401 Failed to enable pci device.\n");
6793		goto out_free_phba;
6794	}
6795
6796	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
6797	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
6798	if (error)
6799		goto out_disable_pci_dev;
6800
6801	/* Set up SLI-3 specific device PCI memory space */
6802	error = lpfc_sli_pci_mem_setup(phba);
6803	if (error) {
6804		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6805				"1402 Failed to set up pci memory space.\n");
6806		goto out_disable_pci_dev;
6807	}
6808
6809	/* Set up phase-1 common device driver resources */
6810	error = lpfc_setup_driver_resource_phase1(phba);
6811	if (error) {
6812		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6813				"1403 Failed to set up driver resource.\n");
6814		goto out_unset_pci_mem_s3;
6815	}
6816
6817	/* Set up SLI-3 specific device driver resources */
6818	error = lpfc_sli_driver_resource_setup(phba);
6819	if (error) {
6820		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6821				"1404 Failed to set up driver resource.\n");
6822		goto out_unset_pci_mem_s3;
6823	}
6824
6825	/* Initialize and populate the iocb list per host */
6826	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
6827	if (error) {
6828		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6829				"1405 Failed to initialize iocb list.\n");
6830		goto out_unset_driver_resource_s3;
6831	}
6832
6833	/* Set up common device driver resources */
6834	error = lpfc_setup_driver_resource_phase2(phba);
6835	if (error) {
6836		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6837				"1406 Failed to set up driver resource.\n");
6838		goto out_free_iocb_list;
6839	}
6840
6841	/* Create SCSI host to the physical port */
6842	error = lpfc_create_shost(phba);
6843	if (error) {
6844		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6845				"1407 Failed to create scsi host.\n");
6846		goto out_unset_driver_resource;
6847	}
6848
6849	/* Configure sysfs attributes */
6850	vport = phba->pport;
6851	error = lpfc_alloc_sysfs_attr(vport);
6852	if (error) {
6853		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6854				"1476 Failed to allocate sysfs attr\n");
6855		goto out_destroy_shost;
6856	}
6857
6858	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
6859	/* Now, trying to enable interrupt and bring up the device */
6860	cfg_mode = phba->cfg_use_msi;
6861	while (true) {
6862		/* Put device to a known state before enabling interrupt */
6863		lpfc_stop_port(phba);
6864		/* Configure and enable interrupt */
6865		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
6866		if (intr_mode == LPFC_INTR_ERROR) {
6867			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6868					"0431 Failed to enable interrupt.\n");
6869			error = -ENODEV;
6870			goto out_free_sysfs_attr;
6871		}
6872		/* SLI-3 HBA setup */
6873		if (lpfc_sli_hba_setup(phba)) {
6874			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6875					"1477 Failed to set up hba\n");
6876			error = -ENODEV;
6877			goto out_remove_device;
6878		}
6879
6880		/* Wait 50ms for the interrupts of previous mailbox commands */
6881		msleep(50);
6882		/* Check active interrupts on message signaled interrupts */
6883		if (intr_mode == 0 ||
6884		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6885			/* Log the current active interrupt mode */
6886			phba->intr_mode = intr_mode;
6887			lpfc_log_intr_mode(phba, intr_mode);
6888			break;
6889		} else {
6890			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6891					"0447 Configure interrupt mode (%d) "
6892					"failed active interrupt test.\n",
6893					intr_mode);
6894			/* Disable the current interrupt mode */
6895			lpfc_sli_disable_intr(phba);
6896			/* Try next level of interrupt mode */
6897			cfg_mode = --intr_mode;
6898		}
6899	}
6900
6901	/* Perform post initialization setup */
6902	lpfc_post_init_setup(phba);
6903
6904	/* Check if there are static vports to be created. */
6905	lpfc_create_static_vport(phba);
6906
6907	return 0;
6908
6909out_remove_device:
6910	lpfc_unset_hba(phba);
6911out_free_sysfs_attr:
6912	lpfc_free_sysfs_attr(vport);
6913out_destroy_shost:
6914	lpfc_destroy_shost(phba);
6915out_unset_driver_resource:
6916	lpfc_unset_driver_resource_phase2(phba);
6917out_free_iocb_list:
6918	lpfc_free_iocb_list(phba);
6919out_unset_driver_resource_s3:
6920	lpfc_sli_driver_resource_unset(phba);
6921out_unset_pci_mem_s3:
6922	lpfc_sli_pci_mem_unset(phba);
6923out_disable_pci_dev:
6924	lpfc_disable_pci_dev(phba);
6925	if (shost)
6926		scsi_host_put(shost);
6927out_free_phba:
6928	lpfc_hba_free(phba);
6929	return error;
6930}
6931
6932/**
6933 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6934 * @pdev: pointer to PCI device
6935 *
6936 * This routine is to be called to disattach a device with SLI-3 interface
6937 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6938 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6939 * device to be removed from the PCI subsystem properly.
6940 **/
6941static void __devexit
6942lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6943{
6944	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
6945	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6946	struct lpfc_vport **vports;
6947	struct lpfc_hba   *phba = vport->phba;
6948	int i;
6949	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6950
6951	spin_lock_irq(&phba->hbalock);
6952	vport->load_flag |= FC_UNLOADING;
6953	spin_unlock_irq(&phba->hbalock);
6954
6955	lpfc_free_sysfs_attr(vport);
6956
6957	/* Release all the vports against this physical port */
6958	vports = lpfc_create_vport_work_array(phba);
6959	if (vports != NULL)
6960		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6961			fc_vport_terminate(vports[i]->fc_vport);
6962	lpfc_destroy_vport_work_array(phba, vports);
6963
6964	/* Remove FC host and then SCSI host with the physical port */
6965	fc_remove_host(shost);
6966	scsi_remove_host(shost);
6967	lpfc_cleanup(vport);
6968
6969	/*
6970	 * Bring down the SLI Layer. This step disable all interrupts,
6971	 * clears the rings, discards all mailbox commands, and resets
6972	 * the HBA.
6973	 */
6974
6975	/* HBA interrupt will be diabled after this call */
6976	lpfc_sli_hba_down(phba);
6977	/* Stop kthread signal shall trigger work_done one more time */
6978	kthread_stop(phba->worker_thread);
6979	/* Final cleanup of txcmplq and reset the HBA */
6980	lpfc_sli_brdrestart(phba);
6981
6982	lpfc_stop_hba_timers(phba);
6983	spin_lock_irq(&phba->hbalock);
6984	list_del_init(&vport->listentry);
6985	spin_unlock_irq(&phba->hbalock);
6986
6987	lpfc_debugfs_terminate(vport);
6988
6989	/* Disable interrupt */
6990	lpfc_sli_disable_intr(phba);
6991
6992	pci_set_drvdata(pdev, NULL);
6993	scsi_host_put(shost);
6994
6995	/*
6996	 * Call scsi_free before mem_free since scsi bufs are released to their
6997	 * corresponding pools here.
6998	 */
6999	lpfc_scsi_free(phba);
7000	lpfc_mem_free_all(phba);
7001
7002	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7003			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7004
7005	/* Free resources associated with SLI2 interface */
7006	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7007			  phba->slim2p.virt, phba->slim2p.phys);
7008
7009	/* unmap adapter SLIM and Control Registers */
7010	iounmap(phba->ctrl_regs_memmap_p);
7011	iounmap(phba->slim_memmap_p);
7012
7013	lpfc_hba_free(phba);
7014
7015	pci_release_selected_regions(pdev, bars);
7016	pci_disable_device(pdev);
7017}
7018
7019/**
7020 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7021 * @pdev: pointer to PCI device
7022 * @msg: power management message
7023 *
7024 * This routine is to be called from the kernel's PCI subsystem to support
7025 * system Power Management (PM) to device with SLI-3 interface spec. When
7026 * PM invokes this method, it quiesces the device by stopping the driver's
7027 * worker thread for the device, turning off device's interrupt and DMA,
7028 * and bring the device offline. Note that as the driver implements the
7029 * minimum PM requirements to a power-aware driver's PM support for the
7030 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7031 * to the suspend() method call will be treated as SUSPEND and the driver will
7032 * fully reinitialize its device during resume() method call, the driver will
7033 * set device to PCI_D3hot state in PCI config space instead of setting it
7034 * according to the @msg provided by the PM.
7035 *
7036 * Return code
7037 * 	0 - driver suspended the device
7038 * 	Error otherwise
7039 **/
7040static int
7041lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7042{
7043	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7044	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7045
7046	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7047			"0473 PCI device Power Management suspend.\n");
7048
7049	/* Bring down the device */
7050	lpfc_offline_prep(phba);
7051	lpfc_offline(phba);
7052	kthread_stop(phba->worker_thread);
7053
7054	/* Disable interrupt from device */
7055	lpfc_sli_disable_intr(phba);
7056
7057	/* Save device state to PCI config space */
7058	pci_save_state(pdev);
7059	pci_set_power_state(pdev, PCI_D3hot);
7060
7061	return 0;
7062}
7063
7064/**
7065 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7066 * @pdev: pointer to PCI device
7067 *
7068 * This routine is to be called from the kernel's PCI subsystem to support
7069 * system Power Management (PM) to device with SLI-3 interface spec. When PM
7070 * invokes this method, it restores the device's PCI config space state and
7071 * fully reinitializes the device and brings it online. Note that as the
7072 * driver implements the minimum PM requirements to a power-aware driver's
7073 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
7074 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
7075 * driver will fully reinitialize its device during resume() method call,
7076 * the device will be set to PCI_D0 directly in PCI config space before
7077 * restoring the state.
7078 *
7079 * Return code
7080 * 	0 - driver suspended the device
7081 * 	Error otherwise
7082 **/
7083static int
7084lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7085{
7086	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7087	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7088	uint32_t intr_mode;
7089	int error;
7090
7091	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7092			"0452 PCI device Power Management resume.\n");
7093
7094	/* Restore device state from PCI config space */
7095	pci_set_power_state(pdev, PCI_D0);
7096	pci_restore_state(pdev);
7097	if (pdev->is_busmaster)
7098		pci_set_master(pdev);
7099
7100	/* Startup the kernel thread for this host adapter. */
7101	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7102					"lpfc_worker_%d", phba->brd_no);
7103	if (IS_ERR(phba->worker_thread)) {
7104		error = PTR_ERR(phba->worker_thread);
7105		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7106				"0434 PM resume failed to start worker "
7107				"thread: error=x%x.\n", error);
7108		return error;
7109	}
7110
7111	/* Configure and enable interrupt */
7112	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7113	if (intr_mode == LPFC_INTR_ERROR) {
7114		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7115				"0430 PM resume Failed to enable interrupt\n");
7116		return -EIO;
7117	} else
7118		phba->intr_mode = intr_mode;
7119
7120	/* Restart HBA and bring it online */
7121	lpfc_sli_brdrestart(phba);
7122	lpfc_online(phba);
7123
7124	/* Log the current active interrupt mode */
7125	lpfc_log_intr_mode(phba, phba->intr_mode);
7126
7127	return 0;
7128}
7129
7130/**
7131 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7132 * @pdev: pointer to PCI device.
7133 * @state: the current PCI connection state.
7134 *
7135 * This routine is called from the PCI subsystem for I/O error handling to
7136 * device with SLI-3 interface spec. This function is called by the PCI
7137 * subsystem after a PCI bus error affecting this device has been detected.
7138 * When this function is invoked, it will need to stop all the I/Os and
7139 * interrupt(s) to the device. Once that is done, it will return
7140 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7141 * as desired.
7142 *
7143 * Return codes
7144 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7145 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7146 **/
7147static pci_ers_result_t
7148lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7149{
7150	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7151	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7152	struct lpfc_sli *psli = &phba->sli;
7153	struct lpfc_sli_ring  *pring;
7154
7155	if (state == pci_channel_io_perm_failure) {
7156		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7157				"0472 PCI channel I/O permanent failure\n");
7158		/* Block all SCSI devices' I/Os on the host */
7159		lpfc_scsi_dev_block(phba);
7160		/* Clean up all driver's outstanding SCSI I/Os */
7161		lpfc_sli_flush_fcp_rings(phba);
7162		return PCI_ERS_RESULT_DISCONNECT;
7163	}
7164
7165	pci_disable_device(pdev);
7166	/*
7167	 * There may be I/Os dropped by the firmware.
7168	 * Error iocb (I/O) on txcmplq and let the SCSI layer
7169	 * retry it after re-establishing link.
7170	 */
7171	pring = &psli->ring[psli->fcp_ring];
7172	lpfc_sli_abort_iocb_ring(phba, pring);
7173
7174	/* Disable interrupt */
7175	lpfc_sli_disable_intr(phba);
7176
7177	/* Request a slot reset. */
7178	return PCI_ERS_RESULT_NEED_RESET;
7179}
7180
7181/**
7182 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7183 * @pdev: pointer to PCI device.
7184 *
7185 * This routine is called from the PCI subsystem for error handling to
7186 * device with SLI-3 interface spec. This is called after PCI bus has been
7187 * reset to restart the PCI card from scratch, as if from a cold-boot.
7188 * During the PCI subsystem error recovery, after driver returns
7189 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7190 * recovery and then call this routine before calling the .resume method
7191 * to recover the device. This function will initialize the HBA device,
7192 * enable the interrupt, but it will just put the HBA to offline state
7193 * without passing any I/O traffic.
7194 *
7195 * Return codes
7196 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7197 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7198 */
7199static pci_ers_result_t
7200lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7201{
7202	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7203	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7204	struct lpfc_sli *psli = &phba->sli;
7205	uint32_t intr_mode;
7206
7207	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7208	if (pci_enable_device_mem(pdev)) {
7209		printk(KERN_ERR "lpfc: Cannot re-enable "
7210			"PCI device after reset.\n");
7211		return PCI_ERS_RESULT_DISCONNECT;
7212	}
7213
7214	pci_restore_state(pdev);
7215	if (pdev->is_busmaster)
7216		pci_set_master(pdev);
7217
7218	spin_lock_irq(&phba->hbalock);
7219	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7220	spin_unlock_irq(&phba->hbalock);
7221
7222	/* Configure and enable interrupt */
7223	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7224	if (intr_mode == LPFC_INTR_ERROR) {
7225		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7226				"0427 Cannot re-enable interrupt after "
7227				"slot reset.\n");
7228		return PCI_ERS_RESULT_DISCONNECT;
7229	} else
7230		phba->intr_mode = intr_mode;
7231
7232	/* Take device offline; this will perform cleanup */
7233	lpfc_offline(phba);
7234	lpfc_sli_brdrestart(phba);
7235
7236	/* Log the current active interrupt mode */
7237	lpfc_log_intr_mode(phba, phba->intr_mode);
7238
7239	return PCI_ERS_RESULT_RECOVERED;
7240}
7241
7242/**
7243 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7244 * @pdev: pointer to PCI device
7245 *
7246 * This routine is called from the PCI subsystem for error handling to device
7247 * with SLI-3 interface spec. It is called when kernel error recovery tells
7248 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7249 * error recovery. After this call, traffic can start to flow from this device
7250 * again.
7251 */
7252static void
7253lpfc_io_resume_s3(struct pci_dev *pdev)
7254{
7255	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7256	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7257
7258	lpfc_online(phba);
7259}
7260
7261/**
7262 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7263 * @phba: pointer to lpfc hba data structure.
7264 *
7265 * returns the number of ELS/CT IOCBs to reserve
7266 **/
7267int
7268lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7269{
7270	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7271
7272	if (phba->sli_rev == LPFC_SLI_REV4) {
7273		if (max_xri <= 100)
7274			return 4;
7275		else if (max_xri <= 256)
7276			return 8;
7277		else if (max_xri <= 512)
7278			return 16;
7279		else if (max_xri <= 1024)
7280			return 32;
7281		else
7282			return 48;
7283	} else
7284		return 0;
7285}
7286
7287/**
7288 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7289 * @pdev: pointer to PCI device
7290 * @pid: pointer to PCI device identifier
7291 *
7292 * This routine is called from the kernel's PCI subsystem to device with
7293 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7294 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7295 * information of the device and driver to see if the driver state that it
7296 * can support this kind of device. If the match is successful, the driver
7297 * core invokes this routine. If this routine determines it can claim the HBA,
7298 * it does all the initialization that it needs to do to handle the HBA
7299 * properly.
7300 *
7301 * Return code
7302 * 	0 - driver can claim the device
7303 * 	negative value - driver can not claim the device
7304 **/
7305static int __devinit
7306lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7307{
7308	struct lpfc_hba   *phba;
7309	struct lpfc_vport *vport = NULL;
7310	struct Scsi_Host  *shost = NULL;
7311	int error;
7312	uint32_t cfg_mode, intr_mode;
7313	int mcnt;
7314
7315	/* Allocate memory for HBA structure */
7316	phba = lpfc_hba_alloc(pdev);
7317	if (!phba)
7318		return -ENOMEM;
7319
7320	/* Perform generic PCI device enabling operation */
7321	error = lpfc_enable_pci_dev(phba);
7322	if (error) {
7323		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7324				"1409 Failed to enable pci device.\n");
7325		goto out_free_phba;
7326	}
7327
7328	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
7329	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7330	if (error)
7331		goto out_disable_pci_dev;
7332
7333	/* Set up SLI-4 specific device PCI memory space */
7334	error = lpfc_sli4_pci_mem_setup(phba);
7335	if (error) {
7336		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7337				"1410 Failed to set up pci memory space.\n");
7338		goto out_disable_pci_dev;
7339	}
7340
7341	/* Set up phase-1 common device driver resources */
7342	error = lpfc_setup_driver_resource_phase1(phba);
7343	if (error) {
7344		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7345				"1411 Failed to set up driver resource.\n");
7346		goto out_unset_pci_mem_s4;
7347	}
7348
7349	/* Set up SLI-4 Specific device driver resources */
7350	error = lpfc_sli4_driver_resource_setup(phba);
7351	if (error) {
7352		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7353				"1412 Failed to set up driver resource.\n");
7354		goto out_unset_pci_mem_s4;
7355	}
7356
7357	/* Initialize and populate the iocb list per host */
7358	error = lpfc_init_iocb_list(phba,
7359			phba->sli4_hba.max_cfg_param.max_xri);
7360	if (error) {
7361		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7362				"1413 Failed to initialize iocb list.\n");
7363		goto out_unset_driver_resource_s4;
7364	}
7365
7366	/* Set up common device driver resources */
7367	error = lpfc_setup_driver_resource_phase2(phba);
7368	if (error) {
7369		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7370				"1414 Failed to set up driver resource.\n");
7371		goto out_free_iocb_list;
7372	}
7373
7374	/* Create SCSI host to the physical port */
7375	error = lpfc_create_shost(phba);
7376	if (error) {
7377		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7378				"1415 Failed to create scsi host.\n");
7379		goto out_unset_driver_resource;
7380	}
7381
7382	/* Configure sysfs attributes */
7383	vport = phba->pport;
7384	error = lpfc_alloc_sysfs_attr(vport);
7385	if (error) {
7386		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7387				"1416 Failed to allocate sysfs attr\n");
7388		goto out_destroy_shost;
7389	}
7390
7391	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7392	/* Now, trying to enable interrupt and bring up the device */
7393	cfg_mode = phba->cfg_use_msi;
7394	while (true) {
7395		/* Put device to a known state before enabling interrupt */
7396		lpfc_stop_port(phba);
7397		/* Configure and enable interrupt */
7398		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7399		if (intr_mode == LPFC_INTR_ERROR) {
7400			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7401					"0426 Failed to enable interrupt.\n");
7402			error = -ENODEV;
7403			goto out_free_sysfs_attr;
7404		}
7405		/* Set up SLI-4 HBA */
7406		if (lpfc_sli4_hba_setup(phba)) {
7407			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7408					"1421 Failed to set up hba\n");
7409			error = -ENODEV;
7410			goto out_disable_intr;
7411		}
7412
7413		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
7414		if (intr_mode != 0)
7415			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7416							    LPFC_ACT_INTR_CNT);
7417
7418		/* Check active interrupts received only for MSI/MSI-X */
7419		if (intr_mode == 0 ||
7420		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7421			/* Log the current active interrupt mode */
7422			phba->intr_mode = intr_mode;
7423			lpfc_log_intr_mode(phba, intr_mode);
7424			break;
7425		}
7426		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7427				"0451 Configure interrupt mode (%d) "
7428				"failed active interrupt test.\n",
7429				intr_mode);
7430		/* Unset the preivous SLI-4 HBA setup */
7431		lpfc_sli4_unset_hba(phba);
7432		/* Try next level of interrupt mode */
7433		cfg_mode = --intr_mode;
7434	}
7435
7436	/* Perform post initialization setup */
7437	lpfc_post_init_setup(phba);
7438
7439	/* Check if there are static vports to be created. */
7440	lpfc_create_static_vport(phba);
7441
7442	return 0;
7443
7444out_disable_intr:
7445	lpfc_sli4_disable_intr(phba);
7446out_free_sysfs_attr:
7447	lpfc_free_sysfs_attr(vport);
7448out_destroy_shost:
7449	lpfc_destroy_shost(phba);
7450out_unset_driver_resource:
7451	lpfc_unset_driver_resource_phase2(phba);
7452out_free_iocb_list:
7453	lpfc_free_iocb_list(phba);
7454out_unset_driver_resource_s4:
7455	lpfc_sli4_driver_resource_unset(phba);
7456out_unset_pci_mem_s4:
7457	lpfc_sli4_pci_mem_unset(phba);
7458out_disable_pci_dev:
7459	lpfc_disable_pci_dev(phba);
7460	if (shost)
7461		scsi_host_put(shost);
7462out_free_phba:
7463	lpfc_hba_free(phba);
7464	return error;
7465}
7466
7467/**
7468 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7469 * @pdev: pointer to PCI device
7470 *
7471 * This routine is called from the kernel's PCI subsystem to device with
7472 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7473 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7474 * device to be removed from the PCI subsystem properly.
7475 **/
7476static void __devexit
7477lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7478{
7479	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7480	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7481	struct lpfc_vport **vports;
7482	struct lpfc_hba *phba = vport->phba;
7483	int i;
7484
7485	/* Mark the device unloading flag */
7486	spin_lock_irq(&phba->hbalock);
7487	vport->load_flag |= FC_UNLOADING;
7488	spin_unlock_irq(&phba->hbalock);
7489
7490	/* Free the HBA sysfs attributes */
7491	lpfc_free_sysfs_attr(vport);
7492
7493	/* Release all the vports against this physical port */
7494	vports = lpfc_create_vport_work_array(phba);
7495	if (vports != NULL)
7496		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7497			fc_vport_terminate(vports[i]->fc_vport);
7498	lpfc_destroy_vport_work_array(phba, vports);
7499
7500	/* Remove FC host and then SCSI host with the physical port */
7501	fc_remove_host(shost);
7502	scsi_remove_host(shost);
7503
7504	/* Perform cleanup on the physical port */
7505	lpfc_cleanup(vport);
7506
7507	/*
7508	 * Bring down the SLI Layer. This step disables all interrupts,
7509	 * clears the rings, discards all mailbox commands, and resets
7510	 * the HBA FCoE function.
7511	 */
7512	lpfc_debugfs_terminate(vport);
7513	lpfc_sli4_hba_unset(phba);
7514
7515	spin_lock_irq(&phba->hbalock);
7516	list_del_init(&vport->listentry);
7517	spin_unlock_irq(&phba->hbalock);
7518
7519	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7520	 * buffers are released to their corresponding pools here.
7521	 */
7522	lpfc_scsi_free(phba);
7523	lpfc_sli4_driver_resource_unset(phba);
7524
7525	/* Unmap adapter Control and Doorbell registers */
7526	lpfc_sli4_pci_mem_unset(phba);
7527
7528	/* Release PCI resources and disable device's PCI function */
7529	scsi_host_put(shost);
7530	lpfc_disable_pci_dev(phba);
7531
7532	/* Finally, free the driver's device data structure */
7533	lpfc_hba_free(phba);
7534
7535	return;
7536}
7537
7538/**
7539 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7540 * @pdev: pointer to PCI device
7541 * @msg: power management message
7542 *
7543 * This routine is called from the kernel's PCI subsystem to support system
7544 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7545 * this method, it quiesces the device by stopping the driver's worker
7546 * thread for the device, turning off device's interrupt and DMA, and bring
7547 * the device offline. Note that as the driver implements the minimum PM
7548 * requirements to a power-aware driver's PM support for suspend/resume -- all
7549 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7550 * method call will be treated as SUSPEND and the driver will fully
7551 * reinitialize its device during resume() method call, the driver will set
7552 * device to PCI_D3hot state in PCI config space instead of setting it
7553 * according to the @msg provided by the PM.
7554 *
7555 * Return code
7556 * 	0 - driver suspended the device
7557 * 	Error otherwise
7558 **/
7559static int
7560lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
7561{
7562	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7563	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7564
7565	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7566			"0298 PCI device Power Management suspend.\n");
7567
7568	/* Bring down the device */
7569	lpfc_offline_prep(phba);
7570	lpfc_offline(phba);
7571	kthread_stop(phba->worker_thread);
7572
7573	/* Disable interrupt from device */
7574	lpfc_sli4_disable_intr(phba);
7575
7576	/* Save device state to PCI config space */
7577	pci_save_state(pdev);
7578	pci_set_power_state(pdev, PCI_D3hot);
7579
7580	return 0;
7581}
7582
7583/**
7584 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7585 * @pdev: pointer to PCI device
7586 *
7587 * This routine is called from the kernel's PCI subsystem to support system
7588 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
7589 * this method, it restores the device's PCI config space state and fully
7590 * reinitializes the device and brings it online. Note that as the driver
7591 * implements the minimum PM requirements to a power-aware driver's PM for
7592 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7593 * to the suspend() method call will be treated as SUSPEND and the driver
7594 * will fully reinitialize its device during resume() method call, the device
7595 * will be set to PCI_D0 directly in PCI config space before restoring the
7596 * state.
7597 *
7598 * Return code
7599 * 	0 - driver suspended the device
7600 * 	Error otherwise
7601 **/
7602static int
7603lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7604{
7605	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7606	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7607	uint32_t intr_mode;
7608	int error;
7609
7610	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7611			"0292 PCI device Power Management resume.\n");
7612
7613	/* Restore device state from PCI config space */
7614	pci_set_power_state(pdev, PCI_D0);
7615	pci_restore_state(pdev);
7616	if (pdev->is_busmaster)
7617		pci_set_master(pdev);
7618
7619	 /* Startup the kernel thread for this host adapter. */
7620	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7621					"lpfc_worker_%d", phba->brd_no);
7622	if (IS_ERR(phba->worker_thread)) {
7623		error = PTR_ERR(phba->worker_thread);
7624		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7625				"0293 PM resume failed to start worker "
7626				"thread: error=x%x.\n", error);
7627		return error;
7628	}
7629
7630	/* Configure and enable interrupt */
7631	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7632	if (intr_mode == LPFC_INTR_ERROR) {
7633		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7634				"0294 PM resume Failed to enable interrupt\n");
7635		return -EIO;
7636	} else
7637		phba->intr_mode = intr_mode;
7638
7639	/* Restart HBA and bring it online */
7640	lpfc_sli_brdrestart(phba);
7641	lpfc_online(phba);
7642
7643	/* Log the current active interrupt mode */
7644	lpfc_log_intr_mode(phba, phba->intr_mode);
7645
7646	return 0;
7647}
7648
7649/**
7650 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7651 * @pdev: pointer to PCI device.
7652 * @state: the current PCI connection state.
7653 *
7654 * This routine is called from the PCI subsystem for error handling to device
7655 * with SLI-4 interface spec. This function is called by the PCI subsystem
7656 * after a PCI bus error affecting this device has been detected. When this
7657 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7658 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7659 * for the PCI subsystem to perform proper recovery as desired.
7660 *
7661 * Return codes
7662 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7663 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7664 **/
7665static pci_ers_result_t
7666lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7667{
7668	return PCI_ERS_RESULT_NEED_RESET;
7669}
7670
7671/**
7672 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7673 * @pdev: pointer to PCI device.
7674 *
7675 * This routine is called from the PCI subsystem for error handling to device
7676 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7677 * restart the PCI card from scratch, as if from a cold-boot. During the
7678 * PCI subsystem error recovery, after the driver returns
7679 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7680 * recovery and then call this routine before calling the .resume method to
7681 * recover the device. This function will initialize the HBA device, enable
7682 * the interrupt, but it will just put the HBA to offline state without
7683 * passing any I/O traffic.
7684 *
7685 * Return codes
7686 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7687 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7688 */
7689static pci_ers_result_t
7690lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7691{
7692	return PCI_ERS_RESULT_RECOVERED;
7693}
7694
7695/**
7696 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7697 * @pdev: pointer to PCI device
7698 *
7699 * This routine is called from the PCI subsystem for error handling to device
7700 * with SLI-4 interface spec. It is called when kernel error recovery tells
7701 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7702 * error recovery. After this call, traffic can start to flow from this device
7703 * again.
7704 **/
7705static void
7706lpfc_io_resume_s4(struct pci_dev *pdev)
7707{
7708	return;
7709}
7710
7711/**
7712 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7713 * @pdev: pointer to PCI device
7714 * @pid: pointer to PCI device identifier
7715 *
7716 * This routine is to be registered to the kernel's PCI subsystem. When an
7717 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7718 * at PCI device-specific information of the device and driver to see if the
7719 * driver state that it can support this kind of device. If the match is
7720 * successful, the driver core invokes this routine. This routine dispatches
7721 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7722 * do all the initialization that it needs to do to handle the HBA device
7723 * properly.
7724 *
7725 * Return code
7726 * 	0 - driver can claim the device
7727 * 	negative value - driver can not claim the device
7728 **/
7729static int __devinit
7730lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7731{
7732	int rc;
7733	struct lpfc_sli_intf intf;
7734
7735	if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
7736		return -ENODEV;
7737
7738	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7739		(bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
7740		rc = lpfc_pci_probe_one_s4(pdev, pid);
7741	else
7742		rc = lpfc_pci_probe_one_s3(pdev, pid);
7743
7744	return rc;
7745}
7746
7747/**
7748 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7749 * @pdev: pointer to PCI device
7750 *
7751 * This routine is to be registered to the kernel's PCI subsystem. When an
7752 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7753 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7754 * remove routine, which will perform all the necessary cleanup for the
7755 * device to be removed from the PCI subsystem properly.
7756 **/
7757static void __devexit
7758lpfc_pci_remove_one(struct pci_dev *pdev)
7759{
7760	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7761	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7762
7763	switch (phba->pci_dev_grp) {
7764	case LPFC_PCI_DEV_LP:
7765		lpfc_pci_remove_one_s3(pdev);
7766		break;
7767	case LPFC_PCI_DEV_OC:
7768		lpfc_pci_remove_one_s4(pdev);
7769		break;
7770	default:
7771		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7772				"1424 Invalid PCI device group: 0x%x\n",
7773				phba->pci_dev_grp);
7774		break;
7775	}
7776	return;
7777}
7778
7779/**
7780 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7781 * @pdev: pointer to PCI device
7782 * @msg: power management message
7783 *
7784 * This routine is to be registered to the kernel's PCI subsystem to support
7785 * system Power Management (PM). When PM invokes this method, it dispatches
7786 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7787 * suspend the device.
7788 *
7789 * Return code
7790 * 	0 - driver suspended the device
7791 * 	Error otherwise
7792 **/
7793static int
7794lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7795{
7796	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7797	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7798	int rc = -ENODEV;
7799
7800	switch (phba->pci_dev_grp) {
7801	case LPFC_PCI_DEV_LP:
7802		rc = lpfc_pci_suspend_one_s3(pdev, msg);
7803		break;
7804	case LPFC_PCI_DEV_OC:
7805		rc = lpfc_pci_suspend_one_s4(pdev, msg);
7806		break;
7807	default:
7808		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7809				"1425 Invalid PCI device group: 0x%x\n",
7810				phba->pci_dev_grp);
7811		break;
7812	}
7813	return rc;
7814}
7815
7816/**
7817 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7818 * @pdev: pointer to PCI device
7819 *
7820 * This routine is to be registered to the kernel's PCI subsystem to support
7821 * system Power Management (PM). When PM invokes this method, it dispatches
7822 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7823 * resume the device.
7824 *
7825 * Return code
7826 * 	0 - driver suspended the device
7827 * 	Error otherwise
7828 **/
7829static int
7830lpfc_pci_resume_one(struct pci_dev *pdev)
7831{
7832	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7833	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7834	int rc = -ENODEV;
7835
7836	switch (phba->pci_dev_grp) {
7837	case LPFC_PCI_DEV_LP:
7838		rc = lpfc_pci_resume_one_s3(pdev);
7839		break;
7840	case LPFC_PCI_DEV_OC:
7841		rc = lpfc_pci_resume_one_s4(pdev);
7842		break;
7843	default:
7844		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7845				"1426 Invalid PCI device group: 0x%x\n",
7846				phba->pci_dev_grp);
7847		break;
7848	}
7849	return rc;
7850}
7851
7852/**
7853 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7854 * @pdev: pointer to PCI device.
7855 * @state: the current PCI connection state.
7856 *
7857 * This routine is registered to the PCI subsystem for error handling. This
7858 * function is called by the PCI subsystem after a PCI bus error affecting
7859 * this device has been detected. When this routine is invoked, it dispatches
7860 * the action to the proper SLI-3 or SLI-4 device error detected handling
7861 * routine, which will perform the proper error detected operation.
7862 *
7863 * Return codes
7864 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7865 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7866 **/
7867static pci_ers_result_t
7868lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7869{
7870	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7871	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7872	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7873
7874	switch (phba->pci_dev_grp) {
7875	case LPFC_PCI_DEV_LP:
7876		rc = lpfc_io_error_detected_s3(pdev, state);
7877		break;
7878	case LPFC_PCI_DEV_OC:
7879		rc = lpfc_io_error_detected_s4(pdev, state);
7880		break;
7881	default:
7882		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7883				"1427 Invalid PCI device group: 0x%x\n",
7884				phba->pci_dev_grp);
7885		break;
7886	}
7887	return rc;
7888}
7889
7890/**
7891 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7892 * @pdev: pointer to PCI device.
7893 *
7894 * This routine is registered to the PCI subsystem for error handling. This
7895 * function is called after PCI bus has been reset to restart the PCI card
7896 * from scratch, as if from a cold-boot. When this routine is invoked, it
7897 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7898 * routine, which will perform the proper device reset.
7899 *
7900 * Return codes
7901 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7902 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7903 **/
7904static pci_ers_result_t
7905lpfc_io_slot_reset(struct pci_dev *pdev)
7906{
7907	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7908	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7909	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7910
7911	switch (phba->pci_dev_grp) {
7912	case LPFC_PCI_DEV_LP:
7913		rc = lpfc_io_slot_reset_s3(pdev);
7914		break;
7915	case LPFC_PCI_DEV_OC:
7916		rc = lpfc_io_slot_reset_s4(pdev);
7917		break;
7918	default:
7919		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7920				"1428 Invalid PCI device group: 0x%x\n",
7921				phba->pci_dev_grp);
7922		break;
7923	}
7924	return rc;
7925}
7926
7927/**
7928 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7929 * @pdev: pointer to PCI device
7930 *
7931 * This routine is registered to the PCI subsystem for error handling. It
7932 * is called when kernel error recovery tells the lpfc driver that it is
7933 * OK to resume normal PCI operation after PCI bus error recovery. When
7934 * this routine is invoked, it dispatches the action to the proper SLI-3
7935 * or SLI-4 device io_resume routine, which will resume the device operation.
7936 **/
7937static void
7938lpfc_io_resume(struct pci_dev *pdev)
7939{
7940	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7941	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7942
7943	switch (phba->pci_dev_grp) {
7944	case LPFC_PCI_DEV_LP:
7945		lpfc_io_resume_s3(pdev);
7946		break;
7947	case LPFC_PCI_DEV_OC:
7948		lpfc_io_resume_s4(pdev);
7949		break;
7950	default:
7951		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7952				"1429 Invalid PCI device group: 0x%x\n",
7953				phba->pci_dev_grp);
7954		break;
7955	}
7956	return;
7957}
7958
7959static struct pci_device_id lpfc_id_table[] = {
7960	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
7961		PCI_ANY_ID, PCI_ANY_ID, },
7962	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
7963		PCI_ANY_ID, PCI_ANY_ID, },
7964	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
7965		PCI_ANY_ID, PCI_ANY_ID, },
7966	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
7967		PCI_ANY_ID, PCI_ANY_ID, },
7968	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
7969		PCI_ANY_ID, PCI_ANY_ID, },
7970	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
7971		PCI_ANY_ID, PCI_ANY_ID, },
7972	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
7973		PCI_ANY_ID, PCI_ANY_ID, },
7974	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
7975		PCI_ANY_ID, PCI_ANY_ID, },
7976	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
7977		PCI_ANY_ID, PCI_ANY_ID, },
7978	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
7979		PCI_ANY_ID, PCI_ANY_ID, },
7980	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
7981		PCI_ANY_ID, PCI_ANY_ID, },
7982	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
7983		PCI_ANY_ID, PCI_ANY_ID, },
7984	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
7985		PCI_ANY_ID, PCI_ANY_ID, },
7986	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
7987		PCI_ANY_ID, PCI_ANY_ID, },
7988	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
7989		PCI_ANY_ID, PCI_ANY_ID, },
7990	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
7991		PCI_ANY_ID, PCI_ANY_ID, },
7992	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
7993		PCI_ANY_ID, PCI_ANY_ID, },
7994	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
7995		PCI_ANY_ID, PCI_ANY_ID, },
7996	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
7997		PCI_ANY_ID, PCI_ANY_ID, },
7998	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
7999		PCI_ANY_ID, PCI_ANY_ID, },
8000	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
8001		PCI_ANY_ID, PCI_ANY_ID, },
8002	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
8003		PCI_ANY_ID, PCI_ANY_ID, },
8004	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
8005		PCI_ANY_ID, PCI_ANY_ID, },
8006	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
8007		PCI_ANY_ID, PCI_ANY_ID, },
8008	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
8009		PCI_ANY_ID, PCI_ANY_ID, },
8010	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
8011		PCI_ANY_ID, PCI_ANY_ID, },
8012	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
8013		PCI_ANY_ID, PCI_ANY_ID, },
8014	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
8015		PCI_ANY_ID, PCI_ANY_ID, },
8016	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
8017		PCI_ANY_ID, PCI_ANY_ID, },
8018	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
8019		PCI_ANY_ID, PCI_ANY_ID, },
8020	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
8021		PCI_ANY_ID, PCI_ANY_ID, },
8022	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
8023		PCI_ANY_ID, PCI_ANY_ID, },
8024	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
8025		PCI_ANY_ID, PCI_ANY_ID, },
8026	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
8027		PCI_ANY_ID, PCI_ANY_ID, },
8028	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
8029		PCI_ANY_ID, PCI_ANY_ID, },
8030	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
8031		PCI_ANY_ID, PCI_ANY_ID, },
8032	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
8033		PCI_ANY_ID, PCI_ANY_ID, },
8034	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
8035		PCI_ANY_ID, PCI_ANY_ID, },
8036	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TS_BE3,
8037		PCI_ANY_ID, PCI_ANY_ID, },
8038	{ 0 }
8039};
8040
8041MODULE_DEVICE_TABLE(pci, lpfc_id_table);
8042
8043static struct pci_error_handlers lpfc_err_handler = {
8044	.error_detected = lpfc_io_error_detected,
8045	.slot_reset = lpfc_io_slot_reset,
8046	.resume = lpfc_io_resume,
8047};
8048
8049static struct pci_driver lpfc_driver = {
8050	.name		= LPFC_DRIVER_NAME,
8051	.id_table	= lpfc_id_table,
8052	.probe		= lpfc_pci_probe_one,
8053	.remove		= __devexit_p(lpfc_pci_remove_one),
8054	.suspend        = lpfc_pci_suspend_one,
8055	.resume		= lpfc_pci_resume_one,
8056	.err_handler    = &lpfc_err_handler,
8057};
8058
8059/**
8060 * lpfc_init - lpfc module initialization routine
8061 *
8062 * This routine is to be invoked when the lpfc module is loaded into the
8063 * kernel. The special kernel macro module_init() is used to indicate the
8064 * role of this routine to the kernel as lpfc module entry point.
8065 *
8066 * Return codes
8067 *   0 - successful
8068 *   -ENOMEM - FC attach transport failed
8069 *   all others - failed
8070 */
8071static int __init
8072lpfc_init(void)
8073{
8074	int error = 0;
8075
8076	printk(LPFC_MODULE_DESC "\n");
8077	printk(LPFC_COPYRIGHT "\n");
8078
8079	if (lpfc_enable_npiv) {
8080		lpfc_transport_functions.vport_create = lpfc_vport_create;
8081		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
8082	}
8083	lpfc_transport_template =
8084				fc_attach_transport(&lpfc_transport_functions);
8085	if (lpfc_transport_template == NULL)
8086		return -ENOMEM;
8087	if (lpfc_enable_npiv) {
8088		lpfc_vport_transport_template =
8089			fc_attach_transport(&lpfc_vport_transport_functions);
8090		if (lpfc_vport_transport_template == NULL) {
8091			fc_release_transport(lpfc_transport_template);
8092			return -ENOMEM;
8093		}
8094	}
8095	error = pci_register_driver(&lpfc_driver);
8096	if (error) {
8097		fc_release_transport(lpfc_transport_template);
8098		if (lpfc_enable_npiv)
8099			fc_release_transport(lpfc_vport_transport_template);
8100	}
8101
8102	return error;
8103}
8104
8105/**
8106 * lpfc_exit - lpfc module removal routine
8107 *
8108 * This routine is invoked when the lpfc module is removed from the kernel.
8109 * The special kernel macro module_exit() is used to indicate the role of
8110 * this routine to the kernel as lpfc module exit point.
8111 */
8112static void __exit
8113lpfc_exit(void)
8114{
8115	pci_unregister_driver(&lpfc_driver);
8116	fc_release_transport(lpfc_transport_template);
8117	if (lpfc_enable_npiv)
8118		fc_release_transport(lpfc_vport_transport_template);
8119	if (_dump_buf_data) {
8120		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
8121				"at 0x%p\n",
8122				(1L << _dump_buf_data_order), _dump_buf_data);
8123		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8124	}
8125
8126	if (_dump_buf_dif) {
8127		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
8128				"at 0x%p\n",
8129				(1L << _dump_buf_dif_order), _dump_buf_dif);
8130		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8131	}
8132}
8133
8134module_init(lpfc_init);
8135module_exit(lpfc_exit);
8136MODULE_LICENSE("GPL");
8137MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8138MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8139MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
8140