lpfc_init.c revision 52d5244096017bbd11164479116baceaede342b0
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

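			/* Convert the license key string to big-endian
			 * 32-bit words; init_key ensures this is done
			 * only once for the static buffer.
			 */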
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished, or we may have
		 * hit a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, it sets the internal async event support flag
 * to 1; otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * overheated anymore.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
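		/* Each of the 6 IEEE WWN bytes yields two hex digits:
		 * nibbles 0-9 map to '0'-'9' (0x30 + j) and nibbles
		 * 10-15 map to 'a'-'f' (0x61 + j - 10).
		 */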
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

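	/* Validate the configured link speed against the adapter's
	 * link-speed capability mask (lmt); fall back to auto if the
	 * requested speed is not supported by this board.
	 */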
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
		&& !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use in stopping the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
	 * moved to the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

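	/* Mark each aborted SCSI buffer clean (no command attached,
	 * successful status) before returning it to the free list.
	 */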
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA shall
 * be taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

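	/* If an I/O completed within the last heartbeat interval there
	 * is no need to ping the HBA; just rearm the timer and return.
	 */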
	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause the I/Os
	 * to be dropped by the firmware. Error out the iocbs (I/Os) on the
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

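/**
 * lpfc_board_errevt_to_mgmt - Send board error event to management application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor event to user space so
 * that a management application can be notified of a board-level port
 * error (LPFC_EVENT_PORTINTERR).
 **/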
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * path twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
			/*
			 * TODO: Attempt port recovery via a port reset.
			 * When fully implemented, the driver should
			 * attempt to recover the port here and return.
			 * For now, log an error and take the port offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
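	/* Walk the PCI VPD resource tags: 0x82 (identifier string) and
	 * 0x91 are skipped over by length, 0x90 (read-only data) is
	 * scanned for the SN and V1-V4 keywords, and 0x78 (end tag)
	 * terminates the parse.
	 */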
1639	while (!finished && (index < (len - 4))) {
1640		switch (vpd[index]) {
1641		case 0x82:
1642		case 0x91:
1643			index += 1;
1644			lenlo = vpd[index];
1645			index += 1;
1646			lenhi = vpd[index];
1647			index += 1;
1648			i = ((((unsigned short)lenhi) << 8) + lenlo);
1649			index += i;
1650			break;
1651		case 0x90:
1652			index += 1;
1653			lenlo = vpd[index];
1654			index += 1;
1655			lenhi = vpd[index];
1656			index += 1;
1657			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1658			if (Length > len - index)
1659				Length = len - index;
1660			while (Length > 0) {
1661			/* Look for Serial Number */
1662			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1663				index += 2;
1664				i = vpd[index];
1665				index += 1;
1666				j = 0;
1667				Length -= (3+i);
1668				while(i--) {
1669					phba->SerialNumber[j++] = vpd[index++];
1670					if (j == 31)
1671						break;
1672				}
1673				phba->SerialNumber[j] = 0;
1674				continue;
1675			}
1676			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1677				phba->vpd_flag |= VPD_MODEL_DESC;
1678				index += 2;
1679				i = vpd[index];
1680				index += 1;
1681				j = 0;
1682				Length -= (3+i);
1683				while (i--) {
1684					phba->ModelDesc[j++] = vpd[index++];
1685					if (j == 255)
1686						break;
1687				}
1688				phba->ModelDesc[j] = 0;
1689				continue;
1690			}
1691			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1692				phba->vpd_flag |= VPD_MODEL_NAME;
1693				index += 2;
1694				i = vpd[index];
1695				index += 1;
1696				j = 0;
1697				Length -= (3+i);
1698				while (i--) {
1699					phba->ModelName[j++] = vpd[index++];
1700					if (j == 79)
1701						break;
1702				}
1703				phba->ModelName[j] = 0;
1704				continue;
1705			}
1706			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1707				phba->vpd_flag |= VPD_PROGRAM_TYPE;
1708				index += 2;
1709				i = vpd[index];
1710				index += 1;
1711				j = 0;
1712				Length -= (3+i);
1713				while (i--) {
1714					phba->ProgramType[j++] = vpd[index++];
1715					if (j == 255)
1716						break;
1717				}
1718				phba->ProgramType[j] = 0;
1719				continue;
1720			}
1721			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1722				phba->vpd_flag |= VPD_PORT;
1723				index += 2;
1724				i = vpd[index];
1725				index += 1;
1726				j = 0;
1727				Length -= (3+i);
1728				while (i--) {
1729					phba->Port[j++] = vpd[index++];
1730					if (j == 19)
1731						break;
1732				}
1733				phba->Port[j] = 0;
1734				continue;
1735			}
1736			else {
1737				index += 2;
1738				i = vpd[index];
1739				index += 1;
1740				index += i;
1741				Length -= (3 + i);
1742			}
1743			}
1744			finished = 0;
1745			break;
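		/* 0x78: small-resource End Tag - no more VPD to parse */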
1746		case 0x78:
1747			finished = 1;
1748			break;
1749		default:
1750			index++;
1751			break;
1752		}
1753	}
1754
1755	return 1;
1756}
1757
1758/**
1759 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1760 * @phba: pointer to lpfc hba data structure.
1761 * @mdp: pointer to the data structure to hold the derived model name.
1762 * @descp: pointer to the data structure to hold the derived description.
1763 *
1764 * This routine retrieves HBA's description based on its registered PCI device
1765 * ID. The @descp passed into this function points to an array of 256 chars. It
1766 * shall be returned with the model name, maximum speed, and the host bus type.
1767 * The @mdp passed into this function points to an array of 80 chars. When the
1768 * function returns, the @mdp will be filled with the model name.
1769 **/
1770static void
1771lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1772{
1773	lpfc_vpd_t *vp;
1774	uint16_t dev_id = phba->pcidev->device;
1775	int max_speed;
1776	int GE = 0;
1777	int oneConnect = 0; /* default is not a oneConnect */
1778	struct {
1779		char *name;
1780		char *bus;
1781		char *function;
1782	} m = {"<Unknown>", "", ""};
1783
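	/* Nothing to derive if both the model name and description are set */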
1784	if (mdp && mdp[0] != '\0'
1785		&& descp && descp[0] != '\0')
1786		return;
1787
1788	if (phba->lmt & LMT_16Gb)
1789		max_speed = 16;
1790	else if (phba->lmt & LMT_10Gb)
1791		max_speed = 10;
1792	else if (phba->lmt & LMT_8Gb)
1793		max_speed = 8;
1794	else if (phba->lmt & LMT_4Gb)
1795		max_speed = 4;
1796	else if (phba->lmt & LMT_2Gb)
1797		max_speed = 2;
1798	else
1799		max_speed = 1;
1800
1801	vp = &phba->vpd;
1802
1803	switch (dev_id) {
1804	case PCI_DEVICE_ID_FIREFLY:
1805		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1806		break;
1807	case PCI_DEVICE_ID_SUPERFLY:
1808		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1809			m = (typeof(m)){"LP7000", "PCI",
1810					"Fibre Channel Adapter"};
1811		else
1812			m = (typeof(m)){"LP7000E", "PCI",
1813					"Fibre Channel Adapter"};
1814		break;
1815	case PCI_DEVICE_ID_DRAGONFLY:
1816		m = (typeof(m)){"LP8000", "PCI",
1817				"Fibre Channel Adapter"};
1818		break;
1819	case PCI_DEVICE_ID_CENTAUR:
1820		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1821			m = (typeof(m)){"LP9002", "PCI",
1822					"Fibre Channel Adapter"};
1823		else
1824			m = (typeof(m)){"LP9000", "PCI",
1825					"Fibre Channel Adapter"};
1826		break;
1827	case PCI_DEVICE_ID_RFLY:
1828		m = (typeof(m)){"LP952", "PCI",
1829				"Fibre Channel Adapter"};
1830		break;
1831	case PCI_DEVICE_ID_PEGASUS:
1832		m = (typeof(m)){"LP9802", "PCI-X",
1833				"Fibre Channel Adapter"};
1834		break;
1835	case PCI_DEVICE_ID_THOR:
1836		m = (typeof(m)){"LP10000", "PCI-X",
1837				"Fibre Channel Adapter"};
1838		break;
1839	case PCI_DEVICE_ID_VIPER:
1840		m = (typeof(m)){"LPX1000",  "PCI-X",
1841				"Fibre Channel Adapter"};
1842		break;
1843	case PCI_DEVICE_ID_PFLY:
1844		m = (typeof(m)){"LP982", "PCI-X",
1845				"Fibre Channel Adapter"};
1846		break;
1847	case PCI_DEVICE_ID_TFLY:
1848		m = (typeof(m)){"LP1050", "PCI-X",
1849				"Fibre Channel Adapter"};
1850		break;
1851	case PCI_DEVICE_ID_HELIOS:
1852		m = (typeof(m)){"LP11000", "PCI-X2",
1853				"Fibre Channel Adapter"};
1854		break;
1855	case PCI_DEVICE_ID_HELIOS_SCSP:
1856		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1857				"Fibre Channel Adapter"};
1858		break;
1859	case PCI_DEVICE_ID_HELIOS_DCSP:
1860		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1861				"Fibre Channel Adapter"};
1862		break;
1863	case PCI_DEVICE_ID_NEPTUNE:
1864		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1865		break;
1866	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1867		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1868		break;
1869	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1870		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1871		break;
1872	case PCI_DEVICE_ID_BMID:
1873		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1874		break;
1875	case PCI_DEVICE_ID_BSMB:
1876		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1877		break;
1878	case PCI_DEVICE_ID_ZEPHYR:
1879		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1880		break;
1881	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1882		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1883		break;
1884	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1885		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1886		GE = 1;
1887		break;
1888	case PCI_DEVICE_ID_ZMID:
1889		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1890		break;
1891	case PCI_DEVICE_ID_ZSMB:
1892		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1893		break;
1894	case PCI_DEVICE_ID_LP101:
1895		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1896		break;
1897	case PCI_DEVICE_ID_LP10000S:
1898		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1899		break;
1900	case PCI_DEVICE_ID_LP11000S:
1901		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1902		break;
1903	case PCI_DEVICE_ID_LPE11000S:
1904		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1905		break;
1906	case PCI_DEVICE_ID_SAT:
1907		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1908		break;
1909	case PCI_DEVICE_ID_SAT_MID:
1910		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1911		break;
1912	case PCI_DEVICE_ID_SAT_SMB:
1913		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1914		break;
1915	case PCI_DEVICE_ID_SAT_DCSP:
1916		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1917		break;
1918	case PCI_DEVICE_ID_SAT_SCSP:
1919		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1920		break;
1921	case PCI_DEVICE_ID_SAT_S:
1922		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1923		break;
1924	case PCI_DEVICE_ID_HORNET:
1925		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1926		GE = 1;
1927		break;
1928	case PCI_DEVICE_ID_PROTEUS_VF:
1929		m = (typeof(m)){"LPev12000", "PCIe IOV",
1930				"Fibre Channel Adapter"};
1931		break;
1932	case PCI_DEVICE_ID_PROTEUS_PF:
1933		m = (typeof(m)){"LPev12000", "PCIe IOV",
1934				"Fibre Channel Adapter"};
1935		break;
1936	case PCI_DEVICE_ID_PROTEUS_S:
1937		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1938				"Fibre Channel Adapter"};
1939		break;
1940	case PCI_DEVICE_ID_TIGERSHARK:
1941		oneConnect = 1;
1942		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1943		break;
1944	case PCI_DEVICE_ID_TOMCAT:
1945		oneConnect = 1;
1946		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1947		break;
1948	case PCI_DEVICE_ID_FALCON:
1949		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1950				"EmulexSecure Fibre"};
1951		break;
1952	case PCI_DEVICE_ID_BALIUS:
1953		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1954				"Fibre Channel Adapter"};
1955		break;
1956	case PCI_DEVICE_ID_LANCER_FC:
1957	case PCI_DEVICE_ID_LANCER_FC_VF:
1958		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
1959		break;
1960	case PCI_DEVICE_ID_LANCER_FCOE:
1961	case PCI_DEVICE_ID_LANCER_FCOE_VF:
1962		oneConnect = 1;
1963		m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
1964		break;
1965	default:
1966		m = (typeof(m)){"Unknown", "", ""};
1967		break;
1968	}
1969
1970	if (mdp && mdp[0] == '\0')
1971		snprintf(mdp, 79, "%s", m.name);
1972	/*
1973	 * oneConnect HBAs require special processing; they are all initiators
1974	 * and we put the port number on the end.
1975	 */
1976	if (descp && descp[0] == '\0') {
1977		if (oneConnect)
1978			snprintf(descp, 255,
1979				"Emulex OneConnect %s, %s Initiator, Port %s",
1980				m.name, m.function,
1981				phba->Port);
1982		else
1983			snprintf(descp, 255,
1984				"Emulex %s %d%s %s %s",
1985				m.name, max_speed, (GE) ? "GE" : "Gb",
1986				m.bus, m.function);
1987	}
1988}
1989
1990/**
1991 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
1992 * @phba: pointer to lpfc hba data structure.
1993 * @pring: pointer to an IOCB ring.
1994 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1995 *
1996 * This routine posts a given number of IOCBs with the associated DMA buffer
1997 * descriptors specified by the cnt argument to the given IOCB ring.
1998 *
1999 * Return codes
2000 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2001 **/
2002int
2003lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2004{
2005	IOCB_t *icmd;
2006	struct lpfc_iocbq *iocb;
2007	struct lpfc_dmabuf *mp1, *mp2;
2008
2009	cnt += pring->missbufcnt;
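	/* Include any buffers that could not be posted on earlier attempts */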
2010
2011	/* While there are buffers to post */
2012	while (cnt > 0) {
2013		/* Allocate buffer for  command iocb */
2014		iocb = lpfc_sli_get_iocbq(phba);
2015		if (iocb == NULL) {
2016			pring->missbufcnt = cnt;
2017			return cnt;
2018		}
2019		icmd = &iocb->iocb;
2020
2021		/* 2 buffers can be posted per command */
2022		/* Allocate buffer to post */
2023		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2024		if (mp1)
2025			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2026		if (!mp1 || !mp1->virt) {
2027			kfree(mp1);
2028			lpfc_sli_release_iocbq(phba, iocb);
2029			pring->missbufcnt = cnt;
2030			return cnt;
2031		}
2032
2033		INIT_LIST_HEAD(&mp1->list);
2034		/* Allocate buffer to post */
2035		if (cnt > 1) {
2036			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2037			if (mp2)
2038				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2039							    &mp2->phys);
2040			if (!mp2 || !mp2->virt) {
2041				kfree(mp2);
2042				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2043				kfree(mp1);
2044				lpfc_sli_release_iocbq(phba, iocb);
2045				pring->missbufcnt = cnt;
2046				return cnt;
2047			}
2048
2049			INIT_LIST_HEAD(&mp2->list);
2050		} else {
2051			mp2 = NULL;
2052		}
2053
2054		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2055		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2056		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2057		icmd->ulpBdeCount = 1;
2058		cnt--;
2059		if (mp2) {
2060			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2061			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2062			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2063			cnt--;
2064			icmd->ulpBdeCount = 2;
2065		}
2066
2067		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2068		icmd->ulpLe = 1;
2069
2070		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2071		    IOCB_ERROR) {
2072			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2073			kfree(mp1);
2074			cnt++;
2075			if (mp2) {
2076				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2077				kfree(mp2);
2078				cnt++;
2079			}
2080			lpfc_sli_release_iocbq(phba, iocb);
2081			pring->missbufcnt = cnt;
2082			return cnt;
2083		}
2084		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2085		if (mp2)
2086			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2087	}
2088	pring->missbufcnt = 0;
2089	return 0;
2090}
2091
2092/**
2093 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2094 * @phba: pointer to lpfc hba data structure.
2095 *
2096 * This routine posts initial receive IOCB buffers to the ELS ring. The
2097 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2098 * set to 64 IOCBs.
2099 *
2100 * Return codes
2101 *   0 - success (currently always success)
2102 **/
2103static int
2104lpfc_post_rcv_buf(struct lpfc_hba *phba)
2105{
2106	struct lpfc_sli *psli = &phba->sli;
2107
2108	/* Ring 0, ELS / CT buffers */
2109	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2110	/* Ring 2 - FCP no buffers needed */
2111
2112	return 0;
2113}
2114
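/* S(N, V) rotates the 32-bit value V left by N bits (the SHA-1 circular shift) */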
2115#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2116
2117/**
2118 * lpfc_sha_init - Set up initial array of hash table entries
2119 * @HashResultPointer: pointer to an array as hash table.
2120 *
2121 * This routine sets up the initial values in the array of hash table entries
2122 * used by the LC HBAs.
2123 **/
2124static void
2125lpfc_sha_init(uint32_t * HashResultPointer)
2126{
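	/* The five standard SHA-1 initial hash values H0..H4 */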
2127	HashResultPointer[0] = 0x67452301;
2128	HashResultPointer[1] = 0xEFCDAB89;
2129	HashResultPointer[2] = 0x98BADCFE;
2130	HashResultPointer[3] = 0x10325476;
2131	HashResultPointer[4] = 0xC3D2E1F0;
2132}
2133
2134/**
2135 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2136 * @HashResultPointer: pointer to an initial/result hash table.
2137 * @HashWorkingPointer: pointer to a working hash table.
2138 *
2139 * This routine iterates the initial hash table pointed to by @HashResultPointer
2140 * with the values from the working hash table pointed to by @HashWorkingPointer.
2141 * The results are put back into the initial hash table and returned through
2142 * @HashResultPointer as the result hash table.
2143 **/
2144static void
2145lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2146{
2147	int t;
2148	uint32_t TEMP;
2149	uint32_t A, B, C, D, E;
2150	t = 16;
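	/* Expand the 16 seed words into the 80-word message schedule W[16..79] */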
2151	do {
2152		HashWorkingPointer[t] =
2153		    S(1,
2154		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2155								     8] ^
2156		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2157	} while (++t <= 79);
2158	t = 0;
2159	A = HashResultPointer[0];
2160	B = HashResultPointer[1];
2161	C = HashResultPointer[2];
2162	D = HashResultPointer[3];
2163	E = HashResultPointer[4];
2164
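	/*
	 * 80 SHA-1 rounds: Ch for t < 20, parity for t < 40, Maj for
	 * t < 60, and parity again for t < 80, each with its round constant.
	 */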
2165	do {
2166		if (t < 20) {
2167			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2168		} else if (t < 40) {
2169			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2170		} else if (t < 60) {
2171			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2172		} else {
2173			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2174		}
2175		TEMP += S(5, A) + E + HashWorkingPointer[t];
2176		E = D;
2177		D = C;
2178		C = S(30, B);
2179		B = A;
2180		A = TEMP;
2181	} while (++t <= 79);
2182
2183	HashResultPointer[0] += A;
2184	HashResultPointer[1] += B;
2185	HashResultPointer[2] += C;
2186	HashResultPointer[3] += D;
2187	HashResultPointer[4] += E;
2188
2189}
2190
2191/**
2192 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2193 * @RandomChallenge: pointer to the entry of host challenge random number array.
2194 * @HashWorking: pointer to the entry of the working hash array.
2195 *
2196 * This routine calculates the working hash array referred by @HashWorking
2197 * from the challenge random numbers associated with the host, referred by
2198 * @RandomChallenge. The result is put into the entry of the working hash
2199 * array and returned by reference through @HashWorking.
2200 **/
2201static void
2202lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2203{
2204	*HashWorking = (*RandomChallenge ^ *HashWorking);
2205}
2206
2207/**
2208 * lpfc_hba_init - Perform special handling for LC HBA initialization
2209 * @phba: pointer to lpfc hba data structure.
2210 * @hbainit: pointer to an array of unsigned 32-bit integers.
2211 *
2212 * This routine performs the special handling for LC HBA initialization.
2213 **/
2214void
2215lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2216{
2217	int t;
2218	uint32_t *HashWorking;
2219	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2220
2221	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2222	if (!HashWorking)
2223		return;
2224
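	/* Seed both ends of the working array with the two 32-bit WWNN words */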
2225	HashWorking[0] = HashWorking[78] = *pwwnn++;
2226	HashWorking[1] = HashWorking[79] = *pwwnn;
2227
2228	for (t = 0; t < 7; t++)
2229		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2230
2231	lpfc_sha_init(hbainit);
2232	lpfc_sha_iterate(hbainit, HashWorking);
2233	kfree(HashWorking);
2234}
2235
2236/**
2237 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2238 * @vport: pointer to a virtual N_Port data structure.
2239 *
2240 * This routine performs the necessary cleanups before deleting the @vport.
2241 * It invokes the discovery state machine to perform necessary state
2242 * transitions and to release the ndlps associated with the @vport. Note,
2243 * the physical port is treated as @vport 0.
2244 **/
2245void
2246lpfc_cleanup(struct lpfc_vport *vport)
2247{
2248	struct lpfc_hba   *phba = vport->phba;
2249	struct lpfc_nodelist *ndlp, *next_ndlp;
2250	int i = 0;
2251
2252	if (phba->link_state > LPFC_LINK_DOWN)
2253		lpfc_port_link_failure(vport);
2254
2255	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2256		if (!NLP_CHK_NODE_ACT(ndlp)) {
2257			ndlp = lpfc_enable_node(vport, ndlp,
2258						NLP_STE_UNUSED_NODE);
2259			if (!ndlp)
2260				continue;
2261			spin_lock_irq(&phba->ndlp_lock);
2262			NLP_SET_FREE_REQ(ndlp);
2263			spin_unlock_irq(&phba->ndlp_lock);
2264			/* Trigger the release of the ndlp memory */
2265			lpfc_nlp_put(ndlp);
2266			continue;
2267		}
2268		spin_lock_irq(&phba->ndlp_lock);
2269		if (NLP_CHK_FREE_REQ(ndlp)) {
2270			/* Free request is already set on this ndlp; skip it */
2271			spin_unlock_irq(&phba->ndlp_lock);
2272			continue;
2273		} else
2274			/* Indicate request for freeing ndlp memory */
2275			NLP_SET_FREE_REQ(ndlp);
2276		spin_unlock_irq(&phba->ndlp_lock);
2277
2278		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2279		    ndlp->nlp_DID == Fabric_DID) {
2280			/* Just free up ndlp with Fabric_DID for vports */
2281			lpfc_nlp_put(ndlp);
2282			continue;
2283		}
2284
2285		if (ndlp->nlp_type & NLP_FABRIC)
2286			lpfc_disc_state_machine(vport, ndlp, NULL,
2287					NLP_EVT_DEVICE_RECOVERY);
2288
2289		lpfc_disc_state_machine(vport, ndlp, NULL,
2290					     NLP_EVT_DEVICE_RM);
2291
2292	}
2293
2294	/* At this point, ALL ndlps should be gone
2295	 * because of the previous NLP_EVT_DEVICE_RM.
2296	 * Let's wait for this to happen, if needed.
2297	 */
2298	while (!list_empty(&vport->fc_nodes)) {
2299		if (i++ > 3000) {
2300			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2301				"0233 Nodelist not empty\n");
2302			list_for_each_entry_safe(ndlp, next_ndlp,
2303						&vport->fc_nodes, nlp_listp) {
2304				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2305						LOG_NODE,
2306						"0282 did:x%x ndlp:x%p "
2307						"usgmap:x%x refcnt:%d\n",
2308						ndlp->nlp_DID, (void *)ndlp,
2309						ndlp->nlp_usg_map,
2310						atomic_read(
2311							&ndlp->kref.refcount));
2312			}
2313			break;
2314		}
2315
2316		/* Wait for any activity on ndlps to settle */
2317		msleep(10);
2318	}
2319	lpfc_cleanup_vports_rrqs(vport, NULL);
2320}
2321
2322/**
2323 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2324 * @vport: pointer to a virtual N_Port data structure.
2325 *
2326 * This routine stops all the timers associated with a @vport. This function
2327 * is invoked before disabling or deleting a @vport. Note that the physical
2328 * port is treated as @vport 0.
2329 **/
2330void
2331lpfc_stop_vport_timers(struct lpfc_vport *vport)
2332{
2333	del_timer_sync(&vport->els_tmofunc);
2334	del_timer_sync(&vport->fc_fdmitmo);
2335	del_timer_sync(&vport->delayed_disc_tmo);
2336	lpfc_can_disctmo(vport);
2337	return;
2338}
2339
2340/**
2341 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2342 * @phba: pointer to lpfc hba data structure.
2343 *
2344 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2345 * caller of this routine should already hold the host lock.
2346 **/
2347void
2348__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2349{
2350	/* Clear pending FCF rediscovery wait flag */
2351	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2352
2353	/* Now, try to stop the timer */
2354	del_timer(&phba->fcf.redisc_wait);
2355}
2356
2357/**
2358 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2359 * @phba: pointer to lpfc hba data structure.
2360 *
2361 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2362 * checks, with the host lock held, whether the FCF rediscovery wait timer
2363 * is pending before proceeding to disable the timer and clear the wait
2364 * timer pending flag.
2365 **/
2366void
2367lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2368{
2369	spin_lock_irq(&phba->hbalock);
2370	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2371		/* FCF rediscovery timer already fired or stopped */
2372		spin_unlock_irq(&phba->hbalock);
2373		return;
2374	}
2375	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2376	/* Clear failover in progress flags */
2377	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2378	spin_unlock_irq(&phba->hbalock);
2379}
2380
2381/**
2382 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2383 * @phba: pointer to lpfc hba data structure.
2384 *
2385 * This routine stops all the timers associated with an HBA. This function is
2386 * invoked before either putting an HBA offline or unloading the driver.
2387 **/
2388void
2389lpfc_stop_hba_timers(struct lpfc_hba *phba)
2390{
2391	lpfc_stop_vport_timers(phba->pport);
2392	del_timer_sync(&phba->sli.mbox_tmo);
2393	del_timer_sync(&phba->fabric_block_timer);
2394	del_timer_sync(&phba->eratt_poll);
2395	del_timer_sync(&phba->hb_tmofunc);
2396	if (phba->sli_rev == LPFC_SLI_REV4) {
2397		del_timer_sync(&phba->rrq_tmr);
2398		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2399	}
2400	phba->hb_outstanding = 0;
2401
2402	switch (phba->pci_dev_grp) {
2403	case LPFC_PCI_DEV_LP:
2404		/* Stop any LightPulse device specific driver timers */
2405		del_timer_sync(&phba->fcp_poll_timer);
2406		break;
2407	case LPFC_PCI_DEV_OC:
2408		/* Stop any OneConnect device specific driver timers */
2409		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2410		break;
2411	default:
2412		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2413				"0297 Invalid device group (x%x)\n",
2414				phba->pci_dev_grp);
2415		break;
2416	}
2417	return;
2418}
2419
2420/**
2421 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2422 * @phba: pointer to lpfc hba data structure.
2423 *
2424 * This routine marks an HBA's management interface as blocked. Once the HBA's
2425 * management interface is marked as blocked, all user space access to the
2426 * HBA, whether through the sysfs interface or the libdfc interface, will be
2427 * blocked. The HBA is set to block the management interface when the driver
2428 * prepares the HBA interface for online or offline.
2429 **/
2430static void
2431lpfc_block_mgmt_io(struct lpfc_hba * phba)
2432{
2433	unsigned long iflag;
2434	uint8_t actcmd = MBX_HEARTBEAT;
2435	unsigned long timeout;
2436
2437
2439	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2440	if (phba->sli.mbox_active)
2441		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2442	spin_unlock_irqrestore(&phba->hbalock, iflag);
2443	/* Determine how long we might wait for the active mailbox
2444	 * command to be gracefully completed by firmware.
2445	 */
2446	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2447			jiffies;
2448	/* Wait for the outstanding mailbox command to complete */
2449	while (phba->sli.mbox_active) {
2450		/* Check active mailbox complete status every 2ms */
2451		msleep(2);
2452		if (time_after(jiffies, timeout)) {
2453			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2454				"2813 Mgmt IO is Blocked %x "
2455				"- mbox cmd %x still active\n",
2456				phba->sli.sli_flag, actcmd);
2457			break;
2458		}
2459	}
2460}
2461
2462/**
2463 * lpfc_online - Initialize and bring an HBA online
2464 * @phba: pointer to lpfc hba data structure.
2465 *
2466 * This routine initializes the HBA and brings it online. During this
2467 * process, the management interface is blocked to prevent user space access
2468 * to the HBA interfering with the driver initialization.
2469 *
2470 * Return codes
2471 *   0 - successful
2472 *   1 - failed
2473 **/
2474int
2475lpfc_online(struct lpfc_hba *phba)
2476{
2477	struct lpfc_vport *vport;
2478	struct lpfc_vport **vports;
2479	int i;
2480
2481	if (!phba)
2482		return 0;
2483	vport = phba->pport;
2484
2485	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2486		return 0;
2487
2488	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2489			"0458 Bring Adapter online\n");
2490
2491	lpfc_block_mgmt_io(phba);
2492
2493	if (!lpfc_sli_queue_setup(phba)) {
2494		lpfc_unblock_mgmt_io(phba);
2495		return 1;
2496	}
2497
2498	if (phba->sli_rev == LPFC_SLI_REV4) {
2499		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2500			lpfc_unblock_mgmt_io(phba);
2501			return 1;
2502		}
2503	} else {
2504		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2505			lpfc_unblock_mgmt_io(phba);
2506			return 1;
2507		}
2508	}
2509
2510	vports = lpfc_create_vport_work_array(phba);
2511	if (vports != NULL)
2512		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2513			struct Scsi_Host *shost;
2514			shost = lpfc_shost_from_vport(vports[i]);
2515			spin_lock_irq(shost->host_lock);
2516			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2517			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2518				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2519			if (phba->sli_rev == LPFC_SLI_REV4)
2520				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2521			spin_unlock_irq(shost->host_lock);
2522		}
2523	lpfc_destroy_vport_work_array(phba, vports);
2524
2525	lpfc_unblock_mgmt_io(phba);
2526	return 0;
2527}
2528
2529/**
2530 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2531 * @phba: pointer to lpfc hba data structure.
2532 *
2533 * This routine marks an HBA's management interface as not blocked. Once the
2534 * HBA's management interface is marked as not blocked, all user space access
2535 * to the HBA, whether through the sysfs interface or the libdfc interface,
2536 * will be allowed. The HBA is set to block the management interface when the
2537 * driver prepares the HBA interface for online or offline, and then set to
2538 * unblock the management interface afterwards.
2539 **/
2540void
2541lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2542{
2543	unsigned long iflag;
2544
2545	spin_lock_irqsave(&phba->hbalock, iflag);
2546	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2547	spin_unlock_irqrestore(&phba->hbalock, iflag);
2548}
2549
2550/**
2551 * lpfc_offline_prep - Prepare an HBA to be brought offline
2552 * @phba: pointer to lpfc hba data structure.
2553 *
2554 * This routine is invoked to prepare an HBA to be brought offline. It performs
2555 * unregistration login to all the nodes on all vports and flushes the mailbox
2556 * queue to make it ready to be brought offline.
2557 **/
2558void
2559lpfc_offline_prep(struct lpfc_hba * phba)
2560{
2561	struct lpfc_vport *vport = phba->pport;
2562	struct lpfc_nodelist  *ndlp, *next_ndlp;
2563	struct lpfc_vport **vports;
2564	struct Scsi_Host *shost;
2565	int i;
2566
2567	if (vport->fc_flag & FC_OFFLINE_MODE)
2568		return;
2569
2570	lpfc_block_mgmt_io(phba);
2571
2572	lpfc_linkdown(phba);
2573
2574	/* Issue an unreg_login to all nodes on all vports */
2575	vports = lpfc_create_vport_work_array(phba);
2576	if (vports != NULL) {
2577		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2578			if (vports[i]->load_flag & FC_UNLOADING)
2579				continue;
2580			shost = lpfc_shost_from_vport(vports[i]);
2581			spin_lock_irq(shost->host_lock);
2582			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2583			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2584			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2585			spin_unlock_irq(shost->host_lock);
2586
2587			shost =	lpfc_shost_from_vport(vports[i]);
2588			list_for_each_entry_safe(ndlp, next_ndlp,
2589						 &vports[i]->fc_nodes,
2590						 nlp_listp) {
2591				if (!NLP_CHK_NODE_ACT(ndlp))
2592					continue;
2593				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2594					continue;
2595				if (ndlp->nlp_type & NLP_FABRIC) {
2596					lpfc_disc_state_machine(vports[i], ndlp,
2597						NULL, NLP_EVT_DEVICE_RECOVERY);
2598					lpfc_disc_state_machine(vports[i], ndlp,
2599						NULL, NLP_EVT_DEVICE_RM);
2600				}
2601				spin_lock_irq(shost->host_lock);
2602				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2603				spin_unlock_irq(shost->host_lock);
2604				lpfc_unreg_rpi(vports[i], ndlp);
2605			}
2606		}
2607	}
2608	lpfc_destroy_vport_work_array(phba, vports);
2609
2610	lpfc_sli_mbox_sys_shutdown(phba);
2611}
2612
2613/**
2614 * lpfc_offline - Bring an HBA offline
2615 * @phba: pointer to lpfc hba data structure.
2616 *
2617 * This routine actually brings an HBA offline. It stops all the timers
2618 * associated with the HBA, brings down the SLI layer, and eventually
2619 * marks the HBA as in offline state for the upper layer protocol.
2620 **/
2621void
2622lpfc_offline(struct lpfc_hba *phba)
2623{
2624	struct Scsi_Host  *shost;
2625	struct lpfc_vport **vports;
2626	int i;
2627
2628	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2629		return;
2630
2631	/* stop port and all timers associated with this hba */
2632	lpfc_stop_port(phba);
2633	vports = lpfc_create_vport_work_array(phba);
2634	if (vports != NULL)
2635		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2636			lpfc_stop_vport_timers(vports[i]);
2637	lpfc_destroy_vport_work_array(phba, vports);
2638	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2639			"0460 Bring Adapter offline\n");
2640	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2641	   now.  */
2642	lpfc_sli_hba_down(phba);
2643	spin_lock_irq(&phba->hbalock);
2644	phba->work_ha = 0;
2645	spin_unlock_irq(&phba->hbalock);
2646	vports = lpfc_create_vport_work_array(phba);
2647	if (vports != NULL)
2648		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2649			shost = lpfc_shost_from_vport(vports[i]);
2650			spin_lock_irq(shost->host_lock);
2651			vports[i]->work_port_events = 0;
2652			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2653			spin_unlock_irq(shost->host_lock);
2654		}
2655	lpfc_destroy_vport_work_array(phba, vports);
2656}
2657
2658/**
2659 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2660 * @phba: pointer to lpfc hba data structure.
2661 *
2662 * This routine is to free all the SCSI buffers and IOCBs from the driver
2663 * list back to kernel. It is called from lpfc_pci_remove_one to free
2664 * the internal resources before the device is removed from the system.
2665 *
2666 * Return codes
2667 *   0 - successful (for now, it always returns 0)
2668 **/
2669static int
2670lpfc_scsi_free(struct lpfc_hba *phba)
2671{
2672	struct lpfc_scsi_buf *sb, *sb_next;
2673	struct lpfc_iocbq *io, *io_next;
2674
2675	spin_lock_irq(&phba->hbalock);
2676	/* Release all the lpfc_scsi_bufs maintained by this host. */
2677	spin_lock(&phba->scsi_buf_list_lock);
2678	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2679		list_del(&sb->list);
2680		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2681			      sb->dma_handle);
2682		kfree(sb);
2683		phba->total_scsi_bufs--;
2684	}
2685	spin_unlock(&phba->scsi_buf_list_lock);
2686
2687	/* Release all the lpfc_iocbq entries maintained by this host. */
2688	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2689		list_del(&io->list);
2690		kfree(io);
2691		phba->total_iocbq_bufs--;
2692	}
2693	spin_unlock_irq(&phba->hbalock);
2694	return 0;
2695}
2696
2697/**
2698 * lpfc_create_port - Create an FC port
2699 * @phba: pointer to lpfc hba data structure.
2700 * @instance: a unique integer ID to this FC port.
2701 * @dev: pointer to the device data structure.
2702 *
2703 * This routine creates an FC port for the upper layer protocol. The FC port
2704 * can be created on top of either a physical port or a virtual port provided
2705 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2706 * and associates it with the FC port before adding the shost to the SCSI
2707 * layer.
2708 *
2709 * Return codes
2710 *   @vport - pointer to the virtual N_Port data structure.
2711 *   NULL - port create failed.
2712 **/
2713struct lpfc_vport *
2714lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2715{
2716	struct lpfc_vport *vport;
2717	struct Scsi_Host  *shost;
2718	int error = 0;
2719
2720	if (dev != &phba->pcidev->dev)
2721		shost = scsi_host_alloc(&lpfc_vport_template,
2722					sizeof(struct lpfc_vport));
2723	else
2724		shost = scsi_host_alloc(&lpfc_template,
2725					sizeof(struct lpfc_vport));
2726	if (!shost)
2727		goto out;
2728
2729	vport = (struct lpfc_vport *) shost->hostdata;
2730	vport->phba = phba;
2731	vport->load_flag |= FC_LOADING;
2732	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2733	vport->fc_rscn_flush = 0;
2734
2735	lpfc_get_vport_cfgparam(vport);
2736	shost->unique_id = instance;
2737	shost->max_id = LPFC_MAX_TARGET;
2738	shost->max_lun = vport->cfg_max_luns;
2739	shost->this_id = -1;
2740	shost->max_cmd_len = 16;
2741	if (phba->sli_rev == LPFC_SLI_REV4) {
2742		shost->dma_boundary =
2743			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2744		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2745	}
2746
2747	/*
2748	 * Set initial can_queue value since 0 is no longer supported and
2749	 * scsi_add_host will fail. This will be adjusted later based on the
2750	 * max xri value determined in hba setup.
2751	 */
2752	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2753	if (dev != &phba->pcidev->dev) {
2754		shost->transportt = lpfc_vport_transport_template;
2755		vport->port_type = LPFC_NPIV_PORT;
2756	} else {
2757		shost->transportt = lpfc_transport_template;
2758		vport->port_type = LPFC_PHYSICAL_PORT;
2759	}
2760
2761	/* Initialize all internally managed lists. */
2762	INIT_LIST_HEAD(&vport->fc_nodes);
2763	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2764	spin_lock_init(&vport->work_port_lock);
2765
2766	init_timer(&vport->fc_disctmo);
2767	vport->fc_disctmo.function = lpfc_disc_timeout;
2768	vport->fc_disctmo.data = (unsigned long)vport;
2769
2770	init_timer(&vport->fc_fdmitmo);
2771	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2772	vport->fc_fdmitmo.data = (unsigned long)vport;
2773
2774	init_timer(&vport->els_tmofunc);
2775	vport->els_tmofunc.function = lpfc_els_timeout;
2776	vport->els_tmofunc.data = (unsigned long)vport;
2777
2778	init_timer(&vport->delayed_disc_tmo);
2779	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
2780	vport->delayed_disc_tmo.data = (unsigned long)vport;
2781
2782	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2783	if (error)
2784		goto out_put_shost;
2785
2786	spin_lock_irq(&phba->hbalock);
2787	list_add_tail(&vport->listentry, &phba->port_list);
2788	spin_unlock_irq(&phba->hbalock);
2789	return vport;
2790
2791out_put_shost:
2792	scsi_host_put(shost);
2793out:
2794	return NULL;
2795}
2796
2797/**
2798 * destroy_port -  destroy an FC port
2799 * @vport: pointer to an lpfc virtual N_Port data structure.
2800 *
2801 * This routine destroys an FC port from the upper layer protocol. All the
2802 * resources associated with the port are released.
2803 **/
2804void
2805destroy_port(struct lpfc_vport *vport)
2806{
2807	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2808	struct lpfc_hba  *phba = vport->phba;
2809
2810	lpfc_debugfs_terminate(vport);
2811	fc_remove_host(shost);
2812	scsi_remove_host(shost);
2813
2814	spin_lock_irq(&phba->hbalock);
2815	list_del_init(&vport->listentry);
2816	spin_unlock_irq(&phba->hbalock);
2817
2818	lpfc_cleanup(vport);
2819	return;
2820}
2821
2822/**
2823 * lpfc_get_instance - Get a unique integer ID
2824 *
2825 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2826 * uses the kernel idr facility to perform the task.
2827 *
2828 * Return codes:
2829 *   instance - a unique integer ID allocated as the new instance.
2830 *   -1 - lpfc get instance failed.
2831 **/
2832int
2833lpfc_get_instance(void)
2834{
2835	int instance = 0;
2836
2837	/* Assign an unused number */
2838	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2839		return -1;
2840	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2841		return -1;
2842	return instance;
2843}
2844
2845/**
2846 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2847 * @shost: pointer to SCSI host data structure.
2848 * @time: elapsed time of the scan in jiffies.
2849 *
2850 * This routine is called by the SCSI layer with a SCSI host to determine
2851 * whether the scan host is finished.
2852 *
2853 * Note: there is no scan_start function as adapter initialization will have
2854 * asynchronously kicked off the link initialization.
2855 *
2856 * Return codes
2857 *   0 - SCSI host scan is not over yet.
2858 *   1 - SCSI host scan is over.
2859 **/
2860int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2861{
2862	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2863	struct lpfc_hba   *phba = vport->phba;
2864	int stat = 0;
2865
2866	spin_lock_irq(shost->host_lock);
2867
2868	if (vport->load_flag & FC_UNLOADING) {
2869		stat = 1;
2870		goto finished;
2871	}
2872	if (time >= 30 * HZ) {
2873		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2874				"0461 Scanning longer than 30 "
2875				"seconds.  Continuing initialization\n");
2876		stat = 1;
2877		goto finished;
2878	}
2879	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2880		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2881				"0465 Link down longer than 15 "
2882				"seconds.  Continuing initialization\n");
2883		stat = 1;
2884		goto finished;
2885	}
2886
2887	if (vport->port_state != LPFC_VPORT_READY)
2888		goto finished;
2889	if (vport->num_disc_nodes || vport->fc_prli_sent)
2890		goto finished;
2891	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2892		goto finished;
2893	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2894		goto finished;
2895
2896	stat = 1;
2897
2898finished:
2899	spin_unlock_irq(shost->host_lock);
2900	return stat;
2901}
2902
2903/**
2904 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2905 * @shost: pointer to SCSI host data structure.
2906 *
2907 * This routine initializes a given SCSI host attributes on a FC port. The
2908 * SCSI host can be either on top of a physical port or a virtual port.
2909 **/
2910void lpfc_host_attrib_init(struct Scsi_Host *shost)
2911{
2912	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2913	struct lpfc_hba   *phba = vport->phba;
2914	/*
2915	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2916	 */
2917
2918	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2919	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2920	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2921
2922	memset(fc_host_supported_fc4s(shost), 0,
2923	       sizeof(fc_host_supported_fc4s(shost)));
2924	fc_host_supported_fc4s(shost)[2] = 1;
2925	fc_host_supported_fc4s(shost)[7] = 1;
2926
2927	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2928				 sizeof fc_host_symbolic_name(shost));
2929
2930	fc_host_supported_speeds(shost) = 0;
2931	if (phba->lmt & LMT_10Gb)
2932		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2933	if (phba->lmt & LMT_8Gb)
2934		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2935	if (phba->lmt & LMT_4Gb)
2936		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2937	if (phba->lmt & LMT_2Gb)
2938		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2939	if (phba->lmt & LMT_1Gb)
2940		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2941
2942	fc_host_maxframe_size(shost) =
2943		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2944		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2945
2946	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2947
2948	/* This value is also unchanging */
2949	memset(fc_host_active_fc4s(shost), 0,
2950	       sizeof(fc_host_active_fc4s(shost)));
2951	fc_host_active_fc4s(shost)[2] = 1;
2952	fc_host_active_fc4s(shost)[7] = 1;
2953
2954	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2955	spin_lock_irq(shost->host_lock);
2956	vport->load_flag &= ~FC_LOADING;
2957	spin_unlock_irq(shost->host_lock);
2958}
2959
2960/**
2961 * lpfc_stop_port_s3 - Stop SLI3 device port
2962 * @phba: pointer to lpfc hba data structure.
2963 *
2964 * This routine is invoked to stop an SLI3 device port; it stops the device
2965 * from generating interrupts and stops the device driver's timers for the
2966 * device.
2967 **/
2968static void
2969lpfc_stop_port_s3(struct lpfc_hba *phba)
2970{
2971	/* Clear all interrupt enable conditions */
2972	writel(0, phba->HCregaddr);
2973	readl(phba->HCregaddr); /* flush */
2974	/* Clear all pending interrupts */
2975	writel(0xffffffff, phba->HAregaddr);
2976	readl(phba->HAregaddr); /* flush */
2977
2978	/* Reset some HBA SLI setup states */
2979	lpfc_stop_hba_timers(phba);
2980	phba->pport->work_port_events = 0;
2981}
2982
2983/**
2984 * lpfc_stop_port_s4 - Stop SLI4 device port
2985 * @phba: pointer to lpfc hba data structure.
2986 *
2987 * This routine is invoked to stop an SLI4 device port; it stops the device
2988 * from generating interrupts and stops the device driver's timers for the
2989 * device.
2990 **/
2991static void
2992lpfc_stop_port_s4(struct lpfc_hba *phba)
2993{
2994	/* Reset some HBA SLI4 setup states */
2995	lpfc_stop_hba_timers(phba);
2996	phba->pport->work_port_events = 0;
2997	phba->sli4_hba.intr_enable = 0;
2998}
2999
3000/**
3001 * lpfc_stop_port - Wrapper function for stopping hba port
3002 * @phba: Pointer to HBA context object.
3003 *
3004 * This routine wraps the actual SLI3 or SLI4 HBA stop-port routine through
3005 * the API jump table function pointer in the lpfc_hba struct.
3006 **/
3007void
3008lpfc_stop_port(struct lpfc_hba *phba)
3009{
3010	phba->lpfc_stop_port(phba);
3011}
3012
3013/**
3014 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3015 * @phba: Pointer to hba for which this call is being executed.
3016 *
3017 * This routine starts the timer waiting for the FCF rediscovery to complete.
3018 **/
3019void
3020lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3021{
3022	unsigned long fcf_redisc_wait_tmo =
3023		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3024	/* Start fcf rediscovery wait period timer */
3025	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3026	spin_lock_irq(&phba->hbalock);
3027	/* Allow action to new fcf asynchronous event */
3028	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3029	/* Mark the FCF rediscovery pending state */
3030	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3031	spin_unlock_irq(&phba->hbalock);
3032}
3033
3034/**
3035 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3036 * @ptr: unsigned long holding a pointer to the lpfc hba data structure.
3037 *
3038 * This routine is invoked when the wait for FCF table rediscovery times
3039 * out. If new FCF record(s) have been discovered during the wait period,
3040 * a new FCF event shall be added to the FCoE async event list, and the
3041 * worker thread shall then be woken up for processing from the worker
3042 * thread context.
3043 **/
3044void
3045lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3046{
3047	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3048
3049	/* Don't send FCF rediscovery event if timer cancelled */
3050	spin_lock_irq(&phba->hbalock);
3051	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3052		spin_unlock_irq(&phba->hbalock);
3053		return;
3054	}
3055	/* Clear FCF rediscovery timer pending flag */
3056	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3057	/* FCF rediscovery event to worker thread */
3058	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3059	spin_unlock_irq(&phba->hbalock);
3060	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3061			"2776 FCF rediscover quiescent timer expired\n");
3062	/* wake up worker thread */
3063	lpfc_worker_wake_up(phba);
3064}
3065
3066/**
3067 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3068 * @phba: pointer to lpfc hba data structure.
3069 * @acqe_link: pointer to the async link completion queue entry.
3070 *
3071 * This routine is to parse the SLI4 link-attention link fault code and
3072 * translate it into the base driver's read link attention mailbox command
3073 * status.
3074 *
3075 * Return: Link-attention status in terms of base driver's coding.
3076 **/
3077static uint16_t
3078lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3079			   struct lpfc_acqe_link *acqe_link)
3080{
3081	uint16_t latt_fault;
3082
3083	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3084	case LPFC_ASYNC_LINK_FAULT_NONE:
3085	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3086	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3087		latt_fault = 0;
3088		break;
3089	default:
3090		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3091				"0398 Invalid link fault code: x%x\n",
3092				bf_get(lpfc_acqe_link_fault, acqe_link));
3093		latt_fault = MBXERR_ERROR;
3094		break;
3095	}
3096	return latt_fault;
3097}
3098
3099/**
3100 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3101 * @phba: pointer to lpfc hba data structure.
3102 * @acqe_link: pointer to the async link completion queue entry.
3103 *
3104 * This routine is to parse the SLI4 link attention type and translate it
3105 * into the base driver's link attention type coding.
3106 *
3107 * Return: Link attention type in terms of base driver's coding.
3108 **/
3109static uint8_t
3110lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3111			  struct lpfc_acqe_link *acqe_link)
3112{
3113	uint8_t att_type;
3114
3115	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3116	case LPFC_ASYNC_LINK_STATUS_DOWN:
3117	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3118		att_type = LPFC_ATT_LINK_DOWN;
3119		break;
3120	case LPFC_ASYNC_LINK_STATUS_UP:
3121		/* Ignore physical link up events - wait for logical link up */
3122		att_type = LPFC_ATT_RESERVED;
3123		break;
3124	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3125		att_type = LPFC_ATT_LINK_UP;
3126		break;
3127	default:
3128		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3129				"0399 Invalid link attention type: x%x\n",
3130				bf_get(lpfc_acqe_link_status, acqe_link));
3131		att_type = LPFC_ATT_RESERVED;
3132		break;
3133	}
3134	return att_type;
3135}
3136
3137/**
3138 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3139 * @phba: pointer to lpfc hba data structure.
3140 * @acqe_link: pointer to the async link completion queue entry.
3141 *
3142 * This routine is to parse the SLI4 link-attention link speed and translate
3143 * it into the base driver's link-attention link speed coding.
3144 *
3145 * Return: Link-attention link speed in terms of base driver's coding.
3146 **/
3147static uint8_t
3148lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3149				struct lpfc_acqe_link *acqe_link)
3150{
3151	uint8_t link_speed;
3152
3153	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3154	case LPFC_ASYNC_LINK_SPEED_ZERO:
3155	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3156	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3157		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3158		break;
3159	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3160		link_speed = LPFC_LINK_SPEED_1GHZ;
3161		break;
3162	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3163		link_speed = LPFC_LINK_SPEED_10GHZ;
3164		break;
3165	default:
3166		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3167				"0483 Invalid link-attention link speed: x%x\n",
3168				bf_get(lpfc_acqe_link_speed, acqe_link));
3169		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3170		break;
3171	}
3172	return link_speed;
3173}
3174
3175/**
3176 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3177 * @phba: pointer to lpfc hba data structure.
3178 * @acqe_link: pointer to the async link completion queue entry.
3179 *
3180 * This routine is to handle the SLI4 asynchronous FCoE link event.
3181 **/
3182static void
3183lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3184			 struct lpfc_acqe_link *acqe_link)
3185{
3186	struct lpfc_dmabuf *mp;
3187	LPFC_MBOXQ_t *pmb;
3188	MAILBOX_t *mb;
3189	struct lpfc_mbx_read_top *la;
3190	uint8_t att_type;
3191	int rc;
3192
3193	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3194	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3195		return;
3196	phba->fcoe_eventtag = acqe_link->event_tag;
3197	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3198	if (!pmb) {
3199		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3200				"0395 The mboxq allocation failed\n");
3201		return;
3202	}
3203	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3204	if (!mp) {
3205		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3206				"0396 The lpfc_dmabuf allocation failed\n");
3207		goto out_free_pmb;
3208	}
3209	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3210	if (!mp->virt) {
3211		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3212				"0397 The mbuf allocation failed\n");
3213		goto out_free_dmabuf;
3214	}
3215
3216	/* Cleanup any outstanding ELS commands */
3217	lpfc_els_flush_all_cmd(phba);
3218
3219	/* Block ELS IOCBs until we have finished processing the link event */
3220	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3221
3222	/* Update link event statistics */
3223	phba->sli.slistat.link_event++;
3224
3225	/* Create lpfc_handle_latt mailbox command from link ACQE */
3226	lpfc_read_topology(phba, pmb, mp);
3227	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3228	pmb->vport = phba->pport;
3229
3230	/* Keep the link status for extra SLI4 state machine reference */
3231	phba->sli4_hba.link_state.speed =
3232				bf_get(lpfc_acqe_link_speed, acqe_link);
3233	phba->sli4_hba.link_state.duplex =
3234				bf_get(lpfc_acqe_link_duplex, acqe_link);
3235	phba->sli4_hba.link_state.status =
3236				bf_get(lpfc_acqe_link_status, acqe_link);
3237	phba->sli4_hba.link_state.type =
3238				bf_get(lpfc_acqe_link_type, acqe_link);
3239	phba->sli4_hba.link_state.number =
3240				bf_get(lpfc_acqe_link_number, acqe_link);
3241	phba->sli4_hba.link_state.fault =
3242				bf_get(lpfc_acqe_link_fault, acqe_link);
3243	phba->sli4_hba.link_state.logical_speed =
3244			bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3245	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3246			"2900 Async FC/FCoE Link event - Speed:%dGBit "
3247			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3248			"Logical speed:%dMbps Fault:%d\n",
3249			phba->sli4_hba.link_state.speed,
3250			phba->sli4_hba.link_state.duplex,
3251			phba->sli4_hba.link_state.status,
3252			phba->sli4_hba.link_state.type,
3253			phba->sli4_hba.link_state.number,
3254			phba->sli4_hba.link_state.logical_speed * 10,
3255			phba->sli4_hba.link_state.fault);
3256	/*
3257	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3258	 * topology info. Note: Optional for non FC-AL ports.
3259	 */
3260	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3261		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3262		if (rc == MBX_NOT_FINISHED)
3263			goto out_free_dmabuf;
3264		return;
3265	}
3266	/*
3267	 * For FCoE Mode: fill in all the topology information we need and call
3268	 * the READ_TOPOLOGY completion routine to continue without actually
3269	 * sending the READ_TOPOLOGY mailbox command to the port.
3270	 */
3271	/* Parse and translate status field */
3272	mb = &pmb->u.mb;
3273	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3274
3275	/* Parse and translate link attention fields */
3276	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3277	la->eventTag = acqe_link->event_tag;
3278	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3279	bf_set(lpfc_mbx_read_top_link_spd, la,
3280	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3281
3282	/* Fake the following irrelevant fields */
3283	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3284	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3285	bf_set(lpfc_mbx_read_top_il, la, 0);
3286	bf_set(lpfc_mbx_read_top_pb, la, 0);
3287	bf_set(lpfc_mbx_read_top_fa, la, 0);
3288	bf_set(lpfc_mbx_read_top_mm, la, 0);
3289
3290	/* Invoke the lpfc_handle_latt mailbox command callback function */
3291	lpfc_mbx_cmpl_read_topology(phba, pmb);
3292
3293	return;
3294
3295out_free_dmabuf:
	if (mp->virt)
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
3296	kfree(mp);
3297out_free_pmb:
3298	mempool_free(pmb, phba->mbox_mem_pool);
3299}
3300
3301/**
3302 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3303 * @phba: pointer to lpfc hba data structure.
3304 * @acqe_fc: pointer to the async fc completion queue entry.
3305 *
3306 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3307 * that the event was received and then issue a read_topology mailbox command so
3308 * that the rest of the driver will treat it the same as SLI3.
3309 **/
3310static void
3311lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3312{
3313	struct lpfc_dmabuf *mp;
3314	LPFC_MBOXQ_t *pmb;
3315	int rc;
3316
3317	if (bf_get(lpfc_trailer_type, acqe_fc) !=
3318	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3319		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3320				"2895 Non FC link Event detected.(%d)\n",
3321				bf_get(lpfc_trailer_type, acqe_fc));
3322		return;
3323	}
3324	/* Keep the link status for extra SLI4 state machine reference */
3325	phba->sli4_hba.link_state.speed =
3326				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3327	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3328	phba->sli4_hba.link_state.topology =
3329				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3330	phba->sli4_hba.link_state.status =
3331				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3332	phba->sli4_hba.link_state.type =
3333				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3334	phba->sli4_hba.link_state.number =
3335				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3336	phba->sli4_hba.link_state.fault =
3337				bf_get(lpfc_acqe_link_fault, acqe_fc);
3338	phba->sli4_hba.link_state.logical_speed =
3339				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3340	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3341			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
3342			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3343			"%dMbps Fault:%d\n",
3344			phba->sli4_hba.link_state.speed,
3345			phba->sli4_hba.link_state.topology,
3346			phba->sli4_hba.link_state.status,
3347			phba->sli4_hba.link_state.type,
3348			phba->sli4_hba.link_state.number,
3349			phba->sli4_hba.link_state.logical_speed * 10,
3350			phba->sli4_hba.link_state.fault);
3351	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3352	if (!pmb) {
3353		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3354				"2897 The mboxq allocation failed\n");
3355		return;
3356	}
3357	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3358	if (!mp) {
3359		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3360				"2898 The lpfc_dmabuf allocation failed\n");
3361		goto out_free_pmb;
3362	}
3363	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3364	if (!mp->virt) {
3365		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3366				"2899 The mbuf allocation failed\n");
3367		goto out_free_dmabuf;
3368	}
3369
3370	/* Cleanup any outstanding ELS commands */
3371	lpfc_els_flush_all_cmd(phba);
3372
3373	/* Block ELS IOCBs until we have finished processing the link event */
3374	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3375
3376	/* Update link event statistics */
3377	phba->sli.slistat.link_event++;
3378
3379	/* Create lpfc_handle_latt mailbox command from link ACQE */
3380	lpfc_read_topology(phba, pmb, mp);
3381	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3382	pmb->vport = phba->pport;
3383
3384	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3385	if (rc == MBX_NOT_FINISHED)
3386		goto out_free_dmabuf;
3387	return;
3388
3389out_free_dmabuf:
	if (mp->virt)
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
3390	kfree(mp);
3391out_free_pmb:
3392	mempool_free(pmb, phba->mbox_mem_pool);
3393}
3394
3395/**
3396 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3397 * @phba: pointer to lpfc hba data structure.
3398 * @acqe_sli: pointer to the async SLI completion queue entry.
3399 *
3400 * This routine is to handle the SLI4 asynchronous SLI events.
3401 **/
3402static void
3403lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3404{
3405	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3406			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
3407			"x%08x SLI Event Type:%d",
3408			acqe_sli->event_data1, acqe_sli->event_data2,
3409			bf_get(lpfc_trailer_type, acqe_sli));
3410	return;
3411}
3412
3413/**
3414 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3415 * @vport: pointer to vport data structure.
3416 *
3417 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3418 * response to a CVL event.
3419 *
3420 * Return the pointer to the ndlp with the vport if successful, otherwise
3421 * return NULL.
3422 **/
3423static struct lpfc_nodelist *
3424lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3425{
3426	struct lpfc_nodelist *ndlp;
3427	struct Scsi_Host *shost;
3428	struct lpfc_hba *phba;
3429
3430	if (!vport)
3431		return NULL;
3432	phba = vport->phba;
3433	if (!phba)
3434		return NULL;
3435	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3436	if (!ndlp) {
3437		/* Cannot find existing Fabric ndlp, so allocate a new one */
3438		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3439		if (!ndlp)
3440			return NULL;
3441		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3442		/* Set the node type */
3443		ndlp->nlp_type |= NLP_FABRIC;
3444		/* Put ndlp onto node list */
3445		lpfc_enqueue_node(vport, ndlp);
3446	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3447		/* re-setup ndlp without removing from node list */
3448		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3449		if (!ndlp)
3450			return NULL;
3451	}
3452	if ((phba->pport->port_state < LPFC_FLOGI) &&
3453		(phba->pport->port_state != LPFC_VPORT_FAILED))
3454		return NULL;
3455	/* If virtual link is not yet instantiated ignore CVL */
3456	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3457		&& (vport->port_state != LPFC_VPORT_FAILED))
3458		return NULL;
3459	shost = lpfc_shost_from_vport(vport);
3460	if (!shost)
3461		return NULL;
3462	lpfc_linkdown_port(vport);
3463	lpfc_cleanup_pending_mbox(vport);
3464	spin_lock_irq(shost->host_lock);
3465	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3466	spin_unlock_irq(shost->host_lock);
3467
3468	return ndlp;
3469}
3470
3471/**
3472 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3473 * @phba: pointer to lpfc hba data structure.
3474 *
3475 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3476 * response to a FCF dead event.
3477 **/
3478static void
3479lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3480{
3481	struct lpfc_vport **vports;
3482	int i;
3483
3484	vports = lpfc_create_vport_work_array(phba);
3485	if (vports)
3486		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3487			lpfc_sli4_perform_vport_cvl(vports[i]);
3488	lpfc_destroy_vport_work_array(phba, vports);
3489}
3490
3491/**
3492 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3493 * @phba: pointer to lpfc hba data structure.
3494 * @acqe_fip: pointer to the async fcoe completion queue entry.
3495 *
3496 * This routine is to handle the SLI4 asynchronous fcoe event.
3497 **/
3498static void
3499lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3500			struct lpfc_acqe_fip *acqe_fip)
3501{
3502	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3503	int rc;
3504	struct lpfc_vport *vport;
3505	struct lpfc_nodelist *ndlp;
3506	struct Scsi_Host  *shost;
3507	int active_vlink_present;
3508	struct lpfc_vport **vports;
3509	int i;
3510
3511	phba->fc_eventTag = acqe_fip->event_tag;
3512	phba->fcoe_eventtag = acqe_fip->event_tag;
3513	switch (event_type) {
3514	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3515	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3516		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3517			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3518					LOG_DISCOVERY,
3519					"2546 New FCF event, evt_tag:x%x, "
3520					"index:x%x\n",
3521					acqe_fip->event_tag,
3522					acqe_fip->index);
3523		else
3524			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3525					LOG_DISCOVERY,
3526					"2788 FCF param modified event, "
3527					"evt_tag:x%x, index:x%x\n",
3528					acqe_fip->event_tag,
3529					acqe_fip->index);
3530		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3531			/*
3532			 * During period of FCF discovery, read the FCF
3533			 * table record indexed by the event to update
3534			 * FCF roundrobin failover eligible FCF bmask.
3535			 */
3536			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3537					LOG_DISCOVERY,
3538					"2779 Read FCF (x%x) for updating "
3539					"roundrobin FCF failover bmask\n",
3540					acqe_fip->index);
3541			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3542		}
3543
3544		/* If the FCF discovery is in progress, do nothing. */
3545		spin_lock_irq(&phba->hbalock);
3546		if (phba->hba_flag & FCF_TS_INPROG) {
3547			spin_unlock_irq(&phba->hbalock);
3548			break;
3549		}
3550		/* If fast FCF failover rescan event is pending, do nothing */
3551		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3552			spin_unlock_irq(&phba->hbalock);
3553			break;
3554		}
3555
3556		/* If FCF discovery has already completed, do nothing. */
3557		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3558			spin_unlock_irq(&phba->hbalock);
3559			break;
3560		}
3561		spin_unlock_irq(&phba->hbalock);
3562
3563		/* Otherwise, scan the entire FCF table and re-discover SAN */
3564		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3565				"2770 Start FCF table scan per async FCF "
3566				"event, evt_tag:x%x, index:x%x\n",
3567				acqe_fip->event_tag, acqe_fip->index);
3568		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3569						     LPFC_FCOE_FCF_GET_FIRST);
3570		if (rc)
3571			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3572					"2547 Issue FCF scan read FCF mailbox "
3573					"command failed (x%x)\n", rc);
3574		break;
3575
3576	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3577		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3578			"2548 FCF Table full count 0x%x tag 0x%x\n",
3579			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3580			acqe_fip->event_tag);
3581		break;
3582
3583	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3584		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3585			"2549 FCF (x%x) disconnected from network, "
3586			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3587		/*
3588		 * If we are in the middle of FCF failover process, clear
3589		 * the corresponding FCF bit in the roundrobin bitmap.
3590		 */
3591		spin_lock_irq(&phba->hbalock);
3592		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3593			spin_unlock_irq(&phba->hbalock);
3594			/* Update FLOGI FCF failover eligible FCF bmask */
3595			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3596			break;
3597		}
3598		spin_unlock_irq(&phba->hbalock);
3599
3600		/* If the event is not for the currently used FCF, do nothing */
3601		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3602			break;
3603
3604		/*
3605		 * Otherwise, request the port to rediscover the entire FCF
3606		 * table for a fast recovery in case the current FCF is no
3607		 * longer valid, as we are not already in the middle of the
3608		 * FCF failover process.
3609		 */
3610		spin_lock_irq(&phba->hbalock);
3611		/* Mark the fast failover process in progress */
3612		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3613		spin_unlock_irq(&phba->hbalock);
3614
3615		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3616				"2771 Start FCF fast failover process due to "
3617				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3618				"\n", acqe_fip->event_tag, acqe_fip->index);
3619		rc = lpfc_sli4_redisc_fcf_table(phba);
3620		if (rc) {
3621			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3622					LOG_DISCOVERY,
3623					"2772 Issue FCF rediscover mabilbox "
3624					"command failed, fail through to FCF "
3625					"dead event\n");
3626			spin_lock_irq(&phba->hbalock);
3627			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3628			spin_unlock_irq(&phba->hbalock);
3629			/*
3630			 * Last resort will fail over by treating this
3631			 * as a link down to FCF registration.
3632			 */
3633			lpfc_sli4_fcf_dead_failthrough(phba);
3634		} else {
3635			/* Reset FCF roundrobin bmask for new discovery */
3636			memset(phba->fcf.fcf_rr_bmask, 0,
3637			       sizeof(*phba->fcf.fcf_rr_bmask));
3638			/*
3639			 * Handling fast FCF failover to a DEAD FCF event is
3640			 * considered equivalent to receiving CVL on all vports.
3641			 */
3642			lpfc_sli4_perform_all_vport_cvl(phba);
3643		}
3644		break;
3645	case LPFC_FIP_EVENT_TYPE_CVL:
3646		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3647			"2718 Clear Virtual Link Received for VPI 0x%x"
3648			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3649		vport = lpfc_find_vport_by_vpid(phba,
3650				acqe_fip->index - phba->vpi_base);
3651		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3652		if (!ndlp)
3653			break;
3654		active_vlink_present = 0;
3655
3656		vports = lpfc_create_vport_work_array(phba);
3657		if (vports) {
3658			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3659					i++) {
3660				if ((!(vports[i]->fc_flag &
3661					FC_VPORT_CVL_RCVD)) &&
3662					(vports[i]->port_state > LPFC_FDISC)) {
3663					active_vlink_present = 1;
3664					break;
3665				}
3666			}
3667			lpfc_destroy_vport_work_array(phba, vports);
3668		}
3669
3670		if (active_vlink_present) {
3671			/*
3672			 * If there are other active VLinks present,
3673			 * re-instantiate the Vlink using FDISC.
3674			 */
3675			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3676			shost = lpfc_shost_from_vport(vport);
3677			spin_lock_irq(shost->host_lock);
3678			ndlp->nlp_flag |= NLP_DELAY_TMO;
3679			spin_unlock_irq(shost->host_lock);
3680			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3681			vport->port_state = LPFC_FDISC;
3682		} else {
3683			/*
3684			 * Otherwise, request the port to rediscover
3685			 * the entire FCF table for a fast recovery
3686			 * in case the current FCF is no longer
3687			 * valid, if we are not already
3688			 * in the FCF failover process.
3689			 */
3690			spin_lock_irq(&phba->hbalock);
3691			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3692				spin_unlock_irq(&phba->hbalock);
3693				break;
3694			}
3695			/* Mark the fast failover process in progress */
3696			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3697			spin_unlock_irq(&phba->hbalock);
3698			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3699					LOG_DISCOVERY,
3700					"2773 Start FCF failover per CVL, "
3701					"evt_tag:x%x\n", acqe_fip->event_tag);
3702			rc = lpfc_sli4_redisc_fcf_table(phba);
3703			if (rc) {
3704				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3705						LOG_DISCOVERY,
3706						"2774 Issue FCF rediscover "
3707						"mabilbox command failed, "
3708						"through to CVL event\n");
3709				spin_lock_irq(&phba->hbalock);
3710				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3711				spin_unlock_irq(&phba->hbalock);
3712				/*
3713				 * Last resort will be re-try on the
3714				 * currently registered FCF entry.
3715				 */
3716				lpfc_retry_pport_discovery(phba);
3717			} else
3718				/*
3719				 * Reset FCF roundrobin bmask for new
3720				 * discovery.
3721				 */
3722				memset(phba->fcf.fcf_rr_bmask, 0,
3723				       sizeof(*phba->fcf.fcf_rr_bmask));
3724		}
3725		break;
3726	default:
3727		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3728			"0288 Unknown FCoE event type 0x%x event tag "
3729			"0x%x\n", event_type, acqe_fip->event_tag);
3730		break;
3731	}
3732}
3733
3734/**
3735 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3736 * @phba: pointer to lpfc hba data structure.
3737 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3738 *
3739 * This routine is to handle the SLI4 asynchronous dcbx event.
3740 **/
3741static void
3742lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3743			 struct lpfc_acqe_dcbx *acqe_dcbx)
3744{
3745	phba->fc_eventTag = acqe_dcbx->event_tag;
3746	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3747			"0290 The SLI4 DCBX asynchronous event is not "
3748			"handled yet\n");
3749}
3750
3751/**
3752 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3753 * @phba: pointer to lpfc hba data structure.
3754 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3755 *
3756 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3757 * is an asynchronous notification of a logical link speed change.  The Port
3758 * reports the logical link speed in units of 10Mbps.
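 * A reported value of 100, for example, corresponds to 1000 Mbps.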
3759 **/
3760static void
3761lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3762			 struct lpfc_acqe_grp5 *acqe_grp5)
3763{
3764	uint16_t prev_ll_spd;
3765
3766	phba->fc_eventTag = acqe_grp5->event_tag;
3767	phba->fcoe_eventtag = acqe_grp5->event_tag;
3768	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3769	phba->sli4_hba.link_state.logical_speed =
3770		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3771	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3772			"2789 GRP5 Async Event: Updating logical link speed "
3773			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3774			(phba->sli4_hba.link_state.logical_speed*10));
3775}
3776
3777/**
3778 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3779 * @phba: pointer to lpfc hba data structure.
3780 *
3781 * This routine is invoked by the worker thread to process all the pending
3782 * SLI4 asynchronous events.
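 * Each event is dequeued from the sp_asynce_work_queue under hbalock and
 * dispatched by the trailer code carried in its completion queue entry.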
3783 **/
3784void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3785{
3786	struct lpfc_cq_event *cq_event;
3787
3788	/* First, declare the async event has been handled */
3789	spin_lock_irq(&phba->hbalock);
3790	phba->hba_flag &= ~ASYNC_EVENT;
3791	spin_unlock_irq(&phba->hbalock);
3792	/* Now, handle all the async events */
3793	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3794		/* Get the first event from the head of the event queue */
3795		spin_lock_irq(&phba->hbalock);
3796		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3797				 cq_event, struct lpfc_cq_event, list);
3798		spin_unlock_irq(&phba->hbalock);
3799		/* Process the asynchronous event */
3800		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3801		case LPFC_TRAILER_CODE_LINK:
3802			lpfc_sli4_async_link_evt(phba,
3803						 &cq_event->cqe.acqe_link);
3804			break;
3805		case LPFC_TRAILER_CODE_FCOE:
3806			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3807			break;
3808		case LPFC_TRAILER_CODE_DCBX:
3809			lpfc_sli4_async_dcbx_evt(phba,
3810						 &cq_event->cqe.acqe_dcbx);
3811			break;
3812		case LPFC_TRAILER_CODE_GRP5:
3813			lpfc_sli4_async_grp5_evt(phba,
3814						 &cq_event->cqe.acqe_grp5);
3815			break;
3816		case LPFC_TRAILER_CODE_FC:
3817			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3818			break;
3819		case LPFC_TRAILER_CODE_SLI:
3820			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3821			break;
3822		default:
3823			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3824					"1804 Invalid asynchrous event code: "
3825					"x%x\n", bf_get(lpfc_trailer_code,
3826					&cq_event->cqe.mcqe_cmpl));
3827			break;
3828		}
3829		/* Free the completion event processed to the free pool */
3830		lpfc_sli4_cq_event_release(phba, cq_event);
3831	}
3832}
3833
3834/**
3835 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3836 * @phba: pointer to lpfc hba data structure.
3837 *
3838 * This routine is invoked by the worker thread to process FCF table
3839 * rediscovery pending completion event.
3840 **/
3841void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3842{
3843	int rc;
3844
3845	spin_lock_irq(&phba->hbalock);
3846	/* Clear FCF rediscovery timeout event */
3847	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3848	/* Clear driver fast failover FCF record flag */
3849	phba->fcf.failover_rec.flag = 0;
3850	/* Set state for FCF fast failover */
3851	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3852	spin_unlock_irq(&phba->hbalock);
3853
3854	/* Scan FCF table from the first entry to re-discover SAN */
3855	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3856			"2777 Start post-quiescent FCF table scan\n");
3857	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3858	if (rc)
3859		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3860				"2747 Issue FCF scan read FCF mailbox "
3861				"command failed 0x%x\n", rc);
3862}
3863
3864/**
3865 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3866 * @phba: pointer to lpfc hba data structure.
3867 * @dev_grp: The HBA PCI-Device group number.
3868 *
3869 * This routine is invoked to set up the per HBA PCI-Device group function
3870 * API jump table entries.
3871 *
3872 * Return: 0 if success, otherwise -ENODEV
3873 **/
3874int
3875lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3876{
3877	int rc;
3878
3879	/* Set up lpfc PCI-device group */
3880	phba->pci_dev_grp = dev_grp;
3881
3882	/* The LPFC_PCI_DEV_OC uses SLI4 */
3883	if (dev_grp == LPFC_PCI_DEV_OC)
3884		phba->sli_rev = LPFC_SLI_REV4;
3885
3886	/* Set up device INIT API function jump table */
3887	rc = lpfc_init_api_table_setup(phba, dev_grp);
3888	if (rc)
3889		return -ENODEV;
3890	/* Set up SCSI API function jump table */
3891	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3892	if (rc)
3893		return -ENODEV;
3894	/* Set up SLI API function jump table */
3895	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3896	if (rc)
3897		return -ENODEV;
3898	/* Set up MBOX API function jump table */
3899	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3900	if (rc)
3901		return -ENODEV;
3902
3903	return 0;
3904}
3905
3906/**
3907 * lpfc_log_intr_mode - Log the active interrupt mode
3908 * @phba: pointer to lpfc hba data structure.
3909 * @intr_mode: active interrupt mode adopted.
3910 *
3911 * This routine is invoked to log the active interrupt mode adopted for
3912 * the device.
3913 **/
3914static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3915{
3916	switch (intr_mode) {
3917	case 0:
3918		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3919				"0470 Enable INTx interrupt mode.\n");
3920		break;
3921	case 1:
3922		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3923				"0481 Enabled MSI interrupt mode.\n");
3924		break;
3925	case 2:
3926		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3927				"0480 Enabled MSI-X interrupt mode.\n");
3928		break;
3929	default:
3930		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3931				"0482 Illegal interrupt mode.\n");
3932		break;
3933	}
3934	return;
3935}
3936
3937/**
3938 * lpfc_enable_pci_dev - Enable a generic PCI device.
3939 * @phba: pointer to lpfc hba data structure.
3940 *
3941 * This routine is invoked to enable the PCI device that is common to all
3942 * PCI devices.
3943 *
3944 * Return codes
3945 * 	0 - successful
3946 * 	other values - error
3947 **/
3948static int
3949lpfc_enable_pci_dev(struct lpfc_hba *phba)
3950{
3951	struct pci_dev *pdev;
3952	int bars;
3953
3954	/* Obtain PCI device reference */
3955	if (!phba->pcidev)
3956		goto out_error;
3957	else
3958		pdev = phba->pcidev;
3959	/* Select PCI BARs */
3960	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3961	/* Enable PCI device */
3962	if (pci_enable_device_mem(pdev))
3963		goto out_error;
3964	/* Request PCI resource for the device */
3965	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3966		goto out_disable_device;
3967	/* Set up device as PCI master and save state for EEH */
3968	pci_set_master(pdev);
3969	pci_try_set_mwi(pdev);
3970	pci_save_state(pdev);
3971
3972	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3973	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
3974		pdev->needs_freset = 1;
3975
3976	return 0;
3977
3978out_disable_device:
3979	pci_disable_device(pdev);
3980out_error:
3981	return -ENODEV;
3982}
3983
3984/**
3985 * lpfc_disable_pci_dev - Disable a generic PCI device.
3986 * @phba: pointer to lpfc hba data structure.
3987 *
3988 * This routine is invoked to disable the PCI device that is common to all
3989 * PCI devices.
3990 **/
3991static void
3992lpfc_disable_pci_dev(struct lpfc_hba *phba)
3993{
3994	struct pci_dev *pdev;
3995	int bars;
3996
3997	/* Obtain PCI device reference */
3998	if (!phba->pcidev)
3999		return;
4000	else
4001		pdev = phba->pcidev;
4002	/* Select PCI BARs */
4003	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4004	/* Release PCI resource and disable PCI device */
4005	pci_release_selected_regions(pdev, bars);
4006	pci_disable_device(pdev);
4007	/* Null out PCI private reference to driver */
4008	pci_set_drvdata(pdev, NULL);
4009
4010	return;
4011}
4012
4013/**
4014 * lpfc_reset_hba - Reset a hba
4015 * @phba: pointer to lpfc hba data structure.
4016 *
4017 * This routine is invoked to reset a hba device. It brings the HBA
4018 * offline, performs a board restart, and then brings the board back
4019 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4020 * outstanding mailbox commands.
4021 **/
4022void
4023lpfc_reset_hba(struct lpfc_hba *phba)
4024{
4025	/* If resets are disabled then set error state and return. */
4026	if (!phba->cfg_enable_hba_reset) {
4027		phba->link_state = LPFC_HBA_ERROR;
4028		return;
4029	}
4030	lpfc_offline_prep(phba);
4031	lpfc_offline(phba);
4032	lpfc_sli_brdrestart(phba);
4033	lpfc_online(phba);
4034	lpfc_unblock_mgmt_io(phba);
4035}
4036
4037/**
4038 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure.
4040 * @nr_vfn: number of virtual functions to be enabled.
4041 *
4042 * This function enables PCI SR-IOV virtual functions on a physical
4043 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4044 * enable that number of virtual functions on the physical function. As
4045 * not all devices support SR-IOV, a failure from the pci_enable_sriov()
4046 * API call is not considered an error condition for most devices.
4047 **/
4048int
4049lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4050{
4051	struct pci_dev *pdev = phba->pcidev;
4052	int rc;
4053
4054	rc = pci_enable_sriov(pdev, nr_vfn);
4055	if (rc) {
4056		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4057				"2806 Failed to enable sriov on this device "
4058				"with vfn number nr_vf:%d, rc:%d\n",
4059				nr_vfn, rc);
4060	} else
4061		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4062				"2807 Successful enable sriov on this device "
4063				"with vfn number nr_vf:%d\n", nr_vfn);
4064	return rc;
4065}
4066
4067/**
4068 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4069 * @phba: pointer to lpfc hba data structure.
4070 *
4071 * This routine is invoked to set up the driver internal resources specific to
4072 * support the SLI-3 HBA device it attached to.
4073 *
4074 * Return codes
4075 * 	0 - successful
4076 * 	other values - error
4077 **/
4078static int
4079lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4080{
4081	struct lpfc_sli *psli;
4082	int rc;
4083
4084	/*
4085	 * Initialize timers used by driver
4086	 */
4087
4088	/* Heartbeat timer */
4089	init_timer(&phba->hb_tmofunc);
4090	phba->hb_tmofunc.function = lpfc_hb_timeout;
4091	phba->hb_tmofunc.data = (unsigned long)phba;
4092
4093	psli = &phba->sli;
4094	/* MBOX heartbeat timer */
4095	init_timer(&psli->mbox_tmo);
4096	psli->mbox_tmo.function = lpfc_mbox_timeout;
4097	psli->mbox_tmo.data = (unsigned long) phba;
4098	/* FCP polling mode timer */
4099	init_timer(&phba->fcp_poll_timer);
4100	phba->fcp_poll_timer.function = lpfc_poll_timeout;
4101	phba->fcp_poll_timer.data = (unsigned long) phba;
4102	/* Fabric block timer */
4103	init_timer(&phba->fabric_block_timer);
4104	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4105	phba->fabric_block_timer.data = (unsigned long) phba;
4106	/* EA polling mode timer */
4107	init_timer(&phba->eratt_poll);
4108	phba->eratt_poll.function = lpfc_poll_eratt;
4109	phba->eratt_poll.data = (unsigned long) phba;
4110
4111	/* Host attention work mask setup */
4112	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4113	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4114
4115	/* Get all the module params for configuring this host */
4116	lpfc_get_cfgparam(phba);
4117	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4118		phba->menlo_flag |= HBA_MENLO_SUPPORT;
4119		/* check for menlo minimum sg count */
4120		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4121			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4122	}
4123
4124	/*
4125	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4126	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4127	 * 2 segments are added since the IOCB needs a command and response bde.
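	 * For illustration: with cfg_sg_seg_cnt = 64, each pool entry must
	 * hold the FCP command, the FCP response, and 66 (64 + 2) BDEs.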
4128	 */
4129	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4130		sizeof(struct fcp_rsp) +
4131			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4132
4133	if (phba->cfg_enable_bg) {
4134		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4135		phba->cfg_sg_dma_buf_size +=
4136			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4137	}
4138
4139	/* Also reinitialize the host templates with new values. */
4140	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4141	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4142
4143	phba->max_vpi = LPFC_MAX_VPI;
4144	/* This will be set to correct value after config_port mbox */
4145	phba->max_vports = 0;
4146
4147	/*
4148	 * Initialize the SLI Layer to run with lpfc HBAs.
4149	 */
4150	lpfc_sli_setup(phba);
4151	lpfc_sli_queue_setup(phba);
4152
4153	/* Allocate device driver memory */
4154	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4155		return -ENOMEM;
4156
4157	/*
4158	 * Enable sr-iov virtual functions if supported and configured
4159	 * through the module parameter.
4160	 */
4161	if (phba->cfg_sriov_nr_virtfn > 0) {
4162		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4163						 phba->cfg_sriov_nr_virtfn);
4164		if (rc) {
4165			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4166					"2808 Requested number of SR-IOV "
4167					"virtual functions (%d) is not "
4168					"supported\n",
4169					phba->cfg_sriov_nr_virtfn);
4170			phba->cfg_sriov_nr_virtfn = 0;
4171		}
4172	}
4173
4174	return 0;
4175}
4176
4177/**
4178 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4179 * @phba: pointer to lpfc hba data structure.
4180 *
4181 * This routine is invoked to unset the driver internal resources set up
4182 * specific for supporting the SLI-3 HBA device it attached to.
4183 **/
4184static void
4185lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4186{
4187	/* Free device driver memory allocated */
4188	lpfc_mem_free_all(phba);
4189
4190	return;
4191}
4192
4193/**
4194 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4195 * @phba: pointer to lpfc hba data structure.
4196 *
4197 * This routine is invoked to set up the driver internal resources specific to
4198 * support the SLI-4 HBA device it attached to.
4199 *
4200 * Return codes
4201 * 	0 - successful
4202 * 	other values - error
4203 **/
4204static int
4205lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4206{
4207	struct lpfc_sli *psli;
4208	LPFC_MBOXQ_t *mboxq;
4209	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4210	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4211	struct lpfc_mqe *mqe;
4212	int longs, sli_family;
4213
4214	/* Before proceed, wait for POST done and device ready */
4215	rc = lpfc_sli4_post_status_check(phba);
4216	if (rc)
4217		return -ENODEV;
4218
4219	/*
4220	 * Initialize timers used by driver
4221	 */
4222
4223	/* Heartbeat timer */
4224	init_timer(&phba->hb_tmofunc);
4225	phba->hb_tmofunc.function = lpfc_hb_timeout;
4226	phba->hb_tmofunc.data = (unsigned long)phba;
4227	init_timer(&phba->rrq_tmr);
4228	phba->rrq_tmr.function = lpfc_rrq_timeout;
4229	phba->rrq_tmr.data = (unsigned long)phba;
4230
4231	psli = &phba->sli;
4232	/* MBOX heartbeat timer */
4233	init_timer(&psli->mbox_tmo);
4234	psli->mbox_tmo.function = lpfc_mbox_timeout;
4235	psli->mbox_tmo.data = (unsigned long) phba;
4236	/* Fabric block timer */
4237	init_timer(&phba->fabric_block_timer);
4238	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4239	phba->fabric_block_timer.data = (unsigned long) phba;
4240	/* EA polling mode timer */
4241	init_timer(&phba->eratt_poll);
4242	phba->eratt_poll.function = lpfc_poll_eratt;
4243	phba->eratt_poll.data = (unsigned long) phba;
4244	/* FCF rediscover timer */
4245	init_timer(&phba->fcf.redisc_wait);
4246	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4247	phba->fcf.redisc_wait.data = (unsigned long)phba;
4248
4249	/*
4250	 * We need to do a READ_CONFIG mailbox command here before
4251	 * calling lpfc_get_cfgparam. For VFs this will report the
4252	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4253	 * All of the resources allocated for this Port are tied to these
4254	 * values.
4255	 */
4256	/* Get all the module params for configuring this host */
4257	lpfc_get_cfgparam(phba);
4258	phba->max_vpi = LPFC_MAX_VPI;
4259	/* This will be set to correct value after the read_config mbox */
4260	phba->max_vports = 0;
4261
4262	/* Program the default value of vlan_id and fc_map */
4263	phba->valid_vlan = 0;
4264	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4265	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4266	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4267
4268	/*
4269	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4270	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4271	 * 2 segments are added since the IOCB needs a command and response bde.
4272	 * To ensure that the scsi sgl does not cross a 4K page boundary, only
4273	 * sgl sizes that are a power of 2 are used.
4274	 */
4275	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4276		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4277
4278	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4279	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4280	switch (sli_family) {
4281	case LPFC_SLI_INTF_FAMILY_BE2:
4282	case LPFC_SLI_INTF_FAMILY_BE3:
4283		/* There is a single hint for BE - 2 pages per BPL. */
4284		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4285		    LPFC_SLI_INTF_SLI_HINT1_1)
4286			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4287		break;
4288	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4289	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4290	default:
4291		break;
4292	}
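	/*
	 * Double the candidate entry size until it can hold buf_size or the
	 * family's cap is reached; a power-of-two entry size keeps each sgl
	 * from straddling a 4K page boundary.
	 */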
4293	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4294	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4295	     dma_buf_size = dma_buf_size << 1)
4296		;
4297	if (dma_buf_size == max_buf_size)
4298		phba->cfg_sg_seg_cnt = (dma_buf_size -
4299			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4300			(2 * sizeof(struct sli4_sge))) /
4301				sizeof(struct sli4_sge);
4302	phba->cfg_sg_dma_buf_size = dma_buf_size;
4303
4304	/* Initialize buffer queue management fields */
4305	hbq_count = lpfc_sli_hbq_count();
4306	for (i = 0; i < hbq_count; ++i)
4307		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4308	INIT_LIST_HEAD(&phba->rb_pend_list);
4309	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4310	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4311
4312	/*
4313	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4314	 */
4315	/* Initialize the Abort scsi buffer list used by driver */
4316	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4317	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4318	/* This abort list used by worker thread */
4319	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4320
4321	/*
4322	 * Initialize driver internal slow-path work queues
4323	 */
4324
4325	/* Driver internal slow-path CQ Event pool */
4326	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4327	/* Response IOCB work queue list */
4328	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4329	/* Asynchronous event CQ Event work queue list */
4330	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4331	/* Fast-path XRI aborted CQ Event work queue list */
4332	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4333	/* Slow-path XRI aborted CQ Event work queue list */
4334	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4335	/* Receive queue CQ Event work queue list */
4336	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4337
4338	/* Initialize the driver internal SLI layer lists. */
4339	lpfc_sli_setup(phba);
4340	lpfc_sli_queue_setup(phba);
4341
4342	/* Allocate device driver memory */
4343	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4344	if (rc)
4345		return -ENOMEM;
4346
4347	/* IF Type 2 ports get initialized now. */
4348	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4349	    LPFC_SLI_INTF_IF_TYPE_2) {
4350		rc = lpfc_pci_function_reset(phba);
4351		if (unlikely(rc))
4352			return -ENODEV;
4353	}
4354
4355	/* Create the bootstrap mailbox command */
4356	rc = lpfc_create_bootstrap_mbox(phba);
4357	if (unlikely(rc))
4358		goto out_free_mem;
4359
4360	/* Set up the host's endian order with the device. */
4361	rc = lpfc_setup_endian_order(phba);
4362	if (unlikely(rc))
4363		goto out_free_bsmbx;
4364
4365	/* Set up the hba's configuration parameters. */
4366	rc = lpfc_sli4_read_config(phba);
4367	if (unlikely(rc))
4368		goto out_free_bsmbx;
4369
4370	/* IF Type 0 ports get initialized now. */
4371	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4372	    LPFC_SLI_INTF_IF_TYPE_0) {
4373		rc = lpfc_pci_function_reset(phba);
4374		if (unlikely(rc))
4375			goto out_free_bsmbx;
4376	}
4377
4378	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4379						       GFP_KERNEL);
4380	if (!mboxq) {
4381		rc = -ENOMEM;
4382		goto out_free_bsmbx;
4383	}
4384
4385	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4386	lpfc_supported_pages(mboxq);
4387	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4388	if (!rc) {
4389		mqe = &mboxq->u.mqe;
4390		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4391		       LPFC_MAX_SUPPORTED_PAGES);
4392		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4393			switch (pn_page[i]) {
4394			case LPFC_SLI4_PARAMETERS:
4395				phba->sli4_hba.pc_sli4_params.supported = 1;
4396				break;
4397			default:
4398				break;
4399			}
4400		}
4401		/* Read the port's SLI4 Parameters capabilities if supported. */
4402		if (phba->sli4_hba.pc_sli4_params.supported)
4403			rc = lpfc_pc_sli4_params_get(phba, mboxq);
4404		if (rc) {
4405			mempool_free(mboxq, phba->mbox_mem_pool);
4406			rc = -EIO;
4407			goto out_free_bsmbx;
4408		}
4409	}
4410	/*
4411	 * Get sli4 parameters that override parameters from Port capabilities.
4412	 * If this call fails it is not a critical error so continue loading.
4413	 */
4414	lpfc_get_sli4_parameters(phba, mboxq);
4415	mempool_free(mboxq, phba->mbox_mem_pool);
4416	/* Create all the SLI4 queues */
4417	rc = lpfc_sli4_queue_create(phba);
4418	if (rc)
4419		goto out_free_bsmbx;
4420
4421	/* Create driver internal CQE event pool */
4422	rc = lpfc_sli4_cq_event_pool_create(phba);
4423	if (rc)
4424		goto out_destroy_queue;
4425
4426	/* Initialize and populate the sgl list per host */
4427	rc = lpfc_init_sgl_list(phba);
4428	if (rc) {
4429		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4430				"1400 Failed to initialize sgl list.\n");
4431		goto out_destroy_cq_event_pool;
4432	}
4433	rc = lpfc_init_active_sgl_array(phba);
4434	if (rc) {
4435		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4436				"1430 Failed to initialize sgl list.\n");
4437		goto out_free_sgl_list;
4438	}
4439
4440	rc = lpfc_sli4_init_rpi_hdrs(phba);
4441	if (rc) {
4442		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4443				"1432 Failed to initialize rpi headers.\n");
4444		goto out_free_active_sgl;
4445	}
4446
4447	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4448	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4449	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4450					 GFP_KERNEL);
4451	if (!phba->fcf.fcf_rr_bmask) {
4452		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4453				"2759 Failed allocate memory for FCF round "
4454				"robin failover bmask\n");
4455		rc = -ENOMEM;
4456		goto out_remove_rpi_hdrs;
4457	}
4458
4459	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4460				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4461	if (!phba->sli4_hba.fcp_eq_hdl) {
4462		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4463				"2572 Failed allocate memory for fast-path "
4464				"per-EQ handle array\n");
4465		rc = -ENOMEM;
4466		goto out_free_fcf_rr_bmask;
4467	}
4468
4469	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4470				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4471	if (!phba->sli4_hba.msix_entries) {
4472		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4473				"2573 Failed allocate memory for msi-x "
4474				"interrupt vector entries\n");
4475		rc = -ENOMEM;
4476		goto out_free_fcp_eq_hdl;
4477	}
4478
4479	/*
4480	 * Enable sr-iov virtual functions if supported and configured
4481	 * through the module parameter.
4482	 */
4483	if (phba->cfg_sriov_nr_virtfn > 0) {
4484		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4485						 phba->cfg_sriov_nr_virtfn);
4486		if (rc) {
4487			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4488					"3020 Requested number of SR-IOV "
4489					"virtual functions (%d) is not "
4490					"supported\n",
4491					phba->cfg_sriov_nr_virtfn);
4492			phba->cfg_sriov_nr_virtfn = 0;
4493		}
4494	}
4495
4496	return 0;
4497
4498out_free_fcp_eq_hdl:
4499	kfree(phba->sli4_hba.fcp_eq_hdl);
4500out_free_fcf_rr_bmask:
4501	kfree(phba->fcf.fcf_rr_bmask);
4502out_remove_rpi_hdrs:
4503	lpfc_sli4_remove_rpi_hdrs(phba);
4504out_free_active_sgl:
4505	lpfc_free_active_sgl(phba);
4506out_free_sgl_list:
4507	lpfc_free_sgl_list(phba);
4508out_destroy_cq_event_pool:
4509	lpfc_sli4_cq_event_pool_destroy(phba);
4510out_destroy_queue:
4511	lpfc_sli4_queue_destroy(phba);
4512out_free_bsmbx:
4513	lpfc_destroy_bootstrap_mbox(phba);
4514out_free_mem:
4515	lpfc_mem_free(phba);
4516	return rc;
4517}
4518
4519/**
4520 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4521 * @phba: pointer to lpfc hba data structure.
4522 *
4523 * This routine is invoked to unset the driver internal resources set up
4524 * specific for supporting the SLI-4 HBA device it attached to.
4525 **/
4526static void
4527lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4528{
4529	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4530
4531	/* Free memory allocated for msi-x interrupt vector entries */
4532	kfree(phba->sli4_hba.msix_entries);
4533
4534	/* Free memory allocated for fast-path work queue handles */
4535	kfree(phba->sli4_hba.fcp_eq_hdl);
4536
4537	/* Free the allocated rpi headers. */
4538	lpfc_sli4_remove_rpi_hdrs(phba);
4539	lpfc_sli4_remove_rpis(phba);
4540
4541	/* Free eligible FCF index bmask */
4542	kfree(phba->fcf.fcf_rr_bmask);
4543
4544	/* Free the ELS sgl list */
4545	lpfc_free_active_sgl(phba);
4546	lpfc_free_sgl_list(phba);
4547
4548	/* Free the SCSI sgl management array */
4549	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4550
4551	/* Free the SLI4 queues */
4552	lpfc_sli4_queue_destroy(phba);
4553
4554	/* Free the completion queue EQ event pool */
4555	lpfc_sli4_cq_event_release_all(phba);
4556	lpfc_sli4_cq_event_pool_destroy(phba);
4557
4558	/* Free the bsmbx region. */
4559	lpfc_destroy_bootstrap_mbox(phba);
4560
4561	/* Free the SLI Layer memory with SLI4 HBAs */
4562	lpfc_mem_free_all(phba);
4563
4564	/* Free the current connect table */
4565	list_for_each_entry_safe(conn_entry, next_conn_entry,
4566		&phba->fcf_conn_rec_list, list) {
4567		list_del_init(&conn_entry->list);
4568		kfree(conn_entry);
4569	}
4570
4571	return;
4572}
4573
4574/**
4575 * lpfc_init_api_table_setup - Set up init api function jump table
4576 * @phba: The hba struct for which this call is being executed.
4577 * @dev_grp: The HBA PCI-Device group number.
4578 *
4579 * This routine sets up the device INIT interface API function jump table
4580 * in @phba struct.
4581 *
4582 * Returns: 0 - success, -ENODEV - failure.
4583 **/
4584int
4585lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4586{
4587	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4588	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4589	phba->lpfc_selective_reset = lpfc_selective_reset;
4590	switch (dev_grp) {
4591	case LPFC_PCI_DEV_LP:
4592		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4593		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4594		phba->lpfc_stop_port = lpfc_stop_port_s3;
4595		break;
4596	case LPFC_PCI_DEV_OC:
4597		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4598		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4599		phba->lpfc_stop_port = lpfc_stop_port_s4;
4600		break;
4601	default:
4602		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4603				"1431 Invalid HBA PCI-device group: 0x%x\n",
4604				dev_grp);
4605		return -ENODEV;
4606		break;
4607	}
4608	return 0;
4609}
4610
4611/**
4612 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4613 * @phba: pointer to lpfc hba data structure.
4614 *
4615 * This routine is invoked to set up the driver internal resources before the
4616 * device specific resource setup to support the HBA device it attached to.
4617 *
4618 * Return codes
4619 *	0 - successful
4620 *	other values - error
4621 **/
4622static int
4623lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4624{
4625	/*
4626	 * Driver resources common to all SLI revisions
4627	 */
4628	atomic_set(&phba->fast_event_count, 0);
4629	spin_lock_init(&phba->hbalock);
4630
4631	/* Initialize ndlp management spinlock */
4632	spin_lock_init(&phba->ndlp_lock);
4633
4634	INIT_LIST_HEAD(&phba->port_list);
4635	INIT_LIST_HEAD(&phba->work_list);
4636	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4637
4638	/* Initialize the wait queue head for the kernel thread */
4639	init_waitqueue_head(&phba->work_waitq);
4640
4641	/* Initialize the scsi buffer list used by driver for scsi IO */
4642	spin_lock_init(&phba->scsi_buf_list_lock);
4643	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4644
4645	/* Initialize the fabric iocb list */
4646	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4647
4648	/* Initialize list to save ELS buffers */
4649	INIT_LIST_HEAD(&phba->elsbuf);
4650
4651	/* Initialize FCF connection rec list */
4652	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4653
4654	return 0;
4655}
4656
4657/**
4658 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4659 * @phba: pointer to lpfc hba data structure.
4660 *
4661 * This routine is invoked to set up the driver internal resources after the
4662 * device specific resource setup to support the HBA device it attached to.
4663 *
4664 * Return codes
4665 * 	0 - successful
4666 * 	other values - error
4667 **/
4668static int
4669lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4670{
4671	int error;
4672
4673	/* Startup the kernel thread for this host adapter. */
4674	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4675					  "lpfc_worker_%d", phba->brd_no);
4676	if (IS_ERR(phba->worker_thread)) {
4677		error = PTR_ERR(phba->worker_thread);
4678		return error;
4679	}
4680
4681	return 0;
4682}
4683
4684/**
4685 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4686 * @phba: pointer to lpfc hba data structure.
4687 *
4688 * This routine is invoked to unset the driver internal resources set up after
4689 * the device specific resource setup for supporting the HBA device it
4690 * attached to.
4691 **/
4692static void
4693lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4694{
4695	/* Stop kernel worker thread */
4696	kthread_stop(phba->worker_thread);
4697}
4698
4699/**
4700 * lpfc_free_iocb_list - Free iocb list.
4701 * @phba: pointer to lpfc hba data structure.
4702 *
4703 * This routine is invoked to free the driver's IOCB list and memory.
4704 **/
4705static void
4706lpfc_free_iocb_list(struct lpfc_hba *phba)
4707{
4708	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4709
4710	spin_lock_irq(&phba->hbalock);
4711	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4712				 &phba->lpfc_iocb_list, list) {
4713		list_del(&iocbq_entry->list);
4714		kfree(iocbq_entry);
4715		phba->total_iocbq_bufs--;
4716	}
4717	spin_unlock_irq(&phba->hbalock);
4718
4719	return;
4720}
4721
4722/**
4723 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4724 * @phba: pointer to lpfc hba data structure.
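 * @iocb_count: number of iocb entries to allocate and enlist.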
4725 *
4726 * This routine is invoked to allocate and initialize the driver's IOCB
4727 * list and set up the IOCB tag array accordingly.
4728 *
4729 * Return codes
4730 *	0 - successful
4731 *	other values - error
4732 **/
4733static int
4734lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4735{
4736	struct lpfc_iocbq *iocbq_entry = NULL;
4737	uint16_t iotag;
4738	int i;
4739
4740	/* Initialize and populate the iocb list per host.  */
4741	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4742	for (i = 0; i < iocb_count; i++) {
4743		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4744		if (iocbq_entry == NULL) {
4745			printk(KERN_ERR "%s: only allocated %d iocbs of "
4746				"expected %d count. Unloading driver.\n",
4747				__func__, i, iocb_count);
4748			goto out_free_iocbq;
4749		}
4750
4751		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4752		if (iotag == 0) {
4753			kfree(iocbq_entry);
4754			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4755				"Unloading driver.\n", __func__);
4756			goto out_free_iocbq;
4757		}
4758		iocbq_entry->sli4_xritag = NO_XRI;
4759
4760		spin_lock_irq(&phba->hbalock);
4761		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4762		phba->total_iocbq_bufs++;
4763		spin_unlock_irq(&phba->hbalock);
4764	}
4765
4766	return 0;
4767
4768out_free_iocbq:
4769	lpfc_free_iocb_list(phba);
4770
4771	return -ENOMEM;
4772}
4773
4774/**
4775 * lpfc_free_sgl_list - Free sgl list.
4776 * @phba: pointer to lpfc hba data structure.
4777 *
4778 * This routine is invoked to free the driver's sgl list and memory.
4779 **/
4780static void
4781lpfc_free_sgl_list(struct lpfc_hba *phba)
4782{
4783	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4784	LIST_HEAD(sglq_list);
4785
4786	spin_lock_irq(&phba->hbalock);
4787	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4788	spin_unlock_irq(&phba->hbalock);
4789
4790	list_for_each_entry_safe(sglq_entry, sglq_next,
4791				 &sglq_list, list) {
4792		list_del(&sglq_entry->list);
4793		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4794		kfree(sglq_entry);
4795		phba->sli4_hba.total_sglq_bufs--;
4796	}
4797	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4798}
4799
4800/**
4801 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4802 * @phba: pointer to lpfc hba data structure.
4803 *
4804 * This routine is invoked to allocate the driver's active sgl memory.
4805 * This array will hold the sglq_entry's for active IOs.
4806 **/
4807static int
4808lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4809{
4810	int size;
4811	size = sizeof(struct lpfc_sglq *);
4812	size *= phba->sli4_hba.max_cfg_param.max_xri;
4813
4814	phba->sli4_hba.lpfc_sglq_active_list =
4815		kzalloc(size, GFP_KERNEL);
4816	if (!phba->sli4_hba.lpfc_sglq_active_list)
4817		return -ENOMEM;
4818	return 0;
4819}
4820
4821/**
4822 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4823 * @phba: pointer to lpfc hba data structure.
4824 *
4825 * This routine is invoked to walk through the array of active sglq entries
4826 * and free all of the resources.
4827 * This is just a place holder for now.
4828 **/
4829static void
4830lpfc_free_active_sgl(struct lpfc_hba *phba)
4831{
4832	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4833}
4834
4835/**
4836 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4837 * @phba: pointer to lpfc hba data structure.
4838 *
4839 * This routine is invoked to allocate and initialize the driver's sgl
4840 * list and set up the sgl xritag tag array accordingly.
4841 *
4842 * Return codes
4843 *	0 - successful
4844 *	other values - error
4845 **/
4846static int
4847lpfc_init_sgl_list(struct lpfc_hba *phba)
4848{
4849	struct lpfc_sglq *sglq_entry = NULL;
4850	int i;
4851	int els_xri_cnt;
4852
4853	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4854	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4855				"2400 lpfc_init_sgl_list els %d.\n",
4856				els_xri_cnt);
4857	/* Initialize and populate the sglq list per host/VF. */
4858	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4859	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4860
4861	/* Sanity check on XRI management */
4862	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4863		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4864				"2562 No room left for SCSI XRI allocation: "
4865				"max_xri=%d, els_xri=%d\n",
4866				phba->sli4_hba.max_cfg_param.max_xri,
4867				els_xri_cnt);
4868		return -ENOMEM;
4869	}
4870
4871	/* Allocate memory for the ELS XRI management array */
4872	phba->sli4_hba.lpfc_els_sgl_array =
4873			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4874			GFP_KERNEL);
4875
4876	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4877		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4878				"2401 Failed to allocate memory for ELS "
4879				"XRI management array of size %d.\n",
4880				els_xri_cnt);
4881		return -ENOMEM;
4882	}
4883
4884	/* The XRIs remaining after the ELS allocation are used for SCSI */
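	/*
	 * For illustration (hypothetical values): max_xri = 1024 with
	 * els_xri_cnt = 256 leaves scsi_xri_max = 768 for SCSI buffers.
	 */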
4885	phba->sli4_hba.scsi_xri_max =
4886			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4887	phba->sli4_hba.scsi_xri_cnt = 0;
4888
4889	phba->sli4_hba.lpfc_scsi_psb_array =
4890			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4891			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4892
4893	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4894		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4895				"2563 Failed to allocate memory for SCSI "
4896				"XRI management array of size %d.\n",
4897				phba->sli4_hba.scsi_xri_max);
4898		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4899		return -ENOMEM;
4900	}
4901
4902	for (i = 0; i < els_xri_cnt; i++) {
4903		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4904		if (sglq_entry == NULL) {
4905			printk(KERN_ERR "%s: only allocated %d sgls of "
4906				"expected %d count. Unloading driver.\n",
4907				__func__, i, els_xri_cnt);
4908			goto out_free_mem;
4909		}
4910
4911		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4912		if (sglq_entry->sli4_xritag == NO_XRI) {
4913			kfree(sglq_entry);
4914			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4915				"Unloading driver.\n", __func__);
4916			goto out_free_mem;
4917		}
4918		sglq_entry->buff_type = GEN_BUFF_TYPE;
4919		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4920		if (sglq_entry->virt == NULL) {
4921			kfree(sglq_entry);
4922			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4923				"Unloading driver.\n", __func__);
4924			goto out_free_mem;
4925		}
4926		sglq_entry->sgl = sglq_entry->virt;
4927		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4928
4929		/* The list order is used by later block SGL registration */
4930		spin_lock_irq(&phba->hbalock);
4931		sglq_entry->state = SGL_FREED;
4932		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4933		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4934		phba->sli4_hba.total_sglq_bufs++;
4935		spin_unlock_irq(&phba->hbalock);
4936	}
4937	return 0;
4938
4939out_free_mem:
4940	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4941	lpfc_free_sgl_list(phba);
4942	return -ENOMEM;
4943}
4944
4945/**
4946 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4947 * @phba: pointer to lpfc hba data structure.
4948 *
4949 * This routine is invoked to post rpi header templates to the
4950 * HBA consistent with the SLI-4 interface spec.  This routine
4951 * posts a PAGE_SIZE memory region to the port to hold up to
4952 * PAGE_SIZE / 64 rpi context headers (64 for a 4 KB page).
4953 * No locks are held here because this is an initialization routine
4954 * called only from probe or lpfc_online when interrupts are not
4955 * enabled and the driver is reinitializing the device.
4956 *
4957 * Return codes
4958 * 	0 - successful
4959 * 	-ENOMEM - No available memory
4960 *      -EIO - The mailbox failed to complete successfully.
4961 **/
4962int
4963lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4964{
4965	int rc = 0;
4966	int longs;
4967	uint16_t rpi_count;
4968	struct lpfc_rpi_hdr *rpi_hdr;
4969
4970	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4971
4972	/*
4973	 * Provision an rpi bitmask range for discovery. The total count
4974	 * is the difference between max and base + 1.
4975	 */
4976	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4977		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4978
4979	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4980	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4981					   GFP_KERNEL);
4982	if (!phba->sli4_hba.rpi_bmask)
4983		return -ENOMEM;
4984
4985	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4986	if (!rpi_hdr) {
4987		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4988				"0391 Error during rpi post operation\n");
4989		lpfc_sli4_remove_rpis(phba);
4990		rc = -ENODEV;
4991	}
4992
4993	return rc;
4994}
4995
4996/**
4997 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4998 * @phba: pointer to lpfc hba data structure.
4999 *
5000 * This routine is invoked to allocate a single 4KB memory region to
5001 * support rpis and stores them in the phba.  This single region
5002 * provides support for up to 64 rpis.  The region is used globally
5003 * by the device.
5004 *
5005 * Returns:
5006 *   A valid rpi hdr on success.
5007 *   A NULL pointer on any failure.
5008 **/
5009struct lpfc_rpi_hdr *
5010lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5011{
5012	uint16_t rpi_limit, curr_rpi_range;
5013	struct lpfc_dmabuf *dmabuf;
5014	struct lpfc_rpi_hdr *rpi_hdr;
5015	uint32_t rpi_count;
5016
5017	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5018		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
5019
5020	spin_lock_irq(&phba->hbalock);
5021	curr_rpi_range = phba->sli4_hba.next_rpi;
5022	spin_unlock_irq(&phba->hbalock);
5023
5024	/*
5025	 * The port has a limited number of rpis. The increment here
5026	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5027	 * and to allow the full max_rpi range per port.
5028	 */
5029	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5030		rpi_count = rpi_limit - curr_rpi_range;
5031	else
5032		rpi_count = LPFC_RPI_HDR_COUNT;
5033
5034	/*
5035	 * First allocate the protocol header region for the port.  The
5036	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5037	 */
5038	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5039	if (!dmabuf)
5040		return NULL;
5041
5042	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5043					  LPFC_HDR_TEMPLATE_SIZE,
5044					  &dmabuf->phys,
5045					  GFP_KERNEL);
5046	if (!dmabuf->virt) {
5047		rpi_hdr = NULL;
5048		goto err_free_dmabuf;
5049	}
5050
5051	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5052	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5053		rpi_hdr = NULL;
5054		goto err_free_coherent;
5055	}
5056
5057	/* Save the rpi header data for cleanup later. */
5058	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5059	if (!rpi_hdr)
5060		goto err_free_coherent;
5061
5062	rpi_hdr->dmabuf = dmabuf;
5063	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5064	rpi_hdr->page_count = 1;
5065	spin_lock_irq(&phba->hbalock);
5066	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
5067	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5068
5069	/*
5070	 * The next_rpi stores the next modulo-64 rpi value to post
5071	 * in any subsequent rpi memory region postings.
5072	 */
5073	phba->sli4_hba.next_rpi += rpi_count;
5074	spin_unlock_irq(&phba->hbalock);
5075	return rpi_hdr;
5076
5077 err_free_coherent:
5078	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5079			  dmabuf->virt, dmabuf->phys);
5080 err_free_dmabuf:
5081	kfree(dmabuf);
5082	return NULL;
5083}
5084
5085/**
5086 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5087 * @phba: pointer to lpfc hba data structure.
5088 *
5089 * This routine is invoked to remove all memory resources allocated
5090 * to support rpis. This routine presumes the caller has released all
5091 * rpis consumed by fabric or port logins and is prepared to have
5092 * the header pages removed.
5093 **/
5094void
5095lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5096{
5097	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5098
5099	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5100				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5101		list_del(&rpi_hdr->list);
5102		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5103				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5104		kfree(rpi_hdr->dmabuf);
5105		kfree(rpi_hdr);
5106	}
5107	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5108}
5109
5110/**
5111 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5112 * @pdev: pointer to pci device data structure.
5113 *
5114 * This routine is invoked to allocate the driver hba data structure for an
5115 * HBA device. If the allocation is successful, the phba reference to the
5116 * PCI device data structure is set.
5117 *
5118 * Return codes
5119 *      pointer to @phba - successful
5120 *      NULL - error
5121 **/
5122static struct lpfc_hba *
5123lpfc_hba_alloc(struct pci_dev *pdev)
5124{
5125	struct lpfc_hba *phba;
5126
5127	/* Allocate memory for HBA structure */
5128	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5129	if (!phba) {
5130		dev_err(&pdev->dev, "failed to allocate hba struct\n");
5131		return NULL;
5132	}
5133
5134	/* Set reference to PCI device in HBA structure */
5135	phba->pcidev = pdev;
5136
5137	/* Assign an unused board number */
5138	phba->brd_no = lpfc_get_instance();
5139	if (phba->brd_no < 0) {
5140		kfree(phba);
5141		return NULL;
5142	}
5143
5144	spin_lock_init(&phba->ct_ev_lock);
5145	INIT_LIST_HEAD(&phba->ct_ev_waiters);
5146
5147	return phba;
5148}
5149
5150/**
5151 * lpfc_hba_free - Free driver hba data structure with a device.
5152 * @phba: pointer to lpfc hba data structure.
5153 *
5154 * This routine is invoked to free the driver hba data structure for an
5155 * HBA device.
5156 **/
5157static void
5158lpfc_hba_free(struct lpfc_hba *phba)
5159{
5160	/* Release the driver assigned board number */
5161	idr_remove(&lpfc_hba_index, phba->brd_no);
5162
5163	kfree(phba);
5164	return;
5165}
5166
5167/**
5168 * lpfc_create_shost - Create hba physical port with associated scsi host.
5169 * @phba: pointer to lpfc hba data structure.
5170 *
5171 * This routine is invoked to create HBA physical port and associate a SCSI
5172 * host with it.
5173 *
5174 * Return codes
5175 *      0 - successful
5176 *      other values - error
5177 **/
5178static int
5179lpfc_create_shost(struct lpfc_hba *phba)
5180{
5181	struct lpfc_vport *vport;
5182	struct Scsi_Host  *shost;
5183
5184	/* Initialize HBA FC structure */
5185	phba->fc_edtov = FF_DEF_EDTOV;
5186	phba->fc_ratov = FF_DEF_RATOV;
5187	phba->fc_altov = FF_DEF_ALTOV;
5188	phba->fc_arbtov = FF_DEF_ARBTOV;
5189
5190	atomic_set(&phba->sdev_cnt, 0);
5191	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5192	if (!vport)
5193		return -ENODEV;
5194
5195	shost = lpfc_shost_from_vport(vport);
5196	phba->pport = vport;
5197	lpfc_debugfs_initialize(vport);
5198	/* Put reference to SCSI host to driver's device private data */
5199	pci_set_drvdata(phba->pcidev, shost);
5200
5201	return 0;
5202}
5203
5204/**
5205 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5206 * @phba: pointer to lpfc hba data structure.
5207 *
5208 * This routine is invoked to destroy HBA physical port and the associated
5209 * SCSI host.
5210 **/
5211static void
5212lpfc_destroy_shost(struct lpfc_hba *phba)
5213{
5214	struct lpfc_vport *vport = phba->pport;
5215
5216	/* Destroy the physical port associated with the SCSI host */
5217	destroy_port(vport);
5218
5219	return;
5220}
5221
5222/**
5223 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5224 * @phba: pointer to lpfc hba data structure.
5225 * @shost: the shost to be used to detect Block guard settings.
5226 *
5227 * This routine sets up the local Block guard protocol settings for @shost.
5228 * This routine also allocates memory for debugging bg buffers.
5229 **/
5230static void
5231lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5232{
5233	int pagecnt = 10;
5234	if (lpfc_prot_mask && lpfc_prot_guard) {
5235		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5236				"1478 Registering BlockGuard with the "
5237				"SCSI layer\n");
5238		scsi_host_set_prot(shost, lpfc_prot_mask);
5239		scsi_host_set_guard(shost, lpfc_prot_guard);
5240	}
5241	if (!_dump_buf_data) {
5242		spin_lock_init(&_dump_buf_lock);
5243		while (pagecnt) {
5244			_dump_buf_data =
5245				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5246			if (_dump_buf_data) {
5247				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5248					"9043 BLKGRD: allocated %d pages for "
5249				       "_dump_buf_data at 0x%p\n",
5250				       (1 << pagecnt), _dump_buf_data);
5251				_dump_buf_data_order = pagecnt;
5252				memset(_dump_buf_data, 0,
5253				       ((1 << PAGE_SHIFT) << pagecnt));
5254				break;
5255			} else
5256				--pagecnt;
5257		}
5258		if (!_dump_buf_data_order)
5259			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5260				"9044 BLKGRD: ERROR unable to allocate "
5261			       "memory for hexdump\n");
5262	} else
5263		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5264			"9045 BLKGRD: already allocated _dump_buf_data=0x%p\n",
5265		       _dump_buf_data);
5266	if (!_dump_buf_dif) {
5267		while (pagecnt) {
5268			_dump_buf_dif =
5269				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5270			if (_dump_buf_dif) {
5271				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5272					"9046 BLKGRD: allocated %d pages for "
5273				       "_dump_buf_dif at 0x%p\n",
5274				       (1 << pagecnt), _dump_buf_dif);
5275				_dump_buf_dif_order = pagecnt;
5276				memset(_dump_buf_dif, 0,
5277				       ((1 << PAGE_SHIFT) << pagecnt));
5278				break;
5279			} else
5280				--pagecnt;
5281		}
5282		if (!_dump_buf_dif_order)
5283			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5284			"9047 BLKGRD: ERROR unable to allocate "
5285			       "memory for hexdump\n");
5286	} else
5287		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5288			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5289		       _dump_buf_dif);
5290}
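
/*
 * Note on the allocation arithmetic above (explanatory only): the second
 * argument to __get_free_pages() is a page order, so order n yields 2^n
 * contiguous pages, i.e. (PAGE_SIZE << n) bytes.  With 4KB pages the
 * back-off therefore tries 4MB (order 10), then 2MB, 1MB, ... down to
 * two pages (order 1); order 0 is never attempted since the loops stop
 * once pagecnt reaches zero.  Also note pagecnt is reused, so the dif
 * buffer search starts from whatever order the data buffer succeeded at.
 */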
5291
5292/**
5293 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5294 * @phba: pointer to lpfc hba data structure.
5295 *
5296 * This routine is invoked to perform all the necessary post initialization
5297 * setup for the device.
5298 **/
5299static void
5300lpfc_post_init_setup(struct lpfc_hba *phba)
5301{
5302	struct Scsi_Host  *shost;
5303	struct lpfc_adapter_event_header adapter_event;
5304
5305	/* Get the default values for Model Name and Description */
5306	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5307
5308	/*
5309	 * hba setup may have changed the hba_queue_depth so we need to
5310	 * adjust the value of can_queue.
5311	 */
5312	shost = pci_get_drvdata(phba->pcidev);
5313	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5314	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5315		lpfc_setup_bg(phba, shost);
5316
5317	lpfc_host_attrib_init(shost);
5318
5319	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5320		spin_lock_irq(shost->host_lock);
5321		lpfc_poll_start_timer(phba);
5322		spin_unlock_irq(shost->host_lock);
5323	}
5324
5325	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5326			"0428 Perform SCSI scan\n");
5327	/* Send board arrival event to upper layer */
5328	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5329	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5330	fc_host_post_vendor_event(shost, fc_get_event_number(),
5331				  sizeof(adapter_event),
5332				  (char *) &adapter_event,
5333				  LPFC_NL_VENDOR_ID);
5334	return;
5335}
5336
5337/**
5338 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5339 * @phba: pointer to lpfc hba data structure.
5340 *
5341 * This routine is invoked to set up the PCI device memory space for device
5342 * with SLI-3 interface spec.
5343 *
5344 * Return codes
5345 * 	0 - successful
5346 * 	other values - error
5347 **/
5348static int
5349lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5350{
5351	struct pci_dev *pdev;
5352	unsigned long bar0map_len, bar2map_len;
5353	int i, hbq_count;
5354	void *ptr;
5355	int error = -ENODEV;
5356
5357	/* Obtain PCI device reference */
5358	if (!phba->pcidev)
5359		return error;
5360	else
5361		pdev = phba->pcidev;
5362
5363	/* Set the device DMA mask size */
5364	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5365	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5366		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5367		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5368			return error;
5369		}
5370	}
5371
5372	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5373	 * required by each mapping.
5374	 */
5375	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5376	bar0map_len = pci_resource_len(pdev, 0);
5377
5378	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5379	bar2map_len = pci_resource_len(pdev, 2);
5380
5381	/* Map HBA SLIM to a kernel virtual address. */
5382	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5383	if (!phba->slim_memmap_p) {
5384		dev_printk(KERN_ERR, &pdev->dev,
5385			   "ioremap failed for SLIM memory.\n");
5386		goto out;
5387	}
5388
5389	/* Map HBA Control Registers to a kernel virtual address. */
5390	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5391	if (!phba->ctrl_regs_memmap_p) {
5392		dev_printk(KERN_ERR, &pdev->dev,
5393			   "ioremap failed for HBA control registers.\n");
5394		goto out_iounmap_slim;
5395	}
5396
5397	/* Allocate memory for SLI-2 structures */
5398	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5399					       SLI2_SLIM_SIZE,
5400					       &phba->slim2p.phys,
5401					       GFP_KERNEL);
5402	if (!phba->slim2p.virt)
5403		goto out_iounmap;
5404
5405	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5406	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5407	phba->mbox_ext = (phba->slim2p.virt +
5408		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5409	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5410	phba->IOCBs = (phba->slim2p.virt +
5411		       offsetof(struct lpfc_sli2_slim, IOCBs));
5412
5413	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5414						 lpfc_sli_hbq_size(),
5415						 &phba->hbqslimp.phys,
5416						 GFP_KERNEL);
5417	if (!phba->hbqslimp.virt)
5418		goto out_free_slim;
5419
5420	hbq_count = lpfc_sli_hbq_count();
5421	ptr = phba->hbqslimp.virt;
5422	for (i = 0; i < hbq_count; ++i) {
5423		phba->hbqs[i].hbq_virt = ptr;
5424		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5425		ptr += (lpfc_hbq_defs[i]->entry_count *
5426			sizeof(struct lpfc_hbq_entry));
5427	}
5428	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5429	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5430
5431	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5432
5433	INIT_LIST_HEAD(&phba->rb_pend_list);
5434
5435	phba->MBslimaddr = phba->slim_memmap_p;
5436	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5437	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5438	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5439	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5440
5441	return 0;
5442
5443out_free_slim:
5444	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5445			  phba->slim2p.virt, phba->slim2p.phys);
5446out_iounmap:
5447	iounmap(phba->ctrl_regs_memmap_p);
5448out_iounmap_slim:
5449	iounmap(phba->slim_memmap_p);
5450out:
5451	return error;
5452}
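
/*
 * Layout note (a sketch inferred from the offsetof() arithmetic above):
 * the single SLI2_SLIM_SIZE coherent allocation is carved up by struct
 * lpfc_sli2_slim, roughly
 *
 *	struct lpfc_sli2_slim {
 *		<mailbox area>           -> phba->mbox
 *		<extended mailbox words> -> phba->mbox_ext
 *		<port control block>     -> phba->pcb
 *		<IOCB ring entries>      -> phba->IOCBs
 *	};
 *
 * so all four pointers share one DMA mapping and are released together
 * in lpfc_sli_pci_mem_unset().
 */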
5453
5454/**
5455 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5456 * @phba: pointer to lpfc hba data structure.
5457 *
5458 * This routine is invoked to unset the PCI device memory space for device
5459 * with SLI-3 interface spec.
5460 **/
5461static void
5462lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5463{
5464	struct pci_dev *pdev;
5465
5466	/* Obtain PCI device reference */
5467	if (!phba->pcidev)
5468		return;
5469	else
5470		pdev = phba->pcidev;
5471
5472	/* Free coherent DMA memory allocated */
5473	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5474			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5475	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5476			  phba->slim2p.virt, phba->slim2p.phys);
5477
5478	/* I/O memory unmap */
5479	iounmap(phba->ctrl_regs_memmap_p);
5480	iounmap(phba->slim_memmap_p);
5481
5482	return;
5483}
5484
5485/**
5486 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5487 * @phba: pointer to lpfc hba data structure.
5488 *
5489 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5490 * done and check status.
5491 *
5492 * Return 0 if successful, otherwise -ENODEV.
5493 **/
5494int
5495lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5496{
5497	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5498	struct lpfc_register reg_data;
5499	int i, port_error = 0;
5500	uint32_t if_type;
5501
5502	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5503	memset(&reg_data, 0, sizeof(reg_data));
5504	if (!phba->sli4_hba.PSMPHRregaddr)
5505		return -ENODEV;
5506
5507	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5508	for (i = 0; i < 3000; i++) {
5509		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5510			&portsmphr_reg.word0) ||
5511			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5512			/* Port has a fatal POST error, break out */
5513			port_error = -ENODEV;
5514			break;
5515		}
5516		if (LPFC_POST_STAGE_PORT_READY ==
5517		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5518			break;
5519		msleep(10);
5520	}
5521
5522	/*
5523	 * If there was a port error during POST, then don't proceed with
5524	 * other register reads as the data may not be valid.  Just exit.
5525	 */
5526	if (port_error) {
5527		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5528			"1408 Port Failed POST - portsmphr=0x%x, "
5529			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5530			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5531			portsmphr_reg.word0,
5532			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5533			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5534			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5535			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5536			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5537			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5538			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5539			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5540	} else {
5541		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5542				"2534 Device Info: SLIFamily=0x%x, "
5543				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5544				"SLIHint_2=0x%x, FT=0x%x\n",
5545				bf_get(lpfc_sli_intf_sli_family,
5546				       &phba->sli4_hba.sli_intf),
5547				bf_get(lpfc_sli_intf_slirev,
5548				       &phba->sli4_hba.sli_intf),
5549				bf_get(lpfc_sli_intf_if_type,
5550				       &phba->sli4_hba.sli_intf),
5551				bf_get(lpfc_sli_intf_sli_hint1,
5552				       &phba->sli4_hba.sli_intf),
5553				bf_get(lpfc_sli_intf_sli_hint2,
5554				       &phba->sli4_hba.sli_intf),
5555				bf_get(lpfc_sli_intf_func_type,
5556				       &phba->sli4_hba.sli_intf));
5557		/*
5558		 * Check for other Port errors during the initialization
5559		 * process.  Fail the load if the port did not come up
5560		 * correctly.
5561		 */
5562		if_type = bf_get(lpfc_sli_intf_if_type,
5563				 &phba->sli4_hba.sli_intf);
5564		switch (if_type) {
5565		case LPFC_SLI_INTF_IF_TYPE_0:
5566			phba->sli4_hba.ue_mask_lo =
5567			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5568			phba->sli4_hba.ue_mask_hi =
5569			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5570			uerrlo_reg.word0 =
5571			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5572			uerrhi_reg.word0 =
5573				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5574			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5575			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5576				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5577						"1422 Unrecoverable Error "
5578						"Detected during POST "
5579						"uerr_lo_reg=0x%x, "
5580						"uerr_hi_reg=0x%x, "
5581						"ue_mask_lo_reg=0x%x, "
5582						"ue_mask_hi_reg=0x%x\n",
5583						uerrlo_reg.word0,
5584						uerrhi_reg.word0,
5585						phba->sli4_hba.ue_mask_lo,
5586						phba->sli4_hba.ue_mask_hi);
5587				port_error = -ENODEV;
5588			}
5589			break;
5590		case LPFC_SLI_INTF_IF_TYPE_2:
5591			/* Final checks.  The port status should be clean. */
5592			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5593				&reg_data.word0) ||
5594				(bf_get(lpfc_sliport_status_err, &reg_data) &&
5595				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5596				phba->work_status[0] =
5597					readl(phba->sli4_hba.u.if_type2.
5598					      ERR1regaddr);
5599				phba->work_status[1] =
5600					readl(phba->sli4_hba.u.if_type2.
5601					      ERR2regaddr);
5602				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5603					"2888 Port Error Detected "
5604					"during POST: "
5605					"port status reg 0x%x, "
5606					"port_smphr reg 0x%x, "
5607					"error 1=0x%x, error 2=0x%x\n",
5608					reg_data.word0,
5609					portsmphr_reg.word0,
5610					phba->work_status[0],
5611					phba->work_status[1]);
5612				port_error = -ENODEV;
5613			}
5614			break;
5615		case LPFC_SLI_INTF_IF_TYPE_1:
5616		default:
5617			break;
5618		}
5619	}
5620	return port_error;
5621}
5622
5623/**
5624 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5625 * @phba: pointer to lpfc hba data structure.
5626 * @if_type:  The SLI4 interface type getting configured.
5627 *
5628 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5629 * memory map.
5630 **/
5631static void
5632lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5633{
5634	switch (if_type) {
5635	case LPFC_SLI_INTF_IF_TYPE_0:
5636		phba->sli4_hba.u.if_type0.UERRLOregaddr =
5637			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5638		phba->sli4_hba.u.if_type0.UERRHIregaddr =
5639			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5640		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5641			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5642		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5643			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5644		phba->sli4_hba.SLIINTFregaddr =
5645			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5646		break;
5647	case LPFC_SLI_INTF_IF_TYPE_2:
5648		phba->sli4_hba.u.if_type2.ERR1regaddr =
5649			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
5650		phba->sli4_hba.u.if_type2.ERR2regaddr =
5651			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
5652		phba->sli4_hba.u.if_type2.CTRLregaddr =
5653			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
5654		phba->sli4_hba.u.if_type2.STATUSregaddr =
5655			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
5656		phba->sli4_hba.SLIINTFregaddr =
5657			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5658		phba->sli4_hba.PSMPHRregaddr =
5659		     phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
5660		phba->sli4_hba.RQDBregaddr =
5661			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5662		phba->sli4_hba.WQDBregaddr =
5663			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5664		phba->sli4_hba.EQCQDBregaddr =
5665			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5666		phba->sli4_hba.MQDBregaddr =
5667			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5668		phba->sli4_hba.BMBXregaddr =
5669			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5670		break;
5671	case LPFC_SLI_INTF_IF_TYPE_1:
5672	default:
5673		dev_printk(KERN_ERR, &phba->pcidev->dev,
5674			   "FATAL - unsupported SLI4 interface type - %d\n",
5675			   if_type);
5676		break;
5677	}
5678}
5679
5680/**
5681 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5682 * @phba: pointer to lpfc hba data structure.
5683 *
5684 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5685 * memory map.
5686 **/
5687static void
5688lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5689{
5690	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5691		LPFC_SLIPORT_IF0_SMPHR;
5692	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5693		LPFC_HST_ISR0;
5694	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5695		LPFC_HST_IMR0;
5696	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5697		LPFC_HST_ISCR0;
5698}
5699
5700/**
5701 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5702 * @phba: pointer to lpfc hba data structure.
5703 * @vf: virtual function number
5704 *
5705 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5706 * based on the given virtual function number, @vf.
5707 *
5708 * Return 0 if successful, otherwise -ENODEV.
5709 **/
5710static int
5711lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5712{
5713	if (vf > LPFC_VIR_FUNC_MAX)
5714		return -ENODEV;
5715
5716	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5717				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5718	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5719				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5720	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5721				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5722	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5723				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5724	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5725				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5726	return 0;
5727}
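
/*
 * Worked example (illustrative; assumes LPFC_VFR_PAGE_SIZE is the 4KB
 * per-function doorbell page stride): for vf = 2 every doorbell lands in
 * the third doorbell page of BAR2, e.g.
 *
 *	RQDBregaddr = drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE
 *					 + LPFC_RQ_DOORBELL;
 *
 * and vf = 0 selects the function's own (first) doorbell page.
 */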
5728
5729/**
5730 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5731 * @phba: pointer to lpfc hba data structure.
5732 *
5733 * This routine is invoked to create the bootstrap mailbox
5734 * region consistent with the SLI-4 interface spec.  This
5735 * routine allocates all memory necessary to communicate
5736 * mailbox commands to the port and sets up all alignment
5737 * needs.  No locks are expected to be held when calling
5738 * this routine.
5739 *
5740 * Return codes
5741 * 	0 - successful
5742 * 	-ENOMEM - could not allocate memory.
5743 **/
5744static int
5745lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5746{
5747	uint32_t bmbx_size;
5748	struct lpfc_dmabuf *dmabuf;
5749	struct dma_address *dma_address;
5750	uint32_t pa_addr;
5751	uint64_t phys_addr;
5752
5753	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5754	if (!dmabuf)
5755		return -ENOMEM;
5756
5757	/*
5758	 * The bootstrap mailbox region consists of two parts
5759	 * plus an alignment restriction of 16 bytes.
5760	 */
5761	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5762	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5763					  bmbx_size,
5764					  &dmabuf->phys,
5765					  GFP_KERNEL);
5766	if (!dmabuf->virt) {
5767		kfree(dmabuf);
5768		return -ENOMEM;
5769	}
5770	memset(dmabuf->virt, 0, bmbx_size);
5771
5772	/*
5773	 * Initialize the bootstrap mailbox pointers now so that the register
5774	 * operations are simple later.  The mailbox dma address is required
5775	 * to be 16-byte aligned.  Also align the virtual memory as each
5776	 * mailbox is copied into the bmbx mailbox region before issuing the
5777	 * command to the port.
5778	 */
5779	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5780	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5781
5782	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5783					      LPFC_ALIGN_16_BYTE);
5784	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5785					      LPFC_ALIGN_16_BYTE);
5786
5787	/*
5788	 * Set the high and low physical addresses now.  The SLI4 alignment
5789	 * requirement is 16 bytes and the mailbox is posted to the port
5790	 * as two 30-bit addresses.  The other data is a bit marking whether
5791	 * the 30-bit address is the high or low address.
5792	 * Upcast bmbx aphys to 64bits so shift instruction compiles
5793	 * clean on 32 bit machines.
5794	 */
5795	dma_address = &phba->sli4_hba.bmbx.dma_address;
5796	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5797	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5798	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5799					   LPFC_BMBX_BIT1_ADDR_HI);
5800
5801	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5802	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5803					   LPFC_BMBX_BIT1_ADDR_LO);
5804	return 0;
5805}
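
/*
 * Worked example of the address encoding above (illustrative numbers):
 * for an aligned bmbx physical address of 0x4_0000_1230,
 *
 *	addr_hi: (0x400001230ULL >> 34) & 0x3fffffff = 0x1
 *	         -> (0x1 << 2) | LPFC_BMBX_BIT1_ADDR_HI
 *	addr_lo: (0x400001230ULL >>  4) & 0x3fffffff = 0x123
 *	         -> (0x123 << 2) | LPFC_BMBX_BIT1_ADDR_LO
 *
 * Bits 63:34 travel in addr_hi and bits 33:4 in addr_lo; bits 3:0 are
 * guaranteed zero by the 16-byte alignment and are never posted.
 */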
5806
5807/**
5808 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5809 * @phba: pointer to lpfc hba data structure.
5810 *
5811 * This routine is invoked to teardown the bootstrap mailbox
5812 * region and release all host resources. This routine requires
5813 * the caller to ensure all mailbox commands have been recovered, no
5814 * additional mailbox commands are sent, and interrupts are disabled
5815 * before calling this routine.
5816 *
5817 **/
5818static void
5819lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5820{
5821	dma_free_coherent(&phba->pcidev->dev,
5822			  phba->sli4_hba.bmbx.bmbx_size,
5823			  phba->sli4_hba.bmbx.dmabuf->virt,
5824			  phba->sli4_hba.bmbx.dmabuf->phys);
5825
5826	kfree(phba->sli4_hba.bmbx.dmabuf);
5827	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5828}
5829
5830/**
5831 * lpfc_sli4_read_config - Get the config parameters.
5832 * @phba: pointer to lpfc hba data structure.
5833 *
5834 * This routine is invoked to read the configuration parameters from the HBA.
5835 * The configuration parameters are used to set the base and maximum values
5836 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
5837 * allocation for the port.
5838 *
5839 * Return codes
5840 * 	0 - successful
5841 * 	-ENOMEM - No available memory
5842 *      -EIO - The mailbox failed to complete successfully.
5843 **/
5844static int
5845lpfc_sli4_read_config(struct lpfc_hba *phba)
5846{
5847	LPFC_MBOXQ_t *pmb;
5848	struct lpfc_mbx_read_config *rd_config;
5849	union  lpfc_sli4_cfg_shdr *shdr;
5850	uint32_t shdr_status, shdr_add_status;
5851	struct lpfc_mbx_get_func_cfg *get_func_cfg;
5852	struct lpfc_rsrc_desc_fcfcoe *desc;
5853	uint32_t desc_count;
5854	int length, i, rc = 0;
5855
5856	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5857	if (!pmb) {
5858		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5859				"2011 Unable to allocate memory for issuing "
5860				"SLI_CONFIG_SPECIAL mailbox command\n");
5861		return -ENOMEM;
5862	}
5863
5864	lpfc_read_config(phba, pmb);
5865
5866	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5867	if (rc != MBX_SUCCESS) {
5868		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5869			"2012 Mailbox failed, mbxCmd x%x "
5870			"READ_CONFIG, mbxStatus x%x\n",
5871			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5872			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5873		rc = -EIO;
5874	} else {
5875		rd_config = &pmb->u.mqe.un.rd_config;
5876		phba->sli4_hba.max_cfg_param.max_xri =
5877			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5878		phba->sli4_hba.max_cfg_param.xri_base =
5879			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5880		phba->sli4_hba.max_cfg_param.max_vpi =
5881			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5882		phba->sli4_hba.max_cfg_param.vpi_base =
5883			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5884		phba->sli4_hba.max_cfg_param.max_rpi =
5885			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5886		phba->sli4_hba.max_cfg_param.rpi_base =
5887			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5888		phba->sli4_hba.max_cfg_param.max_vfi =
5889			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5890		phba->sli4_hba.max_cfg_param.vfi_base =
5891			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5892		phba->sli4_hba.max_cfg_param.max_fcfi =
5893			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5894		phba->sli4_hba.max_cfg_param.fcfi_base =
5895			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5896		phba->sli4_hba.max_cfg_param.max_eq =
5897			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5898		phba->sli4_hba.max_cfg_param.max_rq =
5899			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5900		phba->sli4_hba.max_cfg_param.max_wq =
5901			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5902		phba->sli4_hba.max_cfg_param.max_cq =
5903			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5904		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5905		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5906		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5907		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5908		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5909		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5910				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5911		phba->max_vports = phba->max_vpi;
5912		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5913				"2003 cfg params XRI(B:%d M:%d), "
5914				"VPI(B:%d M:%d) "
5915				"VFI(B:%d M:%d) "
5916				"RPI(B:%d M:%d) "
5917				"FCFI(B:%d M:%d)\n",
5918				phba->sli4_hba.max_cfg_param.xri_base,
5919				phba->sli4_hba.max_cfg_param.max_xri,
5920				phba->sli4_hba.max_cfg_param.vpi_base,
5921				phba->sli4_hba.max_cfg_param.max_vpi,
5922				phba->sli4_hba.max_cfg_param.vfi_base,
5923				phba->sli4_hba.max_cfg_param.max_vfi,
5924				phba->sli4_hba.max_cfg_param.rpi_base,
5925				phba->sli4_hba.max_cfg_param.max_rpi,
5926				phba->sli4_hba.max_cfg_param.fcfi_base,
5927				phba->sli4_hba.max_cfg_param.max_fcfi);
5928	}
5929
5930	if (rc)
5931		goto read_cfg_out;
5932
5933	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
5934	if (phba->cfg_hba_queue_depth >
5935		(phba->sli4_hba.max_cfg_param.max_xri -
5936			lpfc_sli4_get_els_iocb_cnt(phba)))
5937		phba->cfg_hba_queue_depth =
5938			phba->sli4_hba.max_cfg_param.max_xri -
5939				lpfc_sli4_get_els_iocb_cnt(phba);
5940
5941	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5942	    LPFC_SLI_INTF_IF_TYPE_2)
5943		goto read_cfg_out;
5944
5945	/* get the pf# and vf# for SLI4 if_type 2 port */
5946	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
5947		  sizeof(struct lpfc_sli4_cfg_mhdr));
5948	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
5949			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
5950			 length, LPFC_SLI4_MBX_EMBED);
5951
5952	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5953	shdr = (union lpfc_sli4_cfg_shdr *)
5954				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
5955	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5956	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5957	if (rc || shdr_status || shdr_add_status) {
5958		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5959				"3026 Mailbox failed, mbxCmd x%x "
5960				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
5961				bf_get(lpfc_mqe_command, &pmb->u.mqe),
5962				bf_get(lpfc_mqe_status, &pmb->u.mqe));
5963		rc = -EIO;
5964		goto read_cfg_out;
5965	}
5966
5967	/* search for fc_fcoe resource descriptor */
5968	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
5969	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
5970
5971	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
5972		desc = (struct lpfc_rsrc_desc_fcfcoe *)
5973			&get_func_cfg->func_cfg.desc[i];
5974		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
5975		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
5976			phba->sli4_hba.iov.pf_number =
5977				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
5978			phba->sli4_hba.iov.vf_number =
5979				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
5980			break;
5981		}
5982	}
5983
5984	if (i < LPFC_RSRC_DESC_MAX_NUM)
5985		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5986				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
5987				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
5988				phba->sli4_hba.iov.vf_number);
5989	else {
5990		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5991				"3028 GET_FUNCTION_CONFIG: failed to find "
5992				"Resource Descriptor:x%x\n",
5993				LPFC_RSRC_DESC_TYPE_FCFCOE);
5994		rc = -EIO;
5995	}
5996
5997read_cfg_out:
5998	mempool_free(pmb, phba->mbox_mem_pool);
5999	return rc;
6000}
6001
6002/**
6003 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6004 * @phba: pointer to lpfc hba data structure.
6005 *
6006 * This routine is invoked to setup the port-side endian order when
6007 * the port if_type is 0.  This routine has no function for other
6008 * if_types.
6009 *
6010 * Return codes
6011 * 	0 - successful
6012 * 	-ENOMEM - No available memory
6013 *      -EIO - The mailbox failed to complete successfully.
6014 **/
6015static int
6016lpfc_setup_endian_order(struct lpfc_hba *phba)
6017{
6018	LPFC_MBOXQ_t *mboxq;
6019	uint32_t if_type, rc = 0;
6020	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6021				      HOST_ENDIAN_HIGH_WORD1};
6022
6023	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6024	switch (if_type) {
6025	case LPFC_SLI_INTF_IF_TYPE_0:
6026		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6027						       GFP_KERNEL);
6028		if (!mboxq) {
6029			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6030					"0492 Unable to allocate memory for "
6031					"issuing SLI_CONFIG_SPECIAL mailbox "
6032					"command\n");
6033			return -ENOMEM;
6034		}
6035
6036		/*
6037		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6038		 * two words to contain special data values and no other data.
6039		 */
6040		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6041		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6042		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6043		if (rc != MBX_SUCCESS) {
6044			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6045					"0493 SLI_CONFIG_SPECIAL mailbox "
6046					"failed with status x%x\n",
6047					rc);
6048			rc = -EIO;
6049		}
6050		mempool_free(mboxq, phba->mbox_mem_pool);
6051		break;
6052	case LPFC_SLI_INTF_IF_TYPE_2:
6053	case LPFC_SLI_INTF_IF_TYPE_1:
6054	default:
6055		break;
6056	}
6057	return rc;
6058}
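
/*
 * Background note (explanatory, not additional driver code): the two
 * endian words form a byte pattern that is asymmetric under byte
 * swapping, so the port can infer the host's byte order from how the
 * pattern arrives in the first two mailbox words and set up its DMA
 * byte ordering to match.  If_type 2 ports manage byte order without
 * host help, which is presumably why the routine is a no-op for them.
 */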
6059
6060/**
6061 * lpfc_sli4_queue_create - Create all the SLI4 queues
6062 * @phba: pointer to lpfc hba data structure.
6063 *
6064 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6065 * operation. For each SLI4 queue type, the parameters such as queue entry
6066 * count (queue depth) shall be taken from the module parameter. For now,
6067 * we just use some constant number as a placeholder.
6068 *
6069 * Return codes
6070 *      0 - successful
6071 *      -ENOMEM - No available memory
6072 *      -EIO - The mailbox failed to complete successfully.
6073 **/
6074static int
6075lpfc_sli4_queue_create(struct lpfc_hba *phba)
6076{
6077	struct lpfc_queue *qdesc;
6078	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6079	int cfg_fcp_wq_count;
6080	int cfg_fcp_eq_count;
6081
6082	/*
6083	 * Sanity check for configured queue parameters against the run-time
6084	 * device parameters
6085	 */
6086
6087	/* Sanity check on FCP fast-path WQ parameters */
6088	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
6089	if (cfg_fcp_wq_count >
6090	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
6091		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
6092				   LPFC_SP_WQN_DEF;
6093		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
6094			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6095					"2581 Not enough WQs (%d) from "
6096					"the pci function for supporting "
6097					"FCP WQs (%d)\n",
6098					phba->sli4_hba.max_cfg_param.max_wq,
6099					phba->cfg_fcp_wq_count);
6100			goto out_error;
6101		}
6102		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6103				"2582 Not enough WQs (%d) from the pci "
6104				"function for supporting the requested "
6105				"FCP WQs (%d), the actual FCP WQs can "
6106				"be supported: %d\n",
6107				phba->sli4_hba.max_cfg_param.max_wq,
6108				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6109	}
6110	/* The actual number of FCP work queues adopted */
6111	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6112
6113	/* Sanity check on FCP fast-path EQ parameters */
6114	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6115	if (cfg_fcp_eq_count >
6116	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6117		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6118				   LPFC_SP_EQN_DEF;
6119		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6120			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6121					"2574 Not enough EQs (%d) from the "
6122					"pci function for supporting FCP "
6123					"EQs (%d)\n",
6124					phba->sli4_hba.max_cfg_param.max_eq,
6125					phba->cfg_fcp_eq_count);
6126			goto out_error;
6127		}
6128		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6129				"2575 Not enough EQs (%d) from the pci "
6130				"function for supporting the requested "
6131				"FCP EQs (%d), the actual FCP EQs can "
6132				"be supported: %d\n",
6133				phba->sli4_hba.max_cfg_param.max_eq,
6134				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
6135	}
6136	/* It does not make sense to have more EQs than WQs */
6137	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6138		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6139				"2593 The FCP EQ count (%d) cannot be greater "
6140				"than the FCP WQ count (%d), limiting the "
6141				"FCP EQ count to %d\n", cfg_fcp_eq_count,
6142				phba->cfg_fcp_wq_count,
6143				phba->cfg_fcp_wq_count);
6144		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6145	}
6146	/* The actual number of FCP event queues adopted */
6147	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
6148	/* The overall number of event queues used */
6149	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
6150
6151	/*
6152	 * Create Event Queues (EQs)
6153	 */
6154
6155	/* Get EQ depth from module parameter, fake the default for now */
6156	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6157	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6158
6159	/* Create slow path event queue */
6160	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6161				      phba->sli4_hba.eq_ecount);
6162	if (!qdesc) {
6163		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6164				"0496 Failed allocate slow-path EQ\n");
6165		goto out_error;
6166	}
6167	phba->sli4_hba.sp_eq = qdesc;
6168
6169	/* Create fast-path FCP Event Queue(s) */
6170	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6171			       phba->cfg_fcp_eq_count), GFP_KERNEL);
6172	if (!phba->sli4_hba.fp_eq) {
6173		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6174				"2576 Failed allocate memory for fast-path "
6175				"EQ record array\n");
6176		goto out_free_sp_eq;
6177	}
6178	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6179		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6180					      phba->sli4_hba.eq_ecount);
6181		if (!qdesc) {
6182			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6183					"0497 Failed allocate fast-path EQ\n");
6184			goto out_free_fp_eq;
6185		}
6186		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6187	}
6188
6189	/*
6190	 * Create Complete Queues (CQs)
6191	 */
6192
6193	/* Get CQ depth from module parameter, fake the default for now */
6194	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6195	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6196
6197	/* Create slow-path Mailbox Command Complete Queue */
6198	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6199				      phba->sli4_hba.cq_ecount);
6200	if (!qdesc) {
6201		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6202				"0500 Failed allocate slow-path mailbox CQ\n");
6203		goto out_free_fp_eq;
6204	}
6205	phba->sli4_hba.mbx_cq = qdesc;
6206
6207	/* Create slow-path ELS Complete Queue */
6208	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6209				      phba->sli4_hba.cq_ecount);
6210	if (!qdesc) {
6211		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6212				"0501 Failed allocate slow-path ELS CQ\n");
6213		goto out_free_mbx_cq;
6214	}
6215	phba->sli4_hba.els_cq = qdesc;
6216
6218	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
6219	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6220				phba->cfg_fcp_eq_count), GFP_KERNEL);
6221	if (!phba->sli4_hba.fcp_cq) {
6222		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6223				"2577 Failed allocate memory for fast-path "
6224				"CQ record array\n");
6225		goto out_free_els_cq;
6226	}
6227	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6228		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6229					      phba->sli4_hba.cq_ecount);
6230		if (!qdesc) {
6231			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6232					"0499 Failed allocate fast-path FCP "
6233					"CQ (%d)\n", fcp_cqidx);
6234			goto out_free_fcp_cq;
6235		}
6236		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6237	}
6238
6239	/* Create Mailbox Command Queue */
6240	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6241	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6242
6243	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6244				      phba->sli4_hba.mq_ecount);
6245	if (!qdesc) {
6246		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6247				"0505 Failed allocate slow-path MQ\n");
6248		goto out_free_fcp_cq;
6249	}
6250	phba->sli4_hba.mbx_wq = qdesc;
6251
6252	/*
6253	 * Create all the Work Queues (WQs)
6254	 */
6255	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6256	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6257
6258	/* Create slow-path ELS Work Queue */
6259	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6260				      phba->sli4_hba.wq_ecount);
6261	if (!qdesc) {
6262		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6263				"0504 Failed allocate slow-path ELS WQ\n");
6264		goto out_free_mbx_wq;
6265	}
6266	phba->sli4_hba.els_wq = qdesc;
6267
6268	/* Create fast-path FCP Work Queue(s) */
6269	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6270				phba->cfg_fcp_wq_count), GFP_KERNEL);
6271	if (!phba->sli4_hba.fcp_wq) {
6272		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6273				"2578 Failed allocate memory for fast-path "
6274				"WQ record array\n");
6275		goto out_free_els_wq;
6276	}
6277	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6278		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6279					      phba->sli4_hba.wq_ecount);
6280		if (!qdesc) {
6281			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6282					"0503 Failed allocate fast-path FCP "
6283					"WQ (%d)\n", fcp_wqidx);
6284			goto out_free_fcp_wq;
6285		}
6286		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6287	}
6288
6289	/*
6290	 * Create Receive Queue (RQ)
6291	 */
6292	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6293	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6294
6295	/* Create Receive Queue for header */
6296	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6297				      phba->sli4_hba.rq_ecount);
6298	if (!qdesc) {
6299		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6300				"0506 Failed allocate receive HRQ\n");
6301		goto out_free_fcp_wq;
6302	}
6303	phba->sli4_hba.hdr_rq = qdesc;
6304
6305	/* Create Receive Queue for data */
6306	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6307				      phba->sli4_hba.rq_ecount);
6308	if (!qdesc) {
6309		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6310				"0507 Failed allocate receive DRQ\n");
6311		goto out_free_hdr_rq;
6312	}
6313	phba->sli4_hba.dat_rq = qdesc;
6314
6315	return 0;
6316
6317out_free_hdr_rq:
6318	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6319	phba->sli4_hba.hdr_rq = NULL;
6320out_free_fcp_wq:
6321	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6322		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6323		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6324	}
6325	kfree(phba->sli4_hba.fcp_wq);
6326out_free_els_wq:
6327	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6328	phba->sli4_hba.els_wq = NULL;
6329out_free_mbx_wq:
6330	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6331	phba->sli4_hba.mbx_wq = NULL;
6332out_free_fcp_cq:
6333	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6334		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6335		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6336	}
6337	kfree(phba->sli4_hba.fcp_cq);
6338out_free_els_cq:
6339	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6340	phba->sli4_hba.els_cq = NULL;
6341out_free_mbx_cq:
6342	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6343	phba->sli4_hba.mbx_cq = NULL;
6344out_free_fp_eq:
6345	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6346		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6347		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6348	}
6349	kfree(phba->sli4_hba.fp_eq);
6350out_free_sp_eq:
6351	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6352	phba->sli4_hba.sp_eq = NULL;
6353out_error:
6354	return -ENOMEM;
6355}
6356
6357/**
6358 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6359 * @phba: pointer to lpfc hba data structure.
6360 *
6361 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
6362 * operation.
6363 *
6369static void
6370lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6371{
6372	int fcp_qidx;
6373
6374	/* Release mailbox command work queue */
6375	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6376	phba->sli4_hba.mbx_wq = NULL;
6377
6378	/* Release ELS work queue */
6379	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6380	phba->sli4_hba.els_wq = NULL;
6381
6382	/* Release FCP work queue */
6383	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6384		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6385	kfree(phba->sli4_hba.fcp_wq);
6386	phba->sli4_hba.fcp_wq = NULL;
6387
6388	/* Release unsolicited receive queue */
6389	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6390	phba->sli4_hba.hdr_rq = NULL;
6391	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6392	phba->sli4_hba.dat_rq = NULL;
6393
6394	/* Release ELS complete queue */
6395	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6396	phba->sli4_hba.els_cq = NULL;
6397
6398	/* Release mailbox command complete queue */
6399	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6400	phba->sli4_hba.mbx_cq = NULL;
6401
6402	/* Release FCP response complete queue */
6403	fcp_qidx = 0;
6404	do {
6405		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6406	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
6407	kfree(phba->sli4_hba.fcp_cq);
6408	phba->sli4_hba.fcp_cq = NULL;
6409
6410	/* Release fast-path event queue */
6411	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6412		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6413	kfree(phba->sli4_hba.fp_eq);
6414	phba->sli4_hba.fp_eq = NULL;
6415
6416	/* Release slow-path event queue */
6417	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6418	phba->sli4_hba.sp_eq = NULL;
6419
6420	return;
6421}
6422
6423/**
6424 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6425 * @phba: pointer to lpfc hba data structure.
6426 *
6427 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6428 * operation.
6429 *
6430 * Return codes
6431 *      0 - successful
6432 *      -ENOMEM - No available memory
6433 *      -EIO - The mailbox failed to complete successfully.
6434 **/
6435int
6436lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6437{
6438	int rc = -ENOMEM;
6439	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6440	int fcp_cq_index = 0;
6441
6442	/*
6443	 * Set up Event Queues (EQs)
6444	 */
6445
6446	/* Set up slow-path event queue */
6447	if (!phba->sli4_hba.sp_eq) {
6448		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6449				"0520 Slow-path EQ not allocated\n");
6450		goto out_error;
6451	}
6452	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6453			    LPFC_SP_DEF_IMAX);
6454	if (rc) {
6455		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6456				"0521 Failed setup of slow-path EQ: "
6457				"rc = 0x%x\n", rc);
6458		goto out_error;
6459	}
6460	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6461			"2583 Slow-path EQ setup: queue-id=%d\n",
6462			phba->sli4_hba.sp_eq->queue_id);
6463
6464	/* Set up fast-path event queue */
6465	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6466		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6467			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6468					"0522 Fast-path EQ (%d) not "
6469					"allocated\n", fcp_eqidx);
6470			goto out_destroy_fp_eq;
6471		}
6472		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6473				    phba->cfg_fcp_imax);
6474		if (rc) {
6475			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6476					"0523 Failed setup of fast-path EQ "
6477					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6478			goto out_destroy_fp_eq;
6479		}
6480		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6481				"2584 Fast-path EQ setup: "
6482				"queue[%d]-id=%d\n", fcp_eqidx,
6483				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6484	}
6485
6486	/*
6487	 * Set up Complete Queues (CQs)
6488	 */
6489
6490	/* Set up slow-path MBOX Complete Queue as the first CQ */
6491	if (!phba->sli4_hba.mbx_cq) {
6492		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6493				"0528 Mailbox CQ not allocated\n");
6494		goto out_destroy_fp_eq;
6495	}
6496	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6497			    LPFC_MCQ, LPFC_MBOX);
6498	if (rc) {
6499		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6500				"0529 Failed setup of slow-path mailbox CQ: "
6501				"rc = 0x%x\n", rc);
6502		goto out_destroy_fp_eq;
6503	}
6504	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6505			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6506			phba->sli4_hba.mbx_cq->queue_id,
6507			phba->sli4_hba.sp_eq->queue_id);
6508
6509	/* Set up slow-path ELS Complete Queue */
6510	if (!phba->sli4_hba.els_cq) {
6511		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6512				"0530 ELS CQ not allocated\n");
6513		goto out_destroy_mbx_cq;
6514	}
6515	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6516			    LPFC_WCQ, LPFC_ELS);
6517	if (rc) {
6518		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6519				"0531 Failed setup of slow-path ELS CQ: "
6520				"rc = 0x%x\n", rc);
6521		goto out_destroy_mbx_cq;
6522	}
6523	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6524			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6525			phba->sli4_hba.els_cq->queue_id,
6526			phba->sli4_hba.sp_eq->queue_id);
6527
6528	/* Set up fast-path FCP Response Complete Queue */
6529	fcp_cqidx = 0;
6530	do {
6531		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6532			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6533					"0526 Fast-path FCP CQ (%d) not "
6534					"allocated\n", fcp_cqidx);
6535			goto out_destroy_fcp_cq;
6536		}
6537		if (phba->cfg_fcp_eq_count)
6538			rc = lpfc_cq_create(phba,
6539					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6540					    phba->sli4_hba.fp_eq[fcp_cqidx],
6541					    LPFC_WCQ, LPFC_FCP);
6542		else
6543			rc = lpfc_cq_create(phba,
6544					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6545					    phba->sli4_hba.sp_eq,
6546					    LPFC_WCQ, LPFC_FCP);
6547		if (rc) {
6548			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6549					"0527 Failed setup of fast-path FCP "
6550					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6551			goto out_destroy_fcp_cq;
6552		}
6553		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6554				"2588 FCP CQ setup: cq[%d]-id=%d, "
6555				"parent %seq[%d]-id=%d\n",
6556				fcp_cqidx,
6557				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6558				(phba->cfg_fcp_eq_count) ? "" : "sp_",
6559				fcp_cqidx,
6560				(phba->cfg_fcp_eq_count) ?
6561				   phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6562				   phba->sli4_hba.sp_eq->queue_id);
6563	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6564
6565	/*
6566	 * Set up all the Work Queues (WQs)
6567	 */
6568
6569	/* Set up Mailbox Command Queue */
6570	if (!phba->sli4_hba.mbx_wq) {
6571		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6572				"0538 Slow-path MQ not allocated\n");
6573		goto out_destroy_fcp_cq;
6574	}
6575	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6576			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6577	if (rc) {
6578		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6579				"0539 Failed setup of slow-path MQ: "
6580				"rc = 0x%x\n", rc);
6581		goto out_destroy_fcp_cq;
6582	}
6583	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6584			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6585			phba->sli4_hba.mbx_wq->queue_id,
6586			phba->sli4_hba.mbx_cq->queue_id);
6587
6588	/* Set up slow-path ELS Work Queue */
6589	if (!phba->sli4_hba.els_wq) {
6590		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6591				"0536 Slow-path ELS WQ not allocated\n");
6592		goto out_destroy_mbx_wq;
6593	}
6594	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6595			    phba->sli4_hba.els_cq, LPFC_ELS);
6596	if (rc) {
6597		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6598				"0537 Failed setup of slow-path ELS WQ: "
6599				"rc = 0x%x\n", rc);
6600		goto out_destroy_mbx_wq;
6601	}
6602	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6603			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6604			phba->sli4_hba.els_wq->queue_id,
6605			phba->sli4_hba.els_cq->queue_id);
6606
6607	/* Set up fast-path FCP Work Queue */
6608	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6609		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6610			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6611					"0534 Fast-path FCP WQ (%d) not "
6612					"allocated\n", fcp_wqidx);
6613			goto out_destroy_fcp_wq;
6614		}
6615		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6616				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6617				    LPFC_FCP);
6618		if (rc) {
6619			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6620					"0535 Failed setup of fast-path FCP "
6621					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6622			goto out_destroy_fcp_wq;
6623		}
6624		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6625				"2591 FCP WQ setup: wq[%d]-id=%d, "
6626				"parent cq[%d]-id=%d\n",
6627				fcp_wqidx,
6628				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6629				fcp_cq_index,
6630				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6631		/* Round robin FCP Work Queue's Completion Queue assignment */
6632		if (phba->cfg_fcp_eq_count)
6633			fcp_cq_index = ((fcp_cq_index + 1) %
6634					phba->cfg_fcp_eq_count);
6635	}
6636
6637	/*
6638	 * Create Receive Queue (RQ)
6639	 */
6640	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6641		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6642				"0540 Receive Queue not allocated\n");
6643		goto out_destroy_fcp_wq;
6644	}
6645	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6646			    phba->sli4_hba.els_cq, LPFC_USOL);
6647	if (rc) {
6648		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6649				"0541 Failed setup of Receive Queue: "
6650				"rc = 0x%x\n", rc);
6651		goto out_destroy_fcp_wq;
6652	}
6653	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6654			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6655			"parent cq-id=%d\n",
6656			phba->sli4_hba.hdr_rq->queue_id,
6657			phba->sli4_hba.dat_rq->queue_id,
6658			phba->sli4_hba.els_cq->queue_id);
6659	return 0;
6660
6661out_destroy_fcp_wq:
6662	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6663		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6664	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6665out_destroy_mbx_wq:
6666	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6667out_destroy_fcp_cq:
6668	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6669		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6670	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6671out_destroy_mbx_cq:
6672	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6673out_destroy_fp_eq:
6674	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6675		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6676	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6677out_error:
6678	return rc;
6679}
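
/*
 * Worked example of the WQ-to-CQ round robin above (illustrative
 * counts): with cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2 the
 * assignment wraps as
 *
 *	fcp_wq[0] -> fcp_cq[0]
 *	fcp_wq[1] -> fcp_cq[1]
 *	fcp_wq[2] -> fcp_cq[0]
 *	fcp_wq[3] -> fcp_cq[1]
 *
 * and with cfg_fcp_eq_count = 0 every WQ binds to fcp_cq[0] (serviced
 * by the slow-path EQ), since fcp_cq_index is never advanced.
 */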
6680
6681/**
6682 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6683 * @phba: pointer to lpfc hba data structure.
6684 *
6685 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6686 * operation.
6687 *
6693void
6694lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6695{
6696	int fcp_qidx;
6697
6698	/* Unset mailbox command work queue */
6699	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6700	/* Unset ELS work queue */
6701	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6702	/* Unset unsolicited receive queue */
6703	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6704	/* Unset FCP work queue */
6705	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6706		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6707	/* Unset mailbox command complete queue */
6708	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6709	/* Unset ELS complete queue */
6710	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6711	/* Unset FCP response complete queue */
6712	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6713		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6714	/* Unset fast-path event queue */
6715	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6716		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6717	/* Unset slow-path event queue */
6718	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6719}
6720
6721/**
6722 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6723 * @phba: pointer to lpfc hba data structure.
6724 *
6725 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of a completion queue event is a completion queue entry
 * (CQE). For now, this pool is used for the interrupt service routine to
 * queue the following HBA completion queue events for the worker thread to
 * process:
6729 *   - Mailbox asynchronous events
6730 *   - Receive queue completion unsolicited events
6731 * Later, this can be used for all the slow-path events.
6732 *
6733 * Return codes
6734 *      0 - successful
6735 *      -ENOMEM - No available memory
6736 **/
6737static int
6738lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6739{
6740	struct lpfc_cq_event *cq_event;
6741	int i;
6742
6743	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6744		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6745		if (!cq_event)
6746			goto out_pool_create_fail;
6747		list_add_tail(&cq_event->list,
6748			      &phba->sli4_hba.sp_cqe_event_pool);
6749	}
6750	return 0;
6751
6752out_pool_create_fail:
6753	lpfc_sli4_cq_event_pool_destroy(phba);
6754	return -ENOMEM;
6755}
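
/*
 * Sizing note (descriptive, inferred from the loop above rather than from
 * the SLI4 spec): the pool is pre-allocated with 4 * cq_ecount events so
 * that the interrupt service routine can hand bursts of slow-path
 * completions to the worker thread without allocating memory in interrupt
 * context.
 */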
6756
6757/**
6758 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6759 * @phba: pointer to lpfc hba data structure.
6760 *
6761 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
6763 * cleanup routine to free all the outstanding completion-queue events
6764 * allocated from this pool back into the pool before invoking this routine
6765 * to destroy the pool.
6766 **/
6767static void
6768lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6769{
6770	struct lpfc_cq_event *cq_event, *next_cq_event;
6771
6772	list_for_each_entry_safe(cq_event, next_cq_event,
6773				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6774		list_del(&cq_event->list);
6775		kfree(cq_event);
6776	}
6777}
6778
6779/**
6780 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6781 * @phba: pointer to lpfc hba data structure.
6782 *
 * This routine is the lock-free version of the API invoked to allocate a
6784 * completion-queue event from the free pool.
6785 *
6786 * Return: Pointer to the newly allocated completion-queue event if successful
6787 *         NULL otherwise.
6788 **/
6789struct lpfc_cq_event *
6790__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6791{
6792	struct lpfc_cq_event *cq_event = NULL;
6793
6794	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6795			 struct lpfc_cq_event, list);
6796	return cq_event;
6797}
6798
6799/**
6800 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6801 * @phba: pointer to lpfc hba data structure.
6802 *
 * This routine is the locked version of the API invoked to allocate a
6804 * completion-queue event from the free pool.
6805 *
6806 * Return: Pointer to the newly allocated completion-queue event if successful
6807 *         NULL otherwise.
6808 **/
6809struct lpfc_cq_event *
6810lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6811{
6812	struct lpfc_cq_event *cq_event;
6813	unsigned long iflags;
6814
6815	spin_lock_irqsave(&phba->hbalock, iflags);
6816	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6817	spin_unlock_irqrestore(&phba->hbalock, iflags);
6818	return cq_event;
6819}
6820
6821/**
6822 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6823 * @phba: pointer to lpfc hba data structure.
6824 * @cq_event: pointer to the completion queue event to be freed.
6825 *
 * This routine is the lock-free version of the API invoked to release a
6827 * completion-queue event back into the free pool.
6828 **/
6829void
6830__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6831			     struct lpfc_cq_event *cq_event)
6832{
6833	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6834}
6835
6836/**
6837 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6838 * @phba: pointer to lpfc hba data structure.
6839 * @cq_event: pointer to the completion queue event to be freed.
6840 *
 * This routine is the locked version of the API invoked to release a
6842 * completion-queue event back into the free pool.
6843 **/
6844void
6845lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6846			   struct lpfc_cq_event *cq_event)
6847{
6848	unsigned long iflags;
6849	spin_lock_irqsave(&phba->hbalock, iflags);
6850	__lpfc_sli4_cq_event_release(phba, cq_event);
6851	spin_unlock_irqrestore(&phba->hbalock, iflags);
6852}
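
/*
 * Usage sketch (illustrative only, not a specific call site): callers that
 * already hold hbalock use the lock-free __lpfc_sli4_cq_event_alloc() and
 * __lpfc_sli4_cq_event_release() variants; all other contexts use the
 * locked wrappers, e.g.:
 *
 *	struct lpfc_cq_event *cq_event;
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return;	(pool exhausted)
 *	... copy the CQE and queue it for the worker thread ...
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */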
6853
6854/**
6855 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6856 * @phba: pointer to lpfc hba data structure.
6857 *
 * This routine is invoked to free all the pending completion-queue events
 * back into the free pool for device reset.
6860 **/
6861static void
6862lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6863{
6864	LIST_HEAD(cqelist);
6865	struct lpfc_cq_event *cqe;
6866	unsigned long iflags;
6867
6868	/* Retrieve all the pending WCQEs from pending WCQE lists */
6869	spin_lock_irqsave(&phba->hbalock, iflags);
6870	/* Pending FCP XRI abort events */
6871	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6872			 &cqelist);
6873	/* Pending ELS XRI abort events */
6874	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6875			 &cqelist);
	/* Pending async events */
6877	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6878			 &cqelist);
6879	spin_unlock_irqrestore(&phba->hbalock, iflags);
6880
6881	while (!list_empty(&cqelist)) {
6882		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6883		lpfc_sli4_cq_event_release(phba, cqe);
6884	}
6885}
6886
6887/**
6888 * lpfc_pci_function_reset - Reset pci function.
6889 * @phba: pointer to lpfc hba data structure.
6890 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function that originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -ENXIO - The SLI_FUNCTION_RESET mailbox command failed
 *      -ENODEV - The port did not respond, reported an error, or did not
 *                become ready
6898 **/
6899int
6900lpfc_pci_function_reset(struct lpfc_hba *phba)
6901{
6902	LPFC_MBOXQ_t *mboxq;
6903	uint32_t rc = 0, if_type;
6904	uint32_t shdr_status, shdr_add_status;
6905	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
6906	union lpfc_sli4_cfg_shdr *shdr;
6907	struct lpfc_register reg_data;
6908
6909	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6910	switch (if_type) {
6911	case LPFC_SLI_INTF_IF_TYPE_0:
6912		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6913						       GFP_KERNEL);
6914		if (!mboxq) {
6915			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6916					"0494 Unable to allocate memory for "
6917					"issuing SLI_FUNCTION_RESET mailbox "
6918					"command\n");
6919			return -ENOMEM;
6920		}
6921
6922		/* Setup PCI function reset mailbox-ioctl command */
6923		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6924				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6925				 LPFC_SLI4_MBX_EMBED);
6926		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6927		shdr = (union lpfc_sli4_cfg_shdr *)
6928			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6929		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6930		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6931					 &shdr->response);
6932		if (rc != MBX_TIMEOUT)
6933			mempool_free(mboxq, phba->mbox_mem_pool);
6934		if (shdr_status || shdr_add_status || rc) {
6935			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6936					"0495 SLI_FUNCTION_RESET mailbox "
6937					"failed with status x%x add_status x%x,"
6938					" mbx status x%x\n",
6939					shdr_status, shdr_add_status, rc);
6940			rc = -ENXIO;
6941		}
6942		break;
6943	case LPFC_SLI_INTF_IF_TYPE_2:
6944		for (num_resets = 0;
6945		     num_resets < MAX_IF_TYPE_2_RESETS;
6946		     num_resets++) {
6947			reg_data.word0 = 0;
6948			bf_set(lpfc_sliport_ctrl_end, &reg_data,
6949			       LPFC_SLIPORT_LITTLE_ENDIAN);
6950			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
6951			       LPFC_SLIPORT_INIT_PORT);
6952			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
6953			       CTRLregaddr);
6954
6955			/*
6956			 * Poll the Port Status Register and wait for RDY for
6957			 * up to 10 seconds.  If the port doesn't respond, treat
6958			 * it as an error.  If the port responds with RN, start
6959			 * the loop again.
6960			 */
6961			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
6962				if (lpfc_readl(phba->sli4_hba.u.if_type2.
6963					      STATUSregaddr, &reg_data.word0)) {
6964					rc = -ENODEV;
6965					break;
6966				}
6967				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
6968					break;
6969				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
6970					reset_again++;
6971					break;
6972				}
6973				msleep(10);
6974			}
6975
6976			/*
6977			 * If the port responds to the init request with
6978			 * reset needed, delay for a bit and restart the loop.
6979			 */
6980			if (reset_again) {
6981				msleep(10);
6982				reset_again = 0;
6983				continue;
6984			}
6985
6986			/* Detect any port errors. */
6987			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6988				 &reg_data.word0)) {
6989				rc = -ENODEV;
6990				break;
6991			}
6992			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
6993			    (rdy_chk >= 1000)) {
6994				phba->work_status[0] = readl(
6995					phba->sli4_hba.u.if_type2.ERR1regaddr);
6996				phba->work_status[1] = readl(
6997					phba->sli4_hba.u.if_type2.ERR2regaddr);
6998				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6999					"2890 Port Error Detected "
7000					"during Port Reset: "
7001					"port status reg 0x%x, "
7002					"error 1=0x%x, error 2=0x%x\n",
7003					reg_data.word0,
7004					phba->work_status[0],
7005					phba->work_status[1]);
7006				rc = -ENODEV;
7007			}
7008
7009			/*
7010			 * Terminate the outer loop provided the Port indicated
7011			 * ready within 10 seconds.
7012			 */
7013			if (rdy_chk < 1000)
7014				break;
7015		}
7016		/* delay driver action following IF_TYPE_2 function reset */
7017		msleep(100);
7018		break;
7019	case LPFC_SLI_INTF_IF_TYPE_1:
7020	default:
7021		break;
7022	}
7023
7024	/* Catch the not-ready port failure after a port reset. */
7025	if (num_resets >= MAX_IF_TYPE_2_RESETS)
7026		rc = -ENODEV;
7027
7028	return rc;
7029}
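
/*
 * Summary of the reset paths above (descriptive only): SLI_INTF if_type 0
 * ports are reset with the SLI_FUNCTION_RESET mailbox command, while
 * if_type 2 ports are reset by writing INIT_PORT to the SLIPORT control
 * register and then polling the port status register for RDY, up to 1000
 * iterations of 10 ms each (10 seconds) per reset attempt.
 */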
7030
7031/**
7032 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7033 * @phba: pointer to lpfc hba data structure.
7034 * @cnt: number of nop mailbox commands to send.
7035 *
 * This routine is invoked to send @cnt NOP mailbox commands and wait for
 * each command to complete.
 *
 * Return: the number of NOP mailbox commands completed.
7040 **/
7041static int
7042lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7043{
7044	LPFC_MBOXQ_t *mboxq;
7045	int length, cmdsent;
7046	uint32_t mbox_tmo;
7047	uint32_t rc = 0;
7048	uint32_t shdr_status, shdr_add_status;
7049	union lpfc_sli4_cfg_shdr *shdr;
7050
7051	if (cnt == 0) {
7052		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7053				"2518 Requested to send 0 NOP mailbox cmd\n");
7054		return cnt;
7055	}
7056
7057	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7058	if (!mboxq) {
7059		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7060				"2519 Unable to allocate memory for issuing "
7061				"NOP mailbox command\n");
7062		return 0;
7063	}
7064
7065	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7066	length = (sizeof(struct lpfc_mbx_nop) -
7067		  sizeof(struct lpfc_sli4_cfg_mhdr));
7068	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7069			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7070
7071	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
7072	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7073		if (!phba->sli4_hba.intr_enable)
7074			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7075		else
7076			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7077		if (rc == MBX_TIMEOUT)
7078			break;
7079		/* Check return status */
7080		shdr = (union lpfc_sli4_cfg_shdr *)
7081			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7082		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7083		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7084					 &shdr->response);
7085		if (shdr_status || shdr_add_status || rc) {
7086			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7087					"2520 NOP mailbox command failed "
7088					"status x%x add_status x%x mbx "
7089					"status x%x\n", shdr_status,
7090					shdr_add_status, rc);
7091			break;
7092		}
7093	}
7094
7095	if (rc != MBX_TIMEOUT)
7096		mempool_free(mboxq, phba->mbox_mem_pool);
7097
7098	return cmdsent;
7099}
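
/*
 * Usage sketch (illustrative, not taken from a specific caller): comparing
 * the return value against the requested count detects a mailbox path that
 * is not fully functional, e.g.
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, cnt) != cnt)
 *		treat the mailbox path as degraded;
 */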
7100
7101/**
7102 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7103 * @phba: pointer to lpfc hba data structure.
7104 *
7105 * This routine is invoked to set up the PCI device memory space for device
7106 * with SLI-4 interface spec.
7107 *
7108 * Return codes
7109 * 	0 - successful
7110 * 	other values - error
7111 **/
7112static int
7113lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7114{
7115	struct pci_dev *pdev;
7116	unsigned long bar0map_len, bar1map_len, bar2map_len;
7117	int error = -ENODEV;
7118	uint32_t if_type;
7119
7120	/* Obtain PCI device reference */
7121	if (!phba->pcidev)
7122		return error;
7123	else
7124		pdev = phba->pcidev;
7125
7126	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}
7134
7135	/*
7136	 * The BARs and register set definitions and offset locations are
7137	 * dependent on the if_type.
7138	 */
7139	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7140				  &phba->sli4_hba.sli_intf.word0)) {
7141		return error;
7142	}
7143
	/* There is no SLI3 fallback for SLI4 devices. */
7145	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7146	    LPFC_SLI_INTF_VALID) {
7147		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7148				"2894 SLI_INTF reg contents invalid "
7149				"sli_intf reg 0x%x\n",
7150				phba->sli4_hba.sli_intf.word0);
7151		return error;
7152	}
7153
7154	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7155	/*
	 * Get the bus address of the SLI4 device BAR regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BAR regions is dependent on the type of
	 * SLI4 device.
7160	 */
7161	if (pci_resource_start(pdev, 0)) {
7162		phba->pci_bar0_map = pci_resource_start(pdev, 0);
7163		bar0map_len = pci_resource_len(pdev, 0);
7164
7165		/*
7166		 * Map SLI4 PCI Config Space Register base to a kernel virtual
7167		 * addr
7168		 */
7169		phba->sli4_hba.conf_regs_memmap_p =
7170			ioremap(phba->pci_bar0_map, bar0map_len);
7171		if (!phba->sli4_hba.conf_regs_memmap_p) {
7172			dev_printk(KERN_ERR, &pdev->dev,
7173				   "ioremap failed for SLI4 PCI config "
7174				   "registers.\n");
7175			goto out;
7176		}
7177		/* Set up BAR0 PCI config space register memory map */
7178		lpfc_sli4_bar0_register_memmap(phba, if_type);
7179	} else {
7180		phba->pci_bar0_map = pci_resource_start(pdev, 1);
7181		bar0map_len = pci_resource_len(pdev, 1);
7182		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7183			dev_printk(KERN_ERR, &pdev->dev,
7184			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7185			goto out;
7186		}
7187		phba->sli4_hba.conf_regs_memmap_p =
7188				ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				"ioremap failed for SLI4 PCI config "
				"registers.\n");
			goto out;
		}
7195		lpfc_sli4_bar0_register_memmap(phba, if_type);
7196	}
7197
7198	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7199	    (pci_resource_start(pdev, 2))) {
7200		/*
7201		 * Map SLI4 if type 0 HBA Control Register base to a kernel
7202		 * virtual address and setup the registers.
7203		 */
7204		phba->pci_bar1_map = pci_resource_start(pdev, 2);
7205		bar1map_len = pci_resource_len(pdev, 2);
7206		phba->sli4_hba.ctrl_regs_memmap_p =
7207				ioremap(phba->pci_bar1_map, bar1map_len);
7208		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7209			dev_printk(KERN_ERR, &pdev->dev,
7210			   "ioremap failed for SLI4 HBA control registers.\n");
7211			goto out_iounmap_conf;
7212		}
7213		lpfc_sli4_bar1_register_memmap(phba);
7214	}
7215
7216	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7217	    (pci_resource_start(pdev, 4))) {
7218		/*
7219		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7220		 * virtual address and setup the registers.
7221		 */
7222		phba->pci_bar2_map = pci_resource_start(pdev, 4);
7223		bar2map_len = pci_resource_len(pdev, 4);
7224		phba->sli4_hba.drbl_regs_memmap_p =
7225				ioremap(phba->pci_bar2_map, bar2map_len);
7226		if (!phba->sli4_hba.drbl_regs_memmap_p) {
7227			dev_printk(KERN_ERR, &pdev->dev,
7228			   "ioremap failed for SLI4 HBA doorbell registers.\n");
7229			goto out_iounmap_ctrl;
7230		}
7231		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7232		if (error)
7233			goto out_iounmap_all;
7234	}
7235
7236	return 0;
7237
7238out_iounmap_all:
7239	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7240out_iounmap_ctrl:
7241	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7242out_iounmap_conf:
7243	iounmap(phba->sli4_hba.conf_regs_memmap_p);
7244out:
7245	return error;
7246}
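
/*
 * Descriptive summary of the mappings set up above: BAR0 (or BAR1 when
 * BAR0 is absent on older parts) carries the SLI4 PCI config space
 * registers for every if_type, and a missing BAR0 is fatal on if_type 2.
 * if_type 0 devices additionally map BAR2 for the HBA control registers
 * and BAR4 for the doorbell registers.
 */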
7247
7248/**
7249 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7250 * @phba: pointer to lpfc hba data structure.
7251 *
7252 * This routine is invoked to unset the PCI device memory space for device
7253 * with SLI-4 interface spec.
7254 **/
7255static void
7256lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7257{
7258	struct pci_dev *pdev;
7259
7260	/* Obtain PCI device reference */
7261	if (!phba->pcidev)
7262		return;
7263	else
7264		pdev = phba->pcidev;
7265
7268	/* Unmap I/O memory space */
7269	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7270	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7271	iounmap(phba->sli4_hba.conf_regs_memmap_p);
7272
7273	return;
7274}
7275
7276/**
7277 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7278 * @phba: pointer to lpfc hba data structure.
7279 *
7280 * This routine is invoked to enable the MSI-X interrupt vectors to device
7281 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7282 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7283 * invoked, enables either all or nothing, depending on the current
7284 * availability of PCI vector resources. The device driver is responsible
7285 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * leaves the device with MSI-X enabled, leaking its vectors.
7291 *
7292 * Return codes
7293 *   0 - successful
7294 *   other values - error
7295 **/
7296static int
7297lpfc_sli_enable_msix(struct lpfc_hba *phba)
7298{
7299	int rc, i;
7300	LPFC_MBOXQ_t *pmb;
7301
7302	/* Set up MSI-X multi-message vectors */
7303	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7304		phba->msix_entries[i].entry = i;
7305
7306	/* Configure MSI-X capability structure */
7307	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7308				ARRAY_SIZE(phba->msix_entries));
7309	if (rc) {
7310		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7311				"0420 PCI enable MSI-X failed (%d)\n", rc);
7312		goto msi_fail_out;
7313	}
7314	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7315		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7316				"0477 MSI-X entry[%d]: vector=x%x "
7317				"message=%d\n", i,
7318				phba->msix_entries[i].vector,
7319				phba->msix_entries[i].entry);
7320	/*
7321	 * Assign MSI-X vectors to interrupt handlers
7322	 */
7323
7324	/* vector-0 is associated to slow-path handler */
7325	rc = request_irq(phba->msix_entries[0].vector,
7326			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7327			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7328	if (rc) {
7329		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7330				"0421 MSI-X slow-path request_irq failed "
7331				"(%d)\n", rc);
7332		goto msi_fail_out;
7333	}
7334
7335	/* vector-1 is associated to fast-path handler */
7336	rc = request_irq(phba->msix_entries[1].vector,
7337			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7338			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7339
7340	if (rc) {
7341		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7342				"0429 MSI-X fast-path request_irq failed "
7343				"(%d)\n", rc);
7344		goto irq_fail_out;
7345	}
7346
7347	/*
7348	 * Configure HBA MSI-X attention conditions to messages
7349	 */
7350	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7351
7352	if (!pmb) {
7353		rc = -ENOMEM;
7354		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7355				"0474 Unable to allocate memory for issuing "
7356				"MBOX_CONFIG_MSI command\n");
7357		goto mem_fail_out;
7358	}
7359	rc = lpfc_config_msi(phba, pmb);
7360	if (rc)
7361		goto mbx_fail_out;
7362	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7363	if (rc != MBX_SUCCESS) {
7364		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7365				"0351 Config MSI mailbox command failed, "
7366				"mbxCmd x%x, mbxStatus x%x\n",
7367				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7368		goto mbx_fail_out;
7369	}
7370
7371	/* Free memory allocated for mailbox command */
7372	mempool_free(pmb, phba->mbox_mem_pool);
7373	return rc;
7374
7375mbx_fail_out:
7376	/* Free memory allocated for mailbox command */
7377	mempool_free(pmb, phba->mbox_mem_pool);
7378
7379mem_fail_out:
7380	/* free the irq already requested */
7381	free_irq(phba->msix_entries[1].vector, phba);
7382
7383irq_fail_out:
7384	/* free the irq already requested */
7385	free_irq(phba->msix_entries[0].vector, phba);
7386
7387msi_fail_out:
7388	/* Unconfigure MSI-X capability structure */
7389	pci_disable_msix(phba->pcidev);
7390	return rc;
7391}
7392
7393/**
7394 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7395 * @phba: pointer to lpfc hba data structure.
7396 *
7397 * This routine is invoked to release the MSI-X vectors and then disable the
7398 * MSI-X interrupt mode to device with SLI-3 interface spec.
7399 **/
7400static void
7401lpfc_sli_disable_msix(struct lpfc_hba *phba)
7402{
7403	int i;
7404
7405	/* Free up MSI-X multi-message vectors */
7406	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7407		free_irq(phba->msix_entries[i].vector, phba);
7408	/* Disable MSI-X */
7409	pci_disable_msix(phba->pcidev);
7410
7411	return;
7412}
7413
7414/**
7415 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7416 * @phba: pointer to lpfc hba data structure.
7417 *
7418 * This routine is invoked to enable the MSI interrupt mode to device with
7419 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
7423 *
7424 * Return codes
7425 * 	0 - successful
7426 * 	other values - error
 **/
7428static int
7429lpfc_sli_enable_msi(struct lpfc_hba *phba)
7430{
7431	int rc;
7432
7433	rc = pci_enable_msi(phba->pcidev);
7434	if (!rc)
7435		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7436				"0462 PCI enable MSI mode success.\n");
7437	else {
7438		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7439				"0471 PCI enable MSI mode failed (%d)\n", rc);
7440		return rc;
7441	}
7442
7443	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7444			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7445	if (rc) {
7446		pci_disable_msi(phba->pcidev);
7447		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7448				"0478 MSI request_irq failed (%d)\n", rc);
7449	}
7450	return rc;
7451}
7452
7453/**
7454 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7455 * @phba: pointer to lpfc hba data structure.
7456 *
7457 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and leaves the device with MSI enabled,
 * leaking its vector.
 **/
7463static void
7464lpfc_sli_disable_msi(struct lpfc_hba *phba)
7465{
7466	free_irq(phba->pcidev->irq, phba);
7467	pci_disable_msi(phba->pcidev);
7468	return;
7469}
7470
7471/**
7472 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt configuration mode (2 - MSI-X, 1 - MSI, 0 - INTx).
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) to interrupt vector(s) of a device with
 * SLI-3 interface spec. Depending on the interrupt mode configured for the
 * driver, the driver will try to fall back from the configured interrupt
 * mode to an interrupt mode which is supported by the platform, kernel,
 * and device, in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0, 1, 2 - the interrupt mode (INTx, MSI, MSI-X) successfully enabled
 *   LPFC_INTR_ERROR - failed to enable any interrupt mode
7486 **/
7487static uint32_t
7488lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7489{
7490	uint32_t intr_mode = LPFC_INTR_ERROR;
7491	int retval;
7492
7493	if (cfg_mode == 2) {
7494		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7495		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7496		if (!retval) {
7497			/* Now, try to enable MSI-X interrupt mode */
7498			retval = lpfc_sli_enable_msix(phba);
7499			if (!retval) {
7500				/* Indicate initialization to MSI-X mode */
7501				phba->intr_type = MSIX;
7502				intr_mode = 2;
7503			}
7504		}
7505	}
7506
7507	/* Fallback to MSI if MSI-X initialization failed */
7508	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7509		retval = lpfc_sli_enable_msi(phba);
7510		if (!retval) {
7511			/* Indicate initialization to MSI mode */
7512			phba->intr_type = MSI;
7513			intr_mode = 1;
7514		}
7515	}
7516
	/* Fall back to INTx if both MSI-X/MSI initialization failed */
7518	if (phba->intr_type == NONE) {
7519		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7520				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7521		if (!retval) {
7522			/* Indicate initialization to INTx mode */
7523			phba->intr_type = INTx;
7524			intr_mode = 0;
7525		}
7526	}
7527	return intr_mode;
7528}
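
/*
 * Illustrative caller pattern (a sketch of the probe path later in this
 * file): phba->cfg_use_msi is passed in as cfg_mode, and when the active
 * interrupt test fails the caller retries with the next lower mode via
 * cfg_mode = --intr_mode, walking 2 (MSI-X) -> 1 (MSI) -> 0 (INTx).
 */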
7529
7530/**
7531 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7532 * @phba: pointer to lpfc hba data structure.
7533 *
7534 * This routine is invoked to disable device interrupt and disassociate the
7535 * driver's interrupt handler(s) from interrupt vector(s) to device with
7536 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7537 * release the interrupt vector(s) for the message signaled interrupt.
7538 **/
7539static void
7540lpfc_sli_disable_intr(struct lpfc_hba *phba)
7541{
7542	/* Disable the currently initialized interrupt mode */
7543	if (phba->intr_type == MSIX)
7544		lpfc_sli_disable_msix(phba);
7545	else if (phba->intr_type == MSI)
7546		lpfc_sli_disable_msi(phba);
7547	else if (phba->intr_type == INTx)
7548		free_irq(phba->pcidev->irq, phba);
7549
7550	/* Reset interrupt management states */
7551	phba->intr_type = NONE;
7552	phba->sli.slistat.sli_intr = 0;
7553
7554	return;
7555}
7556
7557/**
7558 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7559 * @phba: pointer to lpfc hba data structure.
7560 *
7561 * This routine is invoked to enable the MSI-X interrupt vectors to device
7562 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7563 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7564 * enables either all or nothing, depending on the current availability of
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later, when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON() and leaves the device with MSI-X
 * enabled, leaking its vectors.
7571 * enabled and leaks its vectors.
7572 *
7573 * Return codes
7574 * 0 - successful
7575 * other values - error
7576 **/
7577static int
7578lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7579{
7580	int vectors, rc, index;
7581
7582	/* Set up MSI-X multi-message vectors */
7583	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7584		phba->sli4_hba.msix_entries[index].entry = index;
7585
7586	/* Configure MSI-X capability structure */
7587	vectors = phba->sli4_hba.cfg_eqn;
7588enable_msix_vectors:
7589	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7590			     vectors);
7591	if (rc > 1) {
7592		vectors = rc;
7593		goto enable_msix_vectors;
7594	} else if (rc) {
7595		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7596				"0484 PCI enable MSI-X failed (%d)\n", rc);
7597		goto msi_fail_out;
7598	}
7599
7600	/* Log MSI-X vector assignment */
7601	for (index = 0; index < vectors; index++)
7602		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7603				"0489 MSI-X entry[%d]: vector=x%x "
7604				"message=%d\n", index,
7605				phba->sli4_hba.msix_entries[index].vector,
7606				phba->sli4_hba.msix_entries[index].entry);
7607	/*
7608	 * Assign MSI-X vectors to interrupt handlers
7609	 */
7610	if (vectors > 1)
7611		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7612				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7613				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7614	else
7615		/* All Interrupts need to be handled by one EQ */
7616		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7617				 &lpfc_sli4_intr_handler, IRQF_SHARED,
7618				 LPFC_DRIVER_NAME, phba);
7619	if (rc) {
7620		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7621				"0485 MSI-X slow-path request_irq failed "
7622				"(%d)\n", rc);
7623		goto msi_fail_out;
7624	}
7625
7626	/* The rest of the vector(s) are associated to fast-path handler(s) */
7627	for (index = 1; index < vectors; index++) {
7628		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7629		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7630		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7631				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7632				 LPFC_FP_DRIVER_HANDLER_NAME,
7633				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7634		if (rc) {
7635			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7636					"0486 MSI-X fast-path (%d) "
7637					"request_irq failed (%d)\n", index, rc);
7638			goto cfg_fail_out;
7639		}
7640	}
7641	phba->sli4_hba.msix_vec_nr = vectors;
7642
7643	return rc;
7644
7645cfg_fail_out:
7646	/* free the irq already requested */
	for (--index; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7650
7651	/* free the irq already requested */
7652	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7653
7654msi_fail_out:
7655	/* Unconfigure MSI-X capability structure */
7656	pci_disable_msix(phba->pcidev);
7657	return rc;
7658}
7659
7660/**
7661 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7662 * @phba: pointer to lpfc hba data structure.
7663 *
7664 * This routine is invoked to release the MSI-X vectors and then disable the
7665 * MSI-X interrupt mode to device with SLI-4 interface spec.
7666 **/
7667static void
7668lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7669{
7670	int index;
7671
7672	/* Free up MSI-X multi-message vectors */
7673	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7674
7675	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7676		free_irq(phba->sli4_hba.msix_entries[index].vector,
7677			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7678
7679	/* Disable MSI-X */
7680	pci_disable_msix(phba->pcidev);
7681
7682	return;
7683}
7684
7685/**
7686 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7687 * @phba: pointer to lpfc hba data structure.
7688 *
7689 * This routine is invoked to enable the MSI interrupt mode to device with
7690 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
7694 *
7695 * Return codes
7696 * 	0 - successful
7697 * 	other values - error
7698 **/
7699static int
7700lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7701{
7702	int rc, index;
7703
7704	rc = pci_enable_msi(phba->pcidev);
7705	if (!rc)
7706		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7707				"0487 PCI enable MSI mode success.\n");
7708	else {
7709		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7710				"0488 PCI enable MSI mode failed (%d)\n", rc);
7711		return rc;
7712	}
7713
7714	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7715			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7716	if (rc) {
7717		pci_disable_msi(phba->pcidev);
7718		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7719				"0490 MSI request_irq failed (%d)\n", rc);
7720		return rc;
7721	}
7722
7723	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7724		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7725		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7726	}
7727
7728	return 0;
7729}
7730
7731/**
7732 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7733 * @phba: pointer to lpfc hba data structure.
7734 *
7735 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and leaves the device with MSI enabled,
 * leaking its vector.
7740 **/
7741static void
7742lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7743{
7744	free_irq(phba->pcidev->irq, phba);
7745	pci_disable_msi(phba->pcidev);
7746	return;
7747}
7748
7749/**
7750 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt configuration mode (2 - MSI-X, 1 - MSI, 0 - INTx).
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) to interrupt vector(s) of a device with
 * SLI-4 interface spec. Depending on the interrupt mode configured for the
 * driver, the driver will try to fall back from the configured interrupt
 * mode to an interrupt mode which is supported by the platform, kernel,
 * and device, in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 	0, 1, 2 - the interrupt mode (INTx, MSI, MSI-X) successfully enabled
 * 	LPFC_INTR_ERROR - failed to enable any interrupt mode
7764 **/
7765static uint32_t
7766lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7767{
7768	uint32_t intr_mode = LPFC_INTR_ERROR;
7769	int retval, index;
7770
	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}
7784
7785	/* Fallback to MSI if MSI-X initialization failed */
7786	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7787		retval = lpfc_sli4_enable_msi(phba);
7788		if (!retval) {
7789			/* Indicate initialization to MSI mode */
7790			phba->intr_type = MSI;
7791			intr_mode = 1;
7792		}
7793	}
7794
	/* Fall back to INTx if both MSI-X/MSI initialization failed */
7796	if (phba->intr_type == NONE) {
7797		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7798				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7799		if (!retval) {
7800			/* Indicate initialization to INTx mode */
7801			phba->intr_type = INTx;
7802			intr_mode = 0;
7803			for (index = 0; index < phba->cfg_fcp_eq_count;
7804			     index++) {
7805				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7806				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7807			}
7808		}
7809	}
7810	return intr_mode;
7811}
7812
7813/**
7814 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7815 * @phba: pointer to lpfc hba data structure.
7816 *
7817 * This routine is invoked to disable device interrupt and disassociate
7818 * the driver's interrupt handler(s) from interrupt vector(s) to device
7819 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7820 * will release the interrupt vector(s) for the message signaled interrupt.
7821 **/
7822static void
7823lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7824{
7825	/* Disable the currently initialized interrupt mode */
7826	if (phba->intr_type == MSIX)
7827		lpfc_sli4_disable_msix(phba);
7828	else if (phba->intr_type == MSI)
7829		lpfc_sli4_disable_msi(phba);
7830	else if (phba->intr_type == INTx)
7831		free_irq(phba->pcidev->irq, phba);
7832
7833	/* Reset interrupt management states */
7834	phba->intr_type = NONE;
7835	phba->sli.slistat.sli_intr = 0;
7836
7837	return;
7838}
7839
7840/**
7841 * lpfc_unset_hba - Unset SLI3 hba device initialization
7842 * @phba: pointer to lpfc hba data structure.
7843 *
 * This routine is invoked to unset the HBA device initialization steps
 * performed on a device with SLI-3 interface spec.
7846 **/
7847static void
7848lpfc_unset_hba(struct lpfc_hba *phba)
7849{
7850	struct lpfc_vport *vport = phba->pport;
7851	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7852
7853	spin_lock_irq(shost->host_lock);
7854	vport->load_flag |= FC_UNLOADING;
7855	spin_unlock_irq(shost->host_lock);
7856
7857	lpfc_stop_hba_timers(phba);
7858
7859	phba->pport->work_port_events = 0;
7860
7861	lpfc_sli_hba_down(phba);
7862
7863	lpfc_sli_brdrestart(phba);
7864
7865	lpfc_sli_disable_intr(phba);
7866
7867	return;
7868}
7869
7870/**
7871 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7872 * @phba: pointer to lpfc hba data structure.
7873 *
 * This routine is invoked to unset the HBA device initialization steps
 * performed on a device with SLI-4 interface spec.
7876 **/
7877static void
7878lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7879{
7880	struct lpfc_vport *vport = phba->pport;
7881	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7882
7883	spin_lock_irq(shost->host_lock);
7884	vport->load_flag |= FC_UNLOADING;
7885	spin_unlock_irq(shost->host_lock);
7886
7887	phba->pport->work_port_events = 0;
7888
7889	/* Stop the SLI4 device port */
7890	lpfc_stop_port(phba);
7891
7892	lpfc_sli4_disable_intr(phba);
7893
7894	/* Reset SLI4 HBA FCoE function */
7895	lpfc_pci_function_reset(phba);
7896
7897	return;
7898}
7899
7900/**
7901 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
7902 * @phba: Pointer to HBA context object.
7903 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy state. It checks the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it checks the XRI exchange busy on outstanding FCP and ELS I/Os
 * every 30 seconds, logs an error message, and waits forever. Only when
 * all XRI exchange busy states complete does the driver unload proceed
 * with invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
7912 **/
7913static void
7914lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7915{
7916	int wait_time = 0;
7917	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7918	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7919
7920	while (!fcp_xri_cmpl || !els_xri_cmpl) {
7921		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7922			if (!fcp_xri_cmpl)
7923				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7924						"2877 FCP XRI exchange busy "
7925						"wait time: %d seconds.\n",
7926						wait_time/1000);
7927			if (!els_xri_cmpl)
7928				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7929						"2878 ELS XRI exchange busy "
7930						"wait time: %d seconds.\n",
7931						wait_time/1000);
7932			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7933			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7934		} else {
7935			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7936			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7937		}
7938		fcp_xri_cmpl =
7939			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7940		els_xri_cmpl =
7941			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7942	}
7943}
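
/*
 * Poll schedule sketch (descriptive only, per the constants referenced
 * above): msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1) paces the initial 10 ms polls
 * until wait_time exceeds LPFC_XRI_EXCH_BUSY_WAIT_TMO (10 seconds), after
 * which msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2) paces the 30 second polls and
 * an error is logged on each slow poll while any XRI remains busy.
 */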
7944
7945/**
7946 * lpfc_sli4_hba_unset - Unset the fcoe hba
7947 * @phba: Pointer to HBA context object.
7948 *
7949 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function
 * after quiescing outstanding mailbox commands, iocbs, and XRI exchange
 * busy states.
7954 **/
7955static void
7956lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7957{
7958	int wait_cnt = 0;
7959	LPFC_MBOXQ_t *mboxq;
7960	struct pci_dev *pdev = phba->pcidev;
7961
7962	lpfc_stop_hba_timers(phba);
7963	phba->sli4_hba.intr_enable = 0;
7964
7965	/*
	 * Gracefully wait out any currently outstanding asynchronous
	 * mailbox command.
7968	 */
7969
	/* First, block any pending async mailbox command from being posted */
7971	spin_lock_irq(&phba->hbalock);
7972	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7973	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
7975	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7976		msleep(10);
7977		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7978			break;
7979	}
7980	/* Forcefully release the outstanding mailbox command if timed out */
7981	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7982		spin_lock_irq(&phba->hbalock);
7983		mboxq = phba->sli.mbox_active;
7984		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7985		__lpfc_mbox_cmpl_put(phba, mboxq);
7986		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7987		phba->sli.mbox_active = NULL;
7988		spin_unlock_irq(&phba->hbalock);
7989	}
7990
7991	/* Abort all iocbs associated with the hba */
7992	lpfc_sli_hba_iocb_abort(phba);
7993
7994	/* Wait for completion of device XRI exchange busy */
7995	lpfc_sli4_xri_exchange_busy_wait(phba);
7996
7997	/* Disable PCI subsystem interrupt */
7998	lpfc_sli4_disable_intr(phba);
7999
8000	/* Disable SR-IOV if enabled */
8001	if (phba->cfg_sriov_nr_virtfn)
8002		pci_disable_sriov(pdev);
8003
	/* Stopping the kthread shall trigger work_done one more time */
8005	kthread_stop(phba->worker_thread);
8006
8007	/* Reset SLI4 HBA FCoE function */
8008	lpfc_pci_function_reset(phba);
8009
8010	/* Stop the SLI4 device port */
8011	phba->pport->work_port_events = 0;
8012}
8013
/**
8015 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8016 * @phba: Pointer to HBA context object.
8017 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8018 *
8019 * This function is called in the SLI4 code path to read the port's
8020 * sli4 capabilities.
8021 *
 * This function may be called from any context that can block-wait
8023 * for the completion.  The expectation is that this routine is called
8024 * typically from probe_one or from the online routine.
8025 **/
8026int
8027lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8028{
8029	int rc;
8030	struct lpfc_mqe *mqe;
8031	struct lpfc_pc_sli4_params *sli4_params;
8032	uint32_t mbox_tmo;
8033
8034	rc = 0;
8035	mqe = &mboxq->u.mqe;
8036
8037	/* Read the port's SLI4 Parameters port capabilities */
8038	lpfc_pc_sli4_params(mboxq);
8039	if (!phba->sli4_hba.intr_enable)
8040		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8041	else {
8042		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
8043		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8044	}
8045
8046	if (unlikely(rc))
8047		return 1;
8048
8049	sli4_params = &phba->sli4_hba.pc_sli4_params;
8050	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8051	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8052	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8053	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8054					     &mqe->un.sli4_params);
8055	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8056					     &mqe->un.sli4_params);
8057	sli4_params->proto_types = mqe->un.sli4_params.word3;
8058	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8059	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8060	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8061	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8062	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8063	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8064	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8065	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8066	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8067	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8068	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8069	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8070	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8071	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8072	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8073	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8074	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8075	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8076	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8077	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8078
8079	/* Make sure that sge_supp_len can be handled by the driver */
8080	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8081		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8082
8083	return rc;
8084}
8085
8086/**
8087 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8088 * @phba: Pointer to HBA context object.
8089 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8090 *
8091 * This function is called in the SLI4 code path to read the port's
8092 * sli4 capabilities.
8093 *
 * This function may be called from any context that can block-wait
8095 * for the completion.  The expectation is that this routine is called
8096 * typically from probe_one or from the online routine.
8097 **/
8098int
8099lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8100{
8101	int rc;
8102	struct lpfc_mqe *mqe = &mboxq->u.mqe;
8103	struct lpfc_pc_sli4_params *sli4_params;
8104	int length;
8105	struct lpfc_sli4_parameters *mbx_sli4_parameters;
8106
8107	/* Read the port's SLI4 Config Parameters */
8108	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8109		  sizeof(struct lpfc_sli4_cfg_mhdr));
8110	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8111			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8112			 length, LPFC_SLI4_MBX_EMBED);
8113	if (!phba->sli4_hba.intr_enable)
8114		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8115	else
8116		rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
8117			lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
8118	if (unlikely(rc))
8119		return rc;
8120	sli4_params = &phba->sli4_hba.pc_sli4_params;
8121	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8122	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8123	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8124	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8125	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8126					     mbx_sli4_parameters);
8127	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8128					     mbx_sli4_parameters);
8129	if (bf_get(cfg_phwq, mbx_sli4_parameters))
8130		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8131	else
8132		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8133	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8134	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8135	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8136	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8137	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8138	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8139	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8140					    mbx_sli4_parameters);
8141	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8142					   mbx_sli4_parameters);
8143
8144	/* Make sure that sge_supp_len can be handled by the driver */
8145	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8146		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8147
8148	return 0;
8149}
8150
8151/**
8152 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8153 * @pdev: pointer to PCI device
8154 * @pid: pointer to PCI device identifier
8155 *
8156 * This routine is to be called to attach a device with SLI-3 interface spec
8157 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8158 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it can
8160 * support this kind of device. If the match is successful, the driver core
8161 * invokes this routine. If this routine determines it can claim the HBA, it
8162 * does all the initialization that it needs to do to handle the HBA properly.
8163 *
8164 * Return code
8165 * 	0 - driver can claim the device
8166 * 	negative value - driver can not claim the device
8167 **/
8168static int __devinit
8169lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8170{
8171	struct lpfc_hba   *phba;
8172	struct lpfc_vport *vport = NULL;
8173	struct Scsi_Host  *shost = NULL;
8174	int error;
8175	uint32_t cfg_mode, intr_mode;
8176
8177	/* Allocate memory for HBA structure */
8178	phba = lpfc_hba_alloc(pdev);
8179	if (!phba)
8180		return -ENOMEM;
8181
8182	/* Perform generic PCI device enabling operation */
8183	error = lpfc_enable_pci_dev(phba);
8184	if (error) {
8185		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8186				"1401 Failed to enable pci device.\n");
8187		goto out_free_phba;
8188	}
8189
8190	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
8191	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8192	if (error)
8193		goto out_disable_pci_dev;
8194
8195	/* Set up SLI-3 specific device PCI memory space */
8196	error = lpfc_sli_pci_mem_setup(phba);
8197	if (error) {
8198		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8199				"1402 Failed to set up pci memory space.\n");
8200		goto out_disable_pci_dev;
8201	}
8202
8203	/* Set up phase-1 common device driver resources */
8204	error = lpfc_setup_driver_resource_phase1(phba);
8205	if (error) {
8206		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8207				"1403 Failed to set up driver resource.\n");
8208		goto out_unset_pci_mem_s3;
8209	}
8210
8211	/* Set up SLI-3 specific device driver resources */
8212	error = lpfc_sli_driver_resource_setup(phba);
8213	if (error) {
8214		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8215				"1404 Failed to set up driver resource.\n");
8216		goto out_unset_pci_mem_s3;
8217	}
8218
8219	/* Initialize and populate the iocb list per host */
8220	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8221	if (error) {
8222		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8223				"1405 Failed to initialize iocb list.\n");
8224		goto out_unset_driver_resource_s3;
8225	}
8226
8227	/* Set up common device driver resources */
8228	error = lpfc_setup_driver_resource_phase2(phba);
8229	if (error) {
8230		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8231				"1406 Failed to set up driver resource.\n");
8232		goto out_free_iocb_list;
8233	}
8234
8235	/* Create SCSI host to the physical port */
8236	error = lpfc_create_shost(phba);
8237	if (error) {
8238		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8239				"1407 Failed to create scsi host.\n");
8240		goto out_unset_driver_resource;
8241	}
8242
8243	/* Configure sysfs attributes */
8244	vport = phba->pport;
8245	error = lpfc_alloc_sysfs_attr(vport);
8246	if (error) {
8247		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8248				"1476 Failed to allocate sysfs attr\n");
8249		goto out_destroy_shost;
8250	}
8251
8252	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8253	/* Now, trying to enable interrupt and bring up the device */
8254	cfg_mode = phba->cfg_use_msi;
8255	while (true) {
8256		/* Put device to a known state before enabling interrupt */
8257		lpfc_stop_port(phba);
8258		/* Configure and enable interrupt */
8259		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8260		if (intr_mode == LPFC_INTR_ERROR) {
8261			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8262					"0431 Failed to enable interrupt.\n");
8263			error = -ENODEV;
8264			goto out_free_sysfs_attr;
8265		}
8266		/* SLI-3 HBA setup */
8267		if (lpfc_sli_hba_setup(phba)) {
8268			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8269					"1477 Failed to set up hba\n");
8270			error = -ENODEV;
8271			goto out_remove_device;
8272		}
8273
8274		/* Wait 50ms for the interrupts of previous mailbox commands */
8275		msleep(50);
8276		/* Check active interrupts on message signaled interrupts */
8277		if (intr_mode == 0 ||
8278		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8279			/* Log the current active interrupt mode */
8280			phba->intr_mode = intr_mode;
8281			lpfc_log_intr_mode(phba, intr_mode);
8282			break;
8283		} else {
8284			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8285					"0447 Configure interrupt mode (%d) "
8286					"failed active interrupt test.\n",
8287					intr_mode);
8288			/* Disable the current interrupt mode */
8289			lpfc_sli_disable_intr(phba);
8290			/* Try next level of interrupt mode */
8291			cfg_mode = --intr_mode;
8292		}
8293	}
8294
8295	/* Perform post initialization setup */
8296	lpfc_post_init_setup(phba);
8297
8298	/* Check if there are static vports to be created. */
8299	lpfc_create_static_vport(phba);
8300
8301	return 0;
8302
8303out_remove_device:
8304	lpfc_unset_hba(phba);
8305out_free_sysfs_attr:
8306	lpfc_free_sysfs_attr(vport);
8307out_destroy_shost:
8308	lpfc_destroy_shost(phba);
8309out_unset_driver_resource:
8310	lpfc_unset_driver_resource_phase2(phba);
8311out_free_iocb_list:
8312	lpfc_free_iocb_list(phba);
8313out_unset_driver_resource_s3:
8314	lpfc_sli_driver_resource_unset(phba);
8315out_unset_pci_mem_s3:
8316	lpfc_sli_pci_mem_unset(phba);
8317out_disable_pci_dev:
8318	lpfc_disable_pci_dev(phba);
8319	if (shost)
8320		scsi_host_put(shost);
8321out_free_phba:
8322	lpfc_hba_free(phba);
8323	return error;
8324}
8325
8326/**
8327 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8328 * @pdev: pointer to PCI device
8329 *
 * This routine is to be called to detach a device with SLI-3 interface
8331 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8332 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8333 * device to be removed from the PCI subsystem properly.
8334 **/
8335static void __devexit
8336lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8337{
8338	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
8339	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8340	struct lpfc_vport **vports;
8341	struct lpfc_hba   *phba = vport->phba;
8342	int i;
8343	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8344
8345	spin_lock_irq(&phba->hbalock);
8346	vport->load_flag |= FC_UNLOADING;
8347	spin_unlock_irq(&phba->hbalock);
8348
8349	lpfc_free_sysfs_attr(vport);
8350
8351	/* Release all the vports against this physical port */
8352	vports = lpfc_create_vport_work_array(phba);
8353	if (vports != NULL)
8354		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8355			fc_vport_terminate(vports[i]->fc_vport);
8356	lpfc_destroy_vport_work_array(phba, vports);
8357
8358	/* Remove FC host and then SCSI host with the physical port */
8359	fc_remove_host(shost);
8360	scsi_remove_host(shost);
8361	lpfc_cleanup(vport);
8362
8363	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
8365	 * clears the rings, discards all mailbox commands, and resets
8366	 * the HBA.
8367	 */
8368
8369	/* HBA interrupt will be disabled after this call */
8370	lpfc_sli_hba_down(phba);
	/* Stopping the kthread triggers one final pass of work_done */
8372	kthread_stop(phba->worker_thread);
8373	/* Final cleanup of txcmplq and reset the HBA */
8374	lpfc_sli_brdrestart(phba);
8375
8376	lpfc_stop_hba_timers(phba);
8377	spin_lock_irq(&phba->hbalock);
8378	list_del_init(&vport->listentry);
8379	spin_unlock_irq(&phba->hbalock);
8380
8381	lpfc_debugfs_terminate(vport);
8382
8383	/* Disable SR-IOV if enabled */
8384	if (phba->cfg_sriov_nr_virtfn)
8385		pci_disable_sriov(pdev);
8386
8387	/* Disable interrupt */
8388	lpfc_sli_disable_intr(phba);
8389
8390	pci_set_drvdata(pdev, NULL);
8391	scsi_host_put(shost);
8392
8393	/*
8394	 * Call scsi_free before mem_free since scsi bufs are released to their
8395	 * corresponding pools here.
8396	 */
8397	lpfc_scsi_free(phba);
8398	lpfc_mem_free_all(phba);
8399
8400	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8401			  phba->hbqslimp.virt, phba->hbqslimp.phys);
8402
8403	/* Free resources associated with SLI2 interface */
8404	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8405			  phba->slim2p.virt, phba->slim2p.phys);
8406
8407	/* unmap adapter SLIM and Control Registers */
8408	iounmap(phba->ctrl_regs_memmap_p);
8409	iounmap(phba->slim_memmap_p);
8410
8411	lpfc_hba_free(phba);
8412
8413	pci_release_selected_regions(pdev, bars);
8414	pci_disable_device(pdev);
8415}
8416
8417/**
8418 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8419 * @pdev: pointer to PCI device
8420 * @msg: power management message
8421 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-3 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread, turning off the device's interrupts and DMA, and bringing
 * the device offline. Note that the driver implements only the minimum PM
 * requirements for a power-aware driver: all possible PM messages (SUSPEND,
 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
 * and the driver fully reinitializes the device during the resume() method
 * call. Accordingly, the driver sets the device to PCI_D3hot in PCI config
 * space instead of setting it according to the @msg provided by the PM.
8433 *
8434 * Return code
8435 * 	0 - driver suspended the device
8436 * 	Error otherwise
8437 **/
8438static int
8439lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8440{
8441	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8442	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8443
8444	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8445			"0473 PCI device Power Management suspend.\n");
8446
8447	/* Bring down the device */
8448	lpfc_offline_prep(phba);
8449	lpfc_offline(phba);
8450	kthread_stop(phba->worker_thread);
8451
8452	/* Disable interrupt from device */
8453	lpfc_sli_disable_intr(phba);
8454
8455	/* Save device state to PCI config space */
8456	pci_save_state(pdev);
8457	pci_set_power_state(pdev, PCI_D3hot);
8458
8459	return 0;
8460}
8461
8462/**
8463 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8464 * @pdev: pointer to PCI device
8465 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the
 * driver implements only the minimum PM requirements for a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND and the driver fully
 * reinitializes the device during the resume() method call. Accordingly,
 * the device is set to PCI_D0 directly in PCI config space before its
 * saved state is restored.
8476 *
8477 * Return code
 * 	0 - driver resumed the device
8479 * 	Error otherwise
8480 **/
8481static int
8482lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8483{
8484	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8485	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8486	uint32_t intr_mode;
8487	int error;
8488
8489	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8490			"0452 PCI device Power Management resume.\n");
8491
8492	/* Restore device state from PCI config space */
8493	pci_set_power_state(pdev, PCI_D0);
8494	pci_restore_state(pdev);
8495
8496	/*
8497	 * As the new kernel behavior of pci_restore_state() API call clears
8498	 * device saved_state flag, need to save the restored state again.
8499	 */
8500	pci_save_state(pdev);
8501
8502	if (pdev->is_busmaster)
8503		pci_set_master(pdev);
8504
8505	/* Startup the kernel thread for this host adapter. */
8506	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8507					"lpfc_worker_%d", phba->brd_no);
8508	if (IS_ERR(phba->worker_thread)) {
8509		error = PTR_ERR(phba->worker_thread);
8510		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8511				"0434 PM resume failed to start worker "
8512				"thread: error=x%x.\n", error);
8513		return error;
8514	}
8515
8516	/* Configure and enable interrupt */
8517	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8518	if (intr_mode == LPFC_INTR_ERROR) {
8519		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8520				"0430 PM resume Failed to enable interrupt\n");
8521		return -EIO;
8522	} else
8523		phba->intr_mode = intr_mode;
8524
8525	/* Restart HBA and bring it online */
8526	lpfc_sli_brdrestart(phba);
8527	lpfc_online(phba);
8528
8529	/* Log the current active interrupt mode */
8530	lpfc_log_intr_mode(phba, phba->intr_mode);
8531
8532	return 0;
8533}
8534
8535/**
8536 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8537 * @phba: pointer to lpfc hba data structure.
8538 *
8539 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8540 * aborts all the outstanding SCSI I/Os to the pci device.
8541 **/
8542static void
8543lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8544{
8545	struct lpfc_sli *psli = &phba->sli;
8546	struct lpfc_sli_ring  *pring;
8547
8548	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8549			"2723 PCI channel I/O abort preparing for recovery\n");
8550
8551	/*
8552	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
8554	 */
8555	pring = &psli->ring[psli->fcp_ring];
8556	lpfc_sli_abort_iocb_ring(phba, pring);
8557}
8558
8559/**
8560 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8561 * @phba: pointer to lpfc hba data structure.
8562 *
8563 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8564 * disables the device interrupt and pci device, and aborts the internal FCP
8565 * pending I/Os.
8566 **/
8567static void
8568lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8569{
8570	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8571			"2710 PCI channel disable preparing for reset\n");
8572
8573	/* Block any management I/Os to the device */
8574	lpfc_block_mgmt_io(phba);
8575
8576	/* Block all SCSI devices' I/Os on the host */
8577	lpfc_scsi_dev_block(phba);
8578
8579	/* stop all timers */
8580	lpfc_stop_hba_timers(phba);
8581
8582	/* Disable interrupt and pci device */
8583	lpfc_sli_disable_intr(phba);
8584	pci_disable_device(phba->pcidev);
8585
8586	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
8587	lpfc_sli_flush_fcp_rings(phba);
8588}
8589
8590/**
8591 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8592 * @phba: pointer to lpfc hba data structure.
8593 *
8594 * This routine is called to prepare the SLI3 device for PCI slot permanently
8595 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8596 * pending I/Os.
8597 **/
8598static void
8599lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8600{
8601	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8602			"2711 PCI channel permanent disable for failure\n");
8603	/* Block all SCSI devices' I/Os on the host */
8604	lpfc_scsi_dev_block(phba);
8605
8606	/* stop all timers */
8607	lpfc_stop_hba_timers(phba);
8608
8609	/* Clean up all driver's outstanding SCSI I/Os */
8610	lpfc_sli_flush_fcp_rings(phba);
8611}
8612
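
/*
 * The three prep helpers above map onto the pci_channel_state_t values
 * handled in lpfc_io_error_detected_s3() below: io_normal -> recover,
 * io_frozen -> slot reset, io_perm_failure -> permanent disable.
 */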
8613/**
8614 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8615 * @pdev: pointer to PCI device.
8616 * @state: the current PCI connection state.
8617 *
 * This routine is called from the PCI subsystem for I/O error handling on a
8619 * device with SLI-3 interface spec. This function is called by the PCI
8620 * subsystem after a PCI bus error affecting this device has been detected.
8621 * When this function is invoked, it will need to stop all the I/Os and
8622 * interrupt(s) to the device. Once that is done, it will return
8623 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8624 * as desired.
8625 *
8626 * Return codes
8627 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8628 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8629 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8630 **/
8631static pci_ers_result_t
8632lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8633{
8634	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8635	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8636
8637	switch (state) {
8638	case pci_channel_io_normal:
8639		/* Non-fatal error, prepare for recovery */
8640		lpfc_sli_prep_dev_for_recover(phba);
8641		return PCI_ERS_RESULT_CAN_RECOVER;
8642	case pci_channel_io_frozen:
8643		/* Fatal error, prepare for slot reset */
8644		lpfc_sli_prep_dev_for_reset(phba);
8645		return PCI_ERS_RESULT_NEED_RESET;
8646	case pci_channel_io_perm_failure:
8647		/* Permanent failure, prepare for device down */
8648		lpfc_sli_prep_dev_for_perm_failure(phba);
8649		return PCI_ERS_RESULT_DISCONNECT;
8650	default:
8651		/* Unknown state, prepare and request slot reset */
8652		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8653				"0472 Unknown PCI error state: x%x\n", state);
8654		lpfc_sli_prep_dev_for_reset(phba);
8655		return PCI_ERS_RESULT_NEED_RESET;
8656	}
8657}
8658
8659/**
8660 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
8661 * @pdev: pointer to PCI device.
8662 *
 * This routine is called from the PCI subsystem for error handling on a
8664 * device with SLI-3 interface spec. This is called after PCI bus has been
8665 * reset to restart the PCI card from scratch, as if from a cold-boot.
8666 * During the PCI subsystem error recovery, after driver returns
8667 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8668 * recovery and then call this routine before calling the .resume method
8669 * to recover the device. This function will initialize the HBA device,
8670 * enable the interrupt, but it will just put the HBA to offline state
8671 * without passing any I/O traffic.
8672 *
8673 * Return codes
8674 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8675 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8676 */
8677static pci_ers_result_t
8678lpfc_io_slot_reset_s3(struct pci_dev *pdev)
8679{
8680	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8681	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8682	struct lpfc_sli *psli = &phba->sli;
8683	uint32_t intr_mode;
8684
8685	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8686	if (pci_enable_device_mem(pdev)) {
8687		printk(KERN_ERR "lpfc: Cannot re-enable "
8688			"PCI device after reset.\n");
8689		return PCI_ERS_RESULT_DISCONNECT;
8690	}
8691
8692	pci_restore_state(pdev);
8693
8694	/*
8695	 * As the new kernel behavior of pci_restore_state() API call clears
8696	 * device saved_state flag, need to save the restored state again.
8697	 */
8698	pci_save_state(pdev);
8699
8700	if (pdev->is_busmaster)
8701		pci_set_master(pdev);
8702
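	/*
	 * Clear LPFC_SLI_ACTIVE so the offline/restart sequence below
	 * treats the SLI layer as down and performs a full hardware
	 * reinitialization.
	 */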
8703	spin_lock_irq(&phba->hbalock);
8704	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8705	spin_unlock_irq(&phba->hbalock);
8706
8707	/* Configure and enable interrupt */
8708	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8709	if (intr_mode == LPFC_INTR_ERROR) {
8710		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8711				"0427 Cannot re-enable interrupt after "
8712				"slot reset.\n");
8713		return PCI_ERS_RESULT_DISCONNECT;
8714	} else
8715		phba->intr_mode = intr_mode;
8716
8717	/* Take device offline, it will perform cleanup */
8718	lpfc_offline_prep(phba);
8719	lpfc_offline(phba);
8720	lpfc_sli_brdrestart(phba);
8721
8722	/* Log the current active interrupt mode */
8723	lpfc_log_intr_mode(phba, phba->intr_mode);
8724
8725	return PCI_ERS_RESULT_RECOVERED;
8726}
8727
8728/**
8729 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8730 * @pdev: pointer to PCI device
8731 *
 * This routine is called from the PCI subsystem for error handling on a device
8733 * with SLI-3 interface spec. It is called when kernel error recovery tells
8734 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8735 * error recovery. After this call, traffic can start to flow from this device
8736 * again.
8737 */
8738static void
8739lpfc_io_resume_s3(struct pci_dev *pdev)
8740{
8741	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8742	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8743
8744	/* Bring device online, it will be no-op for non-fatal error resume */
8745	lpfc_online(phba);
8746
8747	/* Clean up Advanced Error Reporting (AER) if needed */
8748	if (phba->hba_flag & HBA_AER_ENABLED)
8749		pci_cleanup_aer_uncorrect_error_status(pdev);
8750}
8751
8752/**
8753 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8754 * @phba: pointer to lpfc hba data structure.
8755 *
8756 * returns the number of ELS/CT IOCBs to reserve
8757 **/
8758int
8759lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8760{
8761	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8762
8763	if (phba->sli_rev == LPFC_SLI_REV4) {
8764		if (max_xri <= 100)
8765			return 10;
8766		else if (max_xri <= 256)
8767			return 25;
8768		else if (max_xri <= 512)
8769			return 50;
8770		else if (max_xri <= 1024)
8771			return 100;
8772		else
8773			return 150;
8774	} else
8775		return 0;
8776}
8777
8778/**
8779 * lpfc_write_firmware - attempt to write a firmware image to the port
8780 * @phba: pointer to lpfc hba data structure.
8781 * @fw: pointer to firmware image returned from request_firmware.
8782 *
8783 * returns the number of bytes written if write is successful.
8784 * returns a negative error value if there were errors.
8785 * returns 0 if firmware matches currently active firmware on port.
8786 **/
8787int
8788lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8789{
8790	char fwrev[32];
8791	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
8792	struct list_head dma_buffer_list;
8793	int i, rc = 0;
8794	struct lpfc_dmabuf *dmabuf, *next;
8795	uint32_t offset = 0, temp_offset = 0;
8796
8797	INIT_LIST_HEAD(&dma_buffer_list);
8798	if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8799	    (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
8800	    (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8801	    (image->size != fw->size)) {
8802		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8803				"3022 Invalid FW image found. "
8804				"Magic:%d Type:%x ID:%x\n",
8805				image->magic_number,
8806				bf_get(lpfc_grp_hdr_file_type, image),
8807				bf_get(lpfc_grp_hdr_id, image));
8808		return -EINVAL;
8809	}
8810	lpfc_decode_firmware_rev(phba, fwrev, 1);
8811	if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
8812		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8813				"3023 Updating Firmware. Current Version:%s "
8814				"New Version:%s\n",
8815				fwrev, image->rev_name);
8816		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8817			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8818					 GFP_KERNEL);
8819			if (!dmabuf) {
8820				rc = -ENOMEM;
8821				goto out;
8822			}
8823			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8824							  SLI4_PAGE_SIZE,
8825							  &dmabuf->phys,
8826							  GFP_KERNEL);
8827			if (!dmabuf->virt) {
8828				kfree(dmabuf);
8829				rc = -ENOMEM;
8830				goto out;
8831			}
8832			list_add_tail(&dmabuf->list, &dma_buffer_list);
8833		}
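		/*
		 * Stream the image through the fixed set of DMA buffers
		 * allocated above: each pass fills up to
		 * LPFC_MBX_WR_CONFIG_MAX_BDE pages and hands them to
		 * lpfc_wr_object(), which advances @offset by the amount
		 * the port accepted.
		 */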
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
8848			rc = lpfc_wr_object(phba, &dma_buffer_list,
8849				    (fw->size - offset), &offset);
8850			if (rc) {
8851				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8852						"3024 Firmware update failed. "
8853						"%d\n", rc);
8854				goto out;
8855			}
8856		}
8857		rc = offset;
8858	}
8859out:
8860	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
8861		list_del(&dmabuf->list);
8862		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
8863				  dmabuf->virt, dmabuf->phys);
8864		kfree(dmabuf);
8865	}
8866	return rc;
8867}
8868
8869/**
8870 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8871 * @pdev: pointer to PCI device
8872 * @pid: pointer to PCI device identifier
8873 *
 * This routine is called from the kernel's PCI subsystem for a device with
8875 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8876 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it
8878 * can support this kind of device. If the match is successful, the driver
8879 * core invokes this routine. If this routine determines it can claim the HBA,
8880 * it does all the initialization that it needs to do to handle the HBA
8881 * properly.
8882 *
8883 * Return code
8884 * 	0 - driver can claim the device
8885 * 	negative value - driver can not claim the device
8886 **/
8887static int __devinit
8888lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8889{
8890	struct lpfc_hba   *phba;
8891	struct lpfc_vport *vport = NULL;
8892	struct Scsi_Host  *shost = NULL;
8893	int error;
8894	uint32_t cfg_mode, intr_mode;
8895	int mcnt;
8896	int adjusted_fcp_eq_count;
8897	int fcp_qidx;
8898	const struct firmware *fw;
8899	uint8_t file_name[16];
8900
8901	/* Allocate memory for HBA structure */
8902	phba = lpfc_hba_alloc(pdev);
8903	if (!phba)
8904		return -ENOMEM;
8905
8906	/* Perform generic PCI device enabling operation */
8907	error = lpfc_enable_pci_dev(phba);
8908	if (error) {
8909		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8910				"1409 Failed to enable pci device.\n");
8911		goto out_free_phba;
8912	}
8913
8914	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
8915	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8916	if (error)
8917		goto out_disable_pci_dev;
8918
8919	/* Set up SLI-4 specific device PCI memory space */
8920	error = lpfc_sli4_pci_mem_setup(phba);
8921	if (error) {
8922		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8923				"1410 Failed to set up pci memory space.\n");
8924		goto out_disable_pci_dev;
8925	}
8926
8927	/* Set up phase-1 common device driver resources */
8928	error = lpfc_setup_driver_resource_phase1(phba);
8929	if (error) {
8930		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8931				"1411 Failed to set up driver resource.\n");
8932		goto out_unset_pci_mem_s4;
8933	}
8934
8935	/* Set up SLI-4 Specific device driver resources */
8936	error = lpfc_sli4_driver_resource_setup(phba);
8937	if (error) {
8938		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8939				"1412 Failed to set up driver resource.\n");
8940		goto out_unset_pci_mem_s4;
8941	}
8942
8943	/* Initialize and populate the iocb list per host */
8944
8945	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8946			"2821 initialize iocb list %d.\n",
8947			phba->cfg_iocb_cnt*1024);
8948	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8949
8950	if (error) {
8951		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8952				"1413 Failed to initialize iocb list.\n");
8953		goto out_unset_driver_resource_s4;
8954	}
8955
8956	INIT_LIST_HEAD(&phba->active_rrq_list);
8957
8958	/* Set up common device driver resources */
8959	error = lpfc_setup_driver_resource_phase2(phba);
8960	if (error) {
8961		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8962				"1414 Failed to set up driver resource.\n");
8963		goto out_free_iocb_list;
8964	}
8965
8966	/* Create SCSI host to the physical port */
8967	error = lpfc_create_shost(phba);
8968	if (error) {
8969		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8970				"1415 Failed to create scsi host.\n");
8971		goto out_unset_driver_resource;
8972	}
8973
8974	/* Configure sysfs attributes */
8975	vport = phba->pport;
8976	error = lpfc_alloc_sysfs_attr(vport);
8977	if (error) {
8978		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8979				"1416 Failed to allocate sysfs attr\n");
8980		goto out_destroy_shost;
8981	}
8982
8983	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8984	/* Now, trying to enable interrupt and bring up the device */
8985	cfg_mode = phba->cfg_use_msi;
8986	while (true) {
8987		/* Put device to a known state before enabling interrupt */
8988		lpfc_stop_port(phba);
8989		/* Configure and enable interrupt */
8990		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8991		if (intr_mode == LPFC_INTR_ERROR) {
8992			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8993					"0426 Failed to enable interrupt.\n");
8994			error = -ENODEV;
8995			goto out_free_sysfs_attr;
8996		}
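		/*
		 * One MSI-X vector services the slow-path EQ, so at most
		 * (msix_vec_nr - 1) fast-path FCP EQs can be supported;
		 * trim cfg_fcp_eq_count to fit and fall back to a single
		 * EQ when not in MSI-X mode.
		 */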
8997		/* Default to single EQ for non-MSI-X */
8998		if (phba->intr_type != MSIX)
8999			adjusted_fcp_eq_count = 0;
9000		else if (phba->sli4_hba.msix_vec_nr <
9001					phba->cfg_fcp_eq_count + 1)
9002			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9003		else
9004			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9005		/* Free unused EQs */
9006		for (fcp_qidx = adjusted_fcp_eq_count;
9007		     fcp_qidx < phba->cfg_fcp_eq_count;
9008		     fcp_qidx++) {
9009			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
9010			/* do not delete the first fcp_cq */
9011			if (fcp_qidx)
9012				lpfc_sli4_queue_free(
9013					phba->sli4_hba.fcp_cq[fcp_qidx]);
9014		}
9015		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
9016		/* Set up SLI-4 HBA */
9017		if (lpfc_sli4_hba_setup(phba)) {
9018			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9019					"1421 Failed to set up hba\n");
9020			error = -ENODEV;
9021			goto out_disable_intr;
9022		}
9023
9024		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
9025		if (intr_mode != 0)
9026			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9027							    LPFC_ACT_INTR_CNT);
9028
9029		/* Check active interrupts received only for MSI/MSI-X */
9030		if (intr_mode == 0 ||
9031		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9032			/* Log the current active interrupt mode */
9033			phba->intr_mode = intr_mode;
9034			lpfc_log_intr_mode(phba, intr_mode);
9035			break;
9036		}
9037		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9038				"0451 Configure interrupt mode (%d) "
9039				"failed active interrupt test.\n",
9040				intr_mode);
9041		/* Unset the previous SLI-4 HBA setup. */
9042		/*
9043		 * TODO:  Is this operation compatible with IF TYPE 2
9044		 * devices?  All port state is deleted and cleared.
9045		 */
9046		lpfc_sli4_unset_hba(phba);
9047		/* Try next level of interrupt mode */
9048		cfg_mode = --intr_mode;
9049	}
9050
9051	/* Perform post initialization setup */
9052	lpfc_post_init_setup(phba);
9053
9054	/* check for firmware upgrade or downgrade */
9055	snprintf(file_name, 16, "%s.grp", phba->ModelName);
9056	error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9057	if (!error) {
9058		lpfc_write_firmware(phba, fw);
9059		release_firmware(fw);
9060	}
9061
9062	/* Check if there are static vports to be created. */
9063	lpfc_create_static_vport(phba);
9064
9065	return 0;
9066
9067out_disable_intr:
9068	lpfc_sli4_disable_intr(phba);
9069out_free_sysfs_attr:
9070	lpfc_free_sysfs_attr(vport);
9071out_destroy_shost:
9072	lpfc_destroy_shost(phba);
9073out_unset_driver_resource:
9074	lpfc_unset_driver_resource_phase2(phba);
9075out_free_iocb_list:
9076	lpfc_free_iocb_list(phba);
9077out_unset_driver_resource_s4:
9078	lpfc_sli4_driver_resource_unset(phba);
9079out_unset_pci_mem_s4:
9080	lpfc_sli4_pci_mem_unset(phba);
9081out_disable_pci_dev:
9082	lpfc_disable_pci_dev(phba);
9083	if (shost)
9084		scsi_host_put(shost);
9085out_free_phba:
9086	lpfc_hba_free(phba);
9087	return error;
9088}
9089
9090/**
9091 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9092 * @pdev: pointer to PCI device
9093 *
 * This routine is called from the kernel's PCI subsystem for a device with
9095 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9096 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9097 * device to be removed from the PCI subsystem properly.
9098 **/
9099static void __devexit
9100lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9101{
9102	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9103	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9104	struct lpfc_vport **vports;
9105	struct lpfc_hba *phba = vport->phba;
9106	int i;
9107
9108	/* Mark the device unloading flag */
9109	spin_lock_irq(&phba->hbalock);
9110	vport->load_flag |= FC_UNLOADING;
9111	spin_unlock_irq(&phba->hbalock);
9112
9113	/* Free the HBA sysfs attributes */
9114	lpfc_free_sysfs_attr(vport);
9115
9116	/* Release all the vports against this physical port */
9117	vports = lpfc_create_vport_work_array(phba);
9118	if (vports != NULL)
9119		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
9120			fc_vport_terminate(vports[i]->fc_vport);
9121	lpfc_destroy_vport_work_array(phba, vports);
9122
9123	/* Remove FC host and then SCSI host with the physical port */
9124	fc_remove_host(shost);
9125	scsi_remove_host(shost);
9126
9127	/* Perform cleanup on the physical port */
9128	lpfc_cleanup(vport);
9129
9130	/*
9131	 * Bring down the SLI Layer. This step disables all interrupts,
9132	 * clears the rings, discards all mailbox commands, and resets
9133	 * the HBA FCoE function.
9134	 */
9135	lpfc_debugfs_terminate(vport);
9136	lpfc_sli4_hba_unset(phba);
9137
9138	spin_lock_irq(&phba->hbalock);
9139	list_del_init(&vport->listentry);
9140	spin_unlock_irq(&phba->hbalock);
9141
9142	/* Perform scsi free before driver resource_unset since scsi
9143	 * buffers are released to their corresponding pools here.
9144	 */
9145	lpfc_scsi_free(phba);
9146	lpfc_sli4_driver_resource_unset(phba);
9147
9148	/* Unmap adapter Control and Doorbell registers */
9149	lpfc_sli4_pci_mem_unset(phba);
9150
9151	/* Release PCI resources and disable device's PCI function */
9152	scsi_host_put(shost);
9153	lpfc_disable_pci_dev(phba);
9154
9155	/* Finally, free the driver's device data structure */
9156	lpfc_hba_free(phba);
9157
9158	return;
9159}
9160
9161/**
9162 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9163 * @pdev: pointer to PCI device
9164 * @msg: power management message
9165 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread, turning off the device's interrupts and DMA, and bringing
 * the device offline. Note that the driver implements only the minimum PM
 * requirements for a power-aware driver: all possible PM messages (SUSPEND,
 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
 * and the driver fully reinitializes the device during the resume() method
 * call. Accordingly, the driver sets the device to PCI_D3hot in PCI config
 * space instead of setting it according to the @msg provided by the PM.
9177 *
9178 * Return code
9179 * 	0 - driver suspended the device
9180 * 	Error otherwise
9181 **/
9182static int
9183lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9184{
9185	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9186	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9187
9188	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9189			"2843 PCI device Power Management suspend.\n");
9190
9191	/* Bring down the device */
9192	lpfc_offline_prep(phba);
9193	lpfc_offline(phba);
9194	kthread_stop(phba->worker_thread);
9195
9196	/* Disable interrupt from device */
9197	lpfc_sli4_disable_intr(phba);
9198
9199	/* Save device state to PCI config space */
9200	pci_save_state(pdev);
9201	pci_set_power_state(pdev, PCI_D3hot);
9202
9203	return 0;
9204}
9205
9206/**
9207 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9208 * @pdev: pointer to PCI device
9209 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the
 * driver implements only the minimum PM requirements for a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND and the driver fully
 * reinitializes the device during the resume() method call. Accordingly,
 * the device is set to PCI_D0 directly in PCI config space before its
 * saved state is restored.
9220 *
9221 * Return code
 * 	0 - driver resumed the device
9223 * 	Error otherwise
9224 **/
9225static int
9226lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9227{
9228	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9229	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9230	uint32_t intr_mode;
9231	int error;
9232
9233	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9234			"0292 PCI device Power Management resume.\n");
9235
9236	/* Restore device state from PCI config space */
9237	pci_set_power_state(pdev, PCI_D0);
9238	pci_restore_state(pdev);
9239
9240	/*
9241	 * As the new kernel behavior of pci_restore_state() API call clears
9242	 * device saved_state flag, need to save the restored state again.
9243	 */
9244	pci_save_state(pdev);
9245
9246	if (pdev->is_busmaster)
9247		pci_set_master(pdev);
9248
	/* Startup the kernel thread for this host adapter. */
9250	phba->worker_thread = kthread_run(lpfc_do_work, phba,
9251					"lpfc_worker_%d", phba->brd_no);
9252	if (IS_ERR(phba->worker_thread)) {
9253		error = PTR_ERR(phba->worker_thread);
9254		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9255				"0293 PM resume failed to start worker "
9256				"thread: error=x%x.\n", error);
9257		return error;
9258	}
9259
9260	/* Configure and enable interrupt */
9261	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9262	if (intr_mode == LPFC_INTR_ERROR) {
9263		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9264				"0294 PM resume Failed to enable interrupt\n");
9265		return -EIO;
9266	} else
9267		phba->intr_mode = intr_mode;
9268
9269	/* Restart HBA and bring it online */
9270	lpfc_sli_brdrestart(phba);
9271	lpfc_online(phba);
9272
9273	/* Log the current active interrupt mode */
9274	lpfc_log_intr_mode(phba, phba->intr_mode);
9275
9276	return 0;
9277}
9278
9279/**
9280 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
9281 * @phba: pointer to lpfc hba data structure.
9282 *
9283 * This routine is called to prepare the SLI4 device for PCI slot recover. It
9284 * aborts all the outstanding SCSI I/Os to the pci device.
9285 **/
9286static void
9287lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9288{
9289	struct lpfc_sli *psli = &phba->sli;
9290	struct lpfc_sli_ring  *pring;
9291
9292	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9293			"2828 PCI channel I/O abort preparing for recovery\n");
9294	/*
9295	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
9297	 */
9298	pring = &psli->ring[psli->fcp_ring];
9299	lpfc_sli_abort_iocb_ring(phba, pring);
9300}
9301
9302/**
9303 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9304 * @phba: pointer to lpfc hba data structure.
9305 *
9306 * This routine is called to prepare the SLI4 device for PCI slot reset. It
9307 * disables the device interrupt and pci device, and aborts the internal FCP
9308 * pending I/Os.
9309 **/
9310static void
9311lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9312{
9313	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9314			"2826 PCI channel disable preparing for reset\n");
9315
9316	/* Block any management I/Os to the device */
9317	lpfc_block_mgmt_io(phba);
9318
9319	/* Block all SCSI devices' I/Os on the host */
9320	lpfc_scsi_dev_block(phba);
9321
9322	/* stop all timers */
9323	lpfc_stop_hba_timers(phba);
9324
9325	/* Disable interrupt and pci device */
9326	lpfc_sli4_disable_intr(phba);
9327	pci_disable_device(phba->pcidev);
9328
9329	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
9330	lpfc_sli_flush_fcp_rings(phba);
9331}
9332
9333/**
9334 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9335 * @phba: pointer to lpfc hba data structure.
9336 *
9337 * This routine is called to prepare the SLI4 device for PCI slot permanently
9338 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9339 * pending I/Os.
9340 **/
9341static void
9342lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9343{
9344	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9345			"2827 PCI channel permanent disable for failure\n");
9346
9347	/* Block all SCSI devices' I/Os on the host */
9348	lpfc_scsi_dev_block(phba);
9349
9350	/* stop all timers */
9351	lpfc_stop_hba_timers(phba);
9352
9353	/* Clean up all driver's outstanding SCSI I/Os */
9354	lpfc_sli_flush_fcp_rings(phba);
9355}
9356
9357/**
9358 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9359 * @pdev: pointer to PCI device.
9360 * @state: the current PCI connection state.
9361 *
 * This routine is called from the PCI subsystem for error handling on a device
9363 * with SLI-4 interface spec. This function is called by the PCI subsystem
9364 * after a PCI bus error affecting this device has been detected. When this
9365 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9366 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9367 * for the PCI subsystem to perform proper recovery as desired.
9368 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9372 **/
9373static pci_ers_result_t
9374lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9375{
9376	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9377	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9378
9379	switch (state) {
9380	case pci_channel_io_normal:
9381		/* Non-fatal error, prepare for recovery */
9382		lpfc_sli4_prep_dev_for_recover(phba);
9383		return PCI_ERS_RESULT_CAN_RECOVER;
9384	case pci_channel_io_frozen:
9385		/* Fatal error, prepare for slot reset */
9386		lpfc_sli4_prep_dev_for_reset(phba);
9387		return PCI_ERS_RESULT_NEED_RESET;
9388	case pci_channel_io_perm_failure:
9389		/* Permanent failure, prepare for device down */
9390		lpfc_sli4_prep_dev_for_perm_failure(phba);
9391		return PCI_ERS_RESULT_DISCONNECT;
9392	default:
9393		/* Unknown state, prepare and request slot reset */
9394		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9395				"2825 Unknown PCI error state: x%x\n", state);
9396		lpfc_sli4_prep_dev_for_reset(phba);
9397		return PCI_ERS_RESULT_NEED_RESET;
9398	}
9399}
9400
9401/**
9402 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
9403 * @pdev: pointer to PCI device.
9404 *
 * This routine is called from the PCI subsystem for error handling on a device
9406 * with SLI-4 interface spec. It is called after PCI bus has been reset to
9407 * restart the PCI card from scratch, as if from a cold-boot. During the
9408 * PCI subsystem error recovery, after the driver returns
9409 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9410 * recovery and then call this routine before calling the .resume method to
9411 * recover the device. This function will initialize the HBA device, enable
9412 * the interrupt, but it will just put the HBA to offline state without
9413 * passing any I/O traffic.
9414 *
9415 * Return codes
9416 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9417 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9418 */
9419static pci_ers_result_t
9420lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9421{
9422	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9423	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9424	struct lpfc_sli *psli = &phba->sli;
9425	uint32_t intr_mode;
9426
9427	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9428	if (pci_enable_device_mem(pdev)) {
9429		printk(KERN_ERR "lpfc: Cannot re-enable "
9430			"PCI device after reset.\n");
9431		return PCI_ERS_RESULT_DISCONNECT;
9432	}
9433
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);
9437
9438	spin_lock_irq(&phba->hbalock);
9439	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9440	spin_unlock_irq(&phba->hbalock);
9441
9442	/* Configure and enable interrupt */
9443	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9444	if (intr_mode == LPFC_INTR_ERROR) {
9445		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9446				"2824 Cannot re-enable interrupt after "
9447				"slot reset.\n");
9448		return PCI_ERS_RESULT_DISCONNECT;
9449	} else
9450		phba->intr_mode = intr_mode;
9451
9452	/* Log the current active interrupt mode */
9453	lpfc_log_intr_mode(phba, phba->intr_mode);
9454
9455	return PCI_ERS_RESULT_RECOVERED;
9456}
9457
9458/**
9459 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9460 * @pdev: pointer to PCI device
9461 *
 * This routine is called from the PCI subsystem for error handling on a device
9463 * with SLI-4 interface spec. It is called when kernel error recovery tells
9464 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9465 * error recovery. After this call, traffic can start to flow from this device
9466 * again.
9467 **/
9468static void
9469lpfc_io_resume_s4(struct pci_dev *pdev)
9470{
9471	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9472	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9473
	/*
	 * In case of slot reset, the function reset is performed through a
	 * mailbox command, which requires DMA to be enabled, so the reset
	 * has to be deferred to this io_resume phase. Taking the device
	 * offline first performs the necessary cleanup.
	 */
9480	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9481		/* Perform device reset */
9482		lpfc_offline_prep(phba);
9483		lpfc_offline(phba);
9484		lpfc_sli_brdrestart(phba);
9485		/* Bring the device back online */
9486		lpfc_online(phba);
9487	}
9488
9489	/* Clean up Advanced Error Reporting (AER) if needed */
9490	if (phba->hba_flag & HBA_AER_ENABLED)
9491		pci_cleanup_aer_uncorrect_error_status(pdev);
9492}
9493
9494/**
9495 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9496 * @pdev: pointer to PCI device
9497 * @pid: pointer to PCI device identifier
9498 *
9499 * This routine is to be registered to the kernel's PCI subsystem. When an
9500 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9501 * at PCI device-specific information of the device and driver to see if the
 * driver states that it can support this kind of device. If the match is
9503 * successful, the driver core invokes this routine. This routine dispatches
9504 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9505 * do all the initialization that it needs to do to handle the HBA device
9506 * properly.
9507 *
9508 * Return code
9509 * 	0 - driver can claim the device
9510 * 	negative value - driver can not claim the device
9511 **/
9512static int __devinit
9513lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9514{
9515	int rc;
9516	struct lpfc_sli_intf intf;
9517
9518	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9519		return -ENODEV;
9520
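	/*
	 * The SLI_INTF register in PCI config space carries a validity
	 * signature and the SLI revision; only functions reporting a valid
	 * SLI-4 signature take the SLI-4 probe path, everything else is
	 * probed as SLI-3.
	 */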
9521	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9522	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9523		rc = lpfc_pci_probe_one_s4(pdev, pid);
9524	else
9525		rc = lpfc_pci_probe_one_s3(pdev, pid);
9526
9527	return rc;
9528}
9529
9530/**
9531 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9532 * @pdev: pointer to PCI device
9533 *
9534 * This routine is to be registered to the kernel's PCI subsystem. When an
9535 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9536 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9537 * remove routine, which will perform all the necessary cleanup for the
9538 * device to be removed from the PCI subsystem properly.
9539 **/
9540static void __devexit
9541lpfc_pci_remove_one(struct pci_dev *pdev)
9542{
9543	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9544	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9545
9546	switch (phba->pci_dev_grp) {
9547	case LPFC_PCI_DEV_LP:
9548		lpfc_pci_remove_one_s3(pdev);
9549		break;
9550	case LPFC_PCI_DEV_OC:
9551		lpfc_pci_remove_one_s4(pdev);
9552		break;
9553	default:
9554		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9555				"1424 Invalid PCI device group: 0x%x\n",
9556				phba->pci_dev_grp);
9557		break;
9558	}
9559	return;
9560}
9561
9562/**
9563 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9564 * @pdev: pointer to PCI device
9565 * @msg: power management message
9566 *
9567 * This routine is to be registered to the kernel's PCI subsystem to support
9568 * system Power Management (PM). When PM invokes this method, it dispatches
9569 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9570 * suspend the device.
9571 *
9572 * Return code
9573 * 	0 - driver suspended the device
9574 * 	Error otherwise
9575 **/
9576static int
9577lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9578{
9579	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9580	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9581	int rc = -ENODEV;
9582
9583	switch (phba->pci_dev_grp) {
9584	case LPFC_PCI_DEV_LP:
9585		rc = lpfc_pci_suspend_one_s3(pdev, msg);
9586		break;
9587	case LPFC_PCI_DEV_OC:
9588		rc = lpfc_pci_suspend_one_s4(pdev, msg);
9589		break;
9590	default:
9591		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9592				"1425 Invalid PCI device group: 0x%x\n",
9593				phba->pci_dev_grp);
9594		break;
9595	}
9596	return rc;
9597}
9598
9599/**
9600 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9601 * @pdev: pointer to PCI device
9602 *
9603 * This routine is to be registered to the kernel's PCI subsystem to support
9604 * system Power Management (PM). When PM invokes this method, it dispatches
9605 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9606 * resume the device.
9607 *
9608 * Return code
 * 	0 - driver resumed the device
9610 * 	Error otherwise
9611 **/
9612static int
9613lpfc_pci_resume_one(struct pci_dev *pdev)
9614{
9615	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9616	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9617	int rc = -ENODEV;
9618
9619	switch (phba->pci_dev_grp) {
9620	case LPFC_PCI_DEV_LP:
9621		rc = lpfc_pci_resume_one_s3(pdev);
9622		break;
9623	case LPFC_PCI_DEV_OC:
9624		rc = lpfc_pci_resume_one_s4(pdev);
9625		break;
9626	default:
9627		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9628				"1426 Invalid PCI device group: 0x%x\n",
9629				phba->pci_dev_grp);
9630		break;
9631	}
9632	return rc;
9633}
9634
9635/**
9636 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
9637 * @pdev: pointer to PCI device.
9638 * @state: the current PCI connection state.
9639 *
9640 * This routine is registered to the PCI subsystem for error handling. This
9641 * function is called by the PCI subsystem after a PCI bus error affecting
9642 * this device has been detected. When this routine is invoked, it dispatches
9643 * the action to the proper SLI-3 or SLI-4 device error detected handling
9644 * routine, which will perform the proper error detected operation.
9645 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9649 **/
9650static pci_ers_result_t
9651lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9652{
9653	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9654	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9655	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9656
9657	switch (phba->pci_dev_grp) {
9658	case LPFC_PCI_DEV_LP:
9659		rc = lpfc_io_error_detected_s3(pdev, state);
9660		break;
9661	case LPFC_PCI_DEV_OC:
9662		rc = lpfc_io_error_detected_s4(pdev, state);
9663		break;
9664	default:
9665		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9666				"1427 Invalid PCI device group: 0x%x\n",
9667				phba->pci_dev_grp);
9668		break;
9669	}
9670	return rc;
9671}
9672
9673/**
9674 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
9675 * @pdev: pointer to PCI device.
9676 *
9677 * This routine is registered to the PCI subsystem for error handling. This
9678 * function is called after PCI bus has been reset to restart the PCI card
9679 * from scratch, as if from a cold-boot. When this routine is invoked, it
9680 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
9681 * routine, which will perform the proper device reset.
9682 *
9683 * Return codes
9684 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9685 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9686 **/
9687static pci_ers_result_t
9688lpfc_io_slot_reset(struct pci_dev *pdev)
9689{
9690	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9691	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9692	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9693
9694	switch (phba->pci_dev_grp) {
9695	case LPFC_PCI_DEV_LP:
9696		rc = lpfc_io_slot_reset_s3(pdev);
9697		break;
9698	case LPFC_PCI_DEV_OC:
9699		rc = lpfc_io_slot_reset_s4(pdev);
9700		break;
9701	default:
9702		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9703				"1428 Invalid PCI device group: 0x%x\n",
9704				phba->pci_dev_grp);
9705		break;
9706	}
9707	return rc;
9708}
9709
9710/**
9711 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
9712 * @pdev: pointer to PCI device
9713 *
9714 * This routine is registered to the PCI subsystem for error handling. It
9715 * is called when kernel error recovery tells the lpfc driver that it is
9716 * OK to resume normal PCI operation after PCI bus error recovery. When
9717 * this routine is invoked, it dispatches the action to the proper SLI-3
9718 * or SLI-4 device io_resume routine, which will resume the device operation.
9719 **/
9720static void
9721lpfc_io_resume(struct pci_dev *pdev)
9722{
9723	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9724	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9725
9726	switch (phba->pci_dev_grp) {
9727	case LPFC_PCI_DEV_LP:
9728		lpfc_io_resume_s3(pdev);
9729		break;
9730	case LPFC_PCI_DEV_OC:
9731		lpfc_io_resume_s4(pdev);
9732		break;
9733	default:
9734		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9735				"1429 Invalid PCI device group: 0x%x\n",
9736				phba->pci_dev_grp);
9737		break;
9738	}
9739	return;
9740}
9741
9742static struct pci_device_id lpfc_id_table[] = {
9743	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
9744		PCI_ANY_ID, PCI_ANY_ID, },
9745	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
9746		PCI_ANY_ID, PCI_ANY_ID, },
9747	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
9748		PCI_ANY_ID, PCI_ANY_ID, },
9749	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
9750		PCI_ANY_ID, PCI_ANY_ID, },
9751	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
9752		PCI_ANY_ID, PCI_ANY_ID, },
9753	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
9754		PCI_ANY_ID, PCI_ANY_ID, },
9755	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
9756		PCI_ANY_ID, PCI_ANY_ID, },
9757	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
9758		PCI_ANY_ID, PCI_ANY_ID, },
9759	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
9760		PCI_ANY_ID, PCI_ANY_ID, },
9761	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
9762		PCI_ANY_ID, PCI_ANY_ID, },
9763	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
9764		PCI_ANY_ID, PCI_ANY_ID, },
9765	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
9766		PCI_ANY_ID, PCI_ANY_ID, },
9767	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
9768		PCI_ANY_ID, PCI_ANY_ID, },
9769	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
9770		PCI_ANY_ID, PCI_ANY_ID, },
9771	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
9772		PCI_ANY_ID, PCI_ANY_ID, },
9773	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
9774		PCI_ANY_ID, PCI_ANY_ID, },
9775	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
9776		PCI_ANY_ID, PCI_ANY_ID, },
9777	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
9778		PCI_ANY_ID, PCI_ANY_ID, },
9779	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
9780		PCI_ANY_ID, PCI_ANY_ID, },
9781	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
9782		PCI_ANY_ID, PCI_ANY_ID, },
9783	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
9784		PCI_ANY_ID, PCI_ANY_ID, },
9785	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
9786		PCI_ANY_ID, PCI_ANY_ID, },
9787	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
9788		PCI_ANY_ID, PCI_ANY_ID, },
9789	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
9790		PCI_ANY_ID, PCI_ANY_ID, },
9791	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
9792		PCI_ANY_ID, PCI_ANY_ID, },
9793	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
9794		PCI_ANY_ID, PCI_ANY_ID, },
9795	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
9796		PCI_ANY_ID, PCI_ANY_ID, },
9797	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
9798		PCI_ANY_ID, PCI_ANY_ID, },
9799	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
9800		PCI_ANY_ID, PCI_ANY_ID, },
9801	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
9802		PCI_ANY_ID, PCI_ANY_ID, },
9803	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
9804		PCI_ANY_ID, PCI_ANY_ID, },
9805	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
9806		PCI_ANY_ID, PCI_ANY_ID, },
9807	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
9808		PCI_ANY_ID, PCI_ANY_ID, },
9809	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
9810		PCI_ANY_ID, PCI_ANY_ID, },
9811	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
9812		PCI_ANY_ID, PCI_ANY_ID, },
9813	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
9814		PCI_ANY_ID, PCI_ANY_ID, },
9815	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
9816		PCI_ANY_ID, PCI_ANY_ID, },
9817	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
9818		PCI_ANY_ID, PCI_ANY_ID, },
9819	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
9820		PCI_ANY_ID, PCI_ANY_ID, },
9821	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
9822		PCI_ANY_ID, PCI_ANY_ID, },
9823	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
9824		PCI_ANY_ID, PCI_ANY_ID, },
9825	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
9826		PCI_ANY_ID, PCI_ANY_ID, },
9827	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
9828		PCI_ANY_ID, PCI_ANY_ID, },
9829	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
9830		PCI_ANY_ID, PCI_ANY_ID, },
9831	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
9832		PCI_ANY_ID, PCI_ANY_ID, },
9833	{ 0 }
9834};
9835
9836MODULE_DEVICE_TABLE(pci, lpfc_id_table);
9837
9838static struct pci_error_handlers lpfc_err_handler = {
9839	.error_detected = lpfc_io_error_detected,
9840	.slot_reset = lpfc_io_slot_reset,
9841	.resume = lpfc_io_resume,
9842};
9843
9844static struct pci_driver lpfc_driver = {
9845	.name		= LPFC_DRIVER_NAME,
9846	.id_table	= lpfc_id_table,
9847	.probe		= lpfc_pci_probe_one,
9848	.remove		= __devexit_p(lpfc_pci_remove_one),
9849	.suspend        = lpfc_pci_suspend_one,
9850	.resume		= lpfc_pci_resume_one,
9851	.err_handler    = &lpfc_err_handler,
9852};
9853
9854/**
9855 * lpfc_init - lpfc module initialization routine
9856 *
9857 * This routine is to be invoked when the lpfc module is loaded into the
9858 * kernel. The special kernel macro module_init() is used to indicate the
9859 * role of this routine to the kernel as lpfc module entry point.
9860 *
9861 * Return codes
9862 *   0 - successful
9863 *   -ENOMEM - FC attach transport failed
9864 *   all others - failed
9865 */
9866static int __init
9867lpfc_init(void)
9868{
9869	int error = 0;
9870
9871	printk(LPFC_MODULE_DESC "\n");
9872	printk(LPFC_COPYRIGHT "\n");
9873
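	/*
	 * Vport create/delete entry points are wired into the FC transport
	 * template only when NPIV is enabled; a second transport template
	 * for vports is attached further below in that case.
	 */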
9874	if (lpfc_enable_npiv) {
9875		lpfc_transport_functions.vport_create = lpfc_vport_create;
9876		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
9877	}
9878	lpfc_transport_template =
9879				fc_attach_transport(&lpfc_transport_functions);
9880	if (lpfc_transport_template == NULL)
9881		return -ENOMEM;
9882	if (lpfc_enable_npiv) {
9883		lpfc_vport_transport_template =
9884			fc_attach_transport(&lpfc_vport_transport_functions);
9885		if (lpfc_vport_transport_template == NULL) {
9886			fc_release_transport(lpfc_transport_template);
9887			return -ENOMEM;
9888		}
9889	}
9890	error = pci_register_driver(&lpfc_driver);
9891	if (error) {
9892		fc_release_transport(lpfc_transport_template);
9893		if (lpfc_enable_npiv)
9894			fc_release_transport(lpfc_vport_transport_template);
9895	}
9896
9897	return error;
9898}
9899
9900/**
9901 * lpfc_exit - lpfc module removal routine
9902 *
9903 * This routine is invoked when the lpfc module is removed from the kernel.
9904 * The special kernel macro module_exit() is used to indicate the role of
9905 * this routine to the kernel as lpfc module exit point.
9906 */
9907static void __exit
9908lpfc_exit(void)
9909{
9910	pci_unregister_driver(&lpfc_driver);
9911	fc_release_transport(lpfc_transport_template);
9912	if (lpfc_enable_npiv)
9913		fc_release_transport(lpfc_vport_transport_template);
9914	if (_dump_buf_data) {
9915		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
9916				"_dump_buf_data at 0x%p\n",
9917				(1L << _dump_buf_data_order), _dump_buf_data);
9918		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
9919	}
9920
9921	if (_dump_buf_dif) {
9922		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
9923				"_dump_buf_dif at 0x%p\n",
9924				(1L << _dump_buf_dif_order), _dump_buf_dif);
9925		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
9926	}
9927}
9928
9929module_init(lpfc_init);
9930module_exit(lpfc_exit);
9931MODULE_LICENSE("GPL");
9932MODULE_DESCRIPTION(LPFC_MODULE_DESC);
9933MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
9934MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
9935