lpfc_init.c revision cd1c8301db15ee52bfc5a0e5bc16b52bab8475aa
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it will set the internal async event
 * support flag to 1; otherwise, it will set the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that gets the
 * wake-up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human-readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
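
/*
 * Illustrative decode (hypothetical field values, not from any particular
 * adapter): with ver=5, rev=0, lev=2, dist=3 and num=0, the first branch
 * above yields "5.02"; with the same version fields but dist=1 ('a') and
 * num=2, the second branch yields "5.02a2".
 */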

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
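
/*
 * Precedence sketch (illustrative): if the administrator configured a
 * soft WWPN, it is written into the service parameters above and then
 * also wins the copy into fc_portname, so the soft WWPN takes effect
 * end to end; with no soft name and a non-empty fc_portname, the copy
 * runs the other way and the service parameters follow the existing
 * fc name instead.
 */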

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * overheated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
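
	/*
	 * Nibble-to-character sketch for the fallback serial number above
	 * (illustrative bytes): each WWNN IEEE byte is split into two
	 * nibbles, 0x0-0x9 map to '0'-'9' (0x30 + j) and 0xa-0xf map to
	 * 'a'-'f' (0x61 + j - 10), so the bytes 00:90:fa would contribute
	 * the characters "0090fa".
	 */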

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
		&& !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
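
/*
 * Mailbox ownership note (summary of the pattern above): a pmb issued
 * with MBX_POLL is always freed by the caller, while one issued with
 * MBX_NOWAIT and accepted (MBX_BUSY or MBX_SUCCESS) is freed by its
 * completion handler, e.g. lpfc_sli_def_mbox_cmpl; the error paths
 * above free the pmb only when the SLI layer did not take ownership.
 */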

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
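
/*
 * Usage sketch (illustrative, not a call site in this file): with the
 * module parameter lpfc_suppress_link_up set, lpfc_config_port_post()
 * skips INIT_LINK, and a later caller can bring the link up with
 *
 *	rc = lpfc_hba_init_link(phba, MBX_NOWAIT);
 *
 * passing MBX_POLL instead when it must wait for the mailbox to finish.
 */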

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
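
/*
 * Registration sketch (illustrative; the actual setup lives in the
 * driver's resource-setup path, not in this excerpt):
 *
 *	init_timer(&phba->hb_tmofunc);
 *	phba->hb_tmofunc.function = lpfc_hb_timeout;
 *	phba->hb_tmofunc.data = (unsigned long)phba;
 *
 * which is why the handler above receives phba through the unsigned
 * long @ptr argument.
 */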

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}
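
/*
 * Heartbeat flow at a glance (summary of the handler above): a recent
 * I/O completion just rearms the interval timer; with heartbeats
 * enabled, an idle mailbox channel and no heartbeat outstanding issues
 * a new heartbeat mailbox command; an outstanding heartbeat is given a
 * full LPFC_HB_MBOX_TIMEOUT window to complete before the next firing.
 */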

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it has triggered erratt. That could cause
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os)
	 * on the txcmplq and let the SCSI layer retry them after
	 * re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		/*
		 * On an error status condition, the driver needs to wait for
		 * the port to be ready before performing a reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				return;
			}
			/* fall through for not able to recover */
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}
1773
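/*
 * Illustrative sketch (not part of the driver source): a minimal VPD image
 * that lpfc_parse_vpd() above accepts.  Descriptor lengths are stored
 * little-endian (lenlo before lenhi); the 0x82 identifier string is skipped,
 * the 0x90 read-only area is scanned for the "SN"/"V1".."V4" keywords, and
 * the 0x78 end tag terminates the walk:
 *
 *   0x82 0x04 0x00  'L' 'P' 'e' 'X'        identifier string, skipped
 *   0x90 0x07 0x00                         VPD-R area, 7 bytes follow
 *        'S' 'N' 0x04 '1' '2' '3' '4'      "SN" keyword -> phba->SerialNumber
 *   0x78                                   end tag, sets finished = 1
 */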
1774/**
1775 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1776 * @phba: pointer to lpfc hba data structure.
1777 * @mdp: pointer to the data structure to hold the derived model name.
1778 * @descp: pointer to the data structure to hold the derived description.
1779 *
1780 * This routine retrieves HBA's description based on its registered PCI device
1781 * ID. The @descp passed into this function points to an array of 256 chars. It
1782 * shall be returned with the model name, maximum speed, and the host bus type.
1783 * The @mdp passed into this function points to an array of 80 chars. When the
1784 * function returns, the @mdp will be filled with the model name.
1785 **/
1786static void
1787lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1788{
1789	lpfc_vpd_t *vp;
1790	uint16_t dev_id = phba->pcidev->device;
1791	int max_speed;
1792	int GE = 0;
1793	int oneConnect = 0; /* default is not a oneConnect */
1794	struct {
1795		char *name;
1796		char *bus;
1797		char *function;
1798	} m = {"<Unknown>", "", ""};
1799
1800	if (mdp && mdp[0] != '\0'
1801		&& descp && descp[0] != '\0')
1802		return;
1803
1804	if (phba->lmt & LMT_16Gb)
1805		max_speed = 16;
1806	else if (phba->lmt & LMT_10Gb)
1807		max_speed = 10;
1808	else if (phba->lmt & LMT_8Gb)
1809		max_speed = 8;
1810	else if (phba->lmt & LMT_4Gb)
1811		max_speed = 4;
1812	else if (phba->lmt & LMT_2Gb)
1813		max_speed = 2;
1814	else
1815		max_speed = 1;
1816
1817	vp = &phba->vpd;
1818
1819	switch (dev_id) {
1820	case PCI_DEVICE_ID_FIREFLY:
1821		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1822		break;
1823	case PCI_DEVICE_ID_SUPERFLY:
1824		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1825			m = (typeof(m)){"LP7000", "PCI",
1826					"Fibre Channel Adapter"};
1827		else
1828			m = (typeof(m)){"LP7000E", "PCI",
1829					"Fibre Channel Adapter"};
1830		break;
1831	case PCI_DEVICE_ID_DRAGONFLY:
1832		m = (typeof(m)){"LP8000", "PCI",
1833				"Fibre Channel Adapter"};
1834		break;
1835	case PCI_DEVICE_ID_CENTAUR:
1836		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1837			m = (typeof(m)){"LP9002", "PCI",
1838					"Fibre Channel Adapter"};
1839		else
1840			m = (typeof(m)){"LP9000", "PCI",
1841					"Fibre Channel Adapter"};
1842		break;
1843	case PCI_DEVICE_ID_RFLY:
1844		m = (typeof(m)){"LP952", "PCI",
1845				"Fibre Channel Adapter"};
1846		break;
1847	case PCI_DEVICE_ID_PEGASUS:
1848		m = (typeof(m)){"LP9802", "PCI-X",
1849				"Fibre Channel Adapter"};
1850		break;
1851	case PCI_DEVICE_ID_THOR:
1852		m = (typeof(m)){"LP10000", "PCI-X",
1853				"Fibre Channel Adapter"};
1854		break;
1855	case PCI_DEVICE_ID_VIPER:
1856		m = (typeof(m)){"LPX1000",  "PCI-X",
1857				"Fibre Channel Adapter"};
1858		break;
1859	case PCI_DEVICE_ID_PFLY:
1860		m = (typeof(m)){"LP982", "PCI-X",
1861				"Fibre Channel Adapter"};
1862		break;
1863	case PCI_DEVICE_ID_TFLY:
1864		m = (typeof(m)){"LP1050", "PCI-X",
1865				"Fibre Channel Adapter"};
1866		break;
1867	case PCI_DEVICE_ID_HELIOS:
1868		m = (typeof(m)){"LP11000", "PCI-X2",
1869				"Fibre Channel Adapter"};
1870		break;
1871	case PCI_DEVICE_ID_HELIOS_SCSP:
1872		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1873				"Fibre Channel Adapter"};
1874		break;
1875	case PCI_DEVICE_ID_HELIOS_DCSP:
1876		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1877				"Fibre Channel Adapter"};
1878		break;
1879	case PCI_DEVICE_ID_NEPTUNE:
1880		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1881		break;
1882	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1883		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1884		break;
1885	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1886		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1887		break;
1888	case PCI_DEVICE_ID_BMID:
1889		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1890		break;
1891	case PCI_DEVICE_ID_BSMB:
1892		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1893		break;
1894	case PCI_DEVICE_ID_ZEPHYR:
1895		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1896		break;
1897	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1898		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1899		break;
1900	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1901		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1902		GE = 1;
1903		break;
1904	case PCI_DEVICE_ID_ZMID:
1905		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1906		break;
1907	case PCI_DEVICE_ID_ZSMB:
1908		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1909		break;
1910	case PCI_DEVICE_ID_LP101:
1911		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1912		break;
1913	case PCI_DEVICE_ID_LP10000S:
1914		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1915		break;
1916	case PCI_DEVICE_ID_LP11000S:
1917		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1918		break;
1919	case PCI_DEVICE_ID_LPE11000S:
1920		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1921		break;
1922	case PCI_DEVICE_ID_SAT:
1923		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1924		break;
1925	case PCI_DEVICE_ID_SAT_MID:
1926		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1927		break;
1928	case PCI_DEVICE_ID_SAT_SMB:
1929		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1930		break;
1931	case PCI_DEVICE_ID_SAT_DCSP:
1932		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1933		break;
1934	case PCI_DEVICE_ID_SAT_SCSP:
1935		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1936		break;
1937	case PCI_DEVICE_ID_SAT_S:
1938		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1939		break;
1940	case PCI_DEVICE_ID_HORNET:
1941		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1942		GE = 1;
1943		break;
1944	case PCI_DEVICE_ID_PROTEUS_VF:
1945		m = (typeof(m)){"LPev12000", "PCIe IOV",
1946				"Fibre Channel Adapter"};
1947		break;
1948	case PCI_DEVICE_ID_PROTEUS_PF:
1949		m = (typeof(m)){"LPev12000", "PCIe IOV",
1950				"Fibre Channel Adapter"};
1951		break;
1952	case PCI_DEVICE_ID_PROTEUS_S:
1953		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1954				"Fibre Channel Adapter"};
1955		break;
1956	case PCI_DEVICE_ID_TIGERSHARK:
1957		oneConnect = 1;
1958		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1959		break;
1960	case PCI_DEVICE_ID_TOMCAT:
1961		oneConnect = 1;
1962		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1963		break;
1964	case PCI_DEVICE_ID_FALCON:
1965		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1966				"EmulexSecure Fibre"};
1967		break;
1968	case PCI_DEVICE_ID_BALIUS:
1969		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1970				"Fibre Channel Adapter"};
1971		break;
1972	case PCI_DEVICE_ID_LANCER_FC:
1973	case PCI_DEVICE_ID_LANCER_FC_VF:
1974		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
1975		break;
1976	case PCI_DEVICE_ID_LANCER_FCOE:
1977	case PCI_DEVICE_ID_LANCER_FCOE_VF:
1978		oneConnect = 1;
1979		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
1980		break;
1981	default:
1982		m = (typeof(m)){"Unknown", "", ""};
1983		break;
1984	}
1985
1986	if (mdp && mdp[0] == '\0')
1987		snprintf(mdp, 79,"%s", m.name);
1988	/*
1989	 * OneConnect HBAs require special processing; they are all
1990	 * initiators and we append the port number to the description.
1991	 */
1992	if (descp && descp[0] == '\0') {
1993		if (oneConnect)
1994			snprintf(descp, 255,
1995				"Emulex OneConnect %s, %s Initiator, Port %s",
1996				m.name, m.function,
1997				phba->Port);
1998		else
1999			snprintf(descp, 255,
2000				"Emulex %s %d%s %s %s",
2001				m.name, max_speed, (GE) ? "GE" : "Gb",
2002				m.bus, m.function);
2003	}
2004}
2005
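/*
 * Example (illustrative): for an LPe12000 (PCI_DEVICE_ID_SAT) with LMT_8Gb
 * set in phba->lmt, lpfc_get_hba_model_desc() above yields
 * mdp   = "LPe12000" and
 * descp = "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter".
 */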
2006/**
2007 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2008 * @phba: pointer to lpfc hba data structure.
2009 * @pring: pointer to an IOCB ring.
2010 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2011 *
2012 * This routine posts a given number of IOCBs with the associated DMA buffer
2013 * descriptors specified by the cnt argument to the given IOCB ring.
2014 *
2015 * Return codes
2016 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2017 **/
2018int
2019lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2020{
2021	IOCB_t *icmd;
2022	struct lpfc_iocbq *iocb;
2023	struct lpfc_dmabuf *mp1, *mp2;
2024
2025	cnt += pring->missbufcnt;
2026
2027	/* While there are buffers to post */
2028	while (cnt > 0) {
2029		/* Allocate buffer for command iocb */
2030		iocb = lpfc_sli_get_iocbq(phba);
2031		if (iocb == NULL) {
2032			pring->missbufcnt = cnt;
2033			return cnt;
2034		}
2035		icmd = &iocb->iocb;
2036
2037		/* 2 buffers can be posted per command */
2038		/* Allocate buffer to post */
2039		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2040		if (mp1)
2041		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2042		if (!mp1 || !mp1->virt) {
2043			kfree(mp1);
2044			lpfc_sli_release_iocbq(phba, iocb);
2045			pring->missbufcnt = cnt;
2046			return cnt;
2047		}
2048
2049		INIT_LIST_HEAD(&mp1->list);
2050		/* Allocate buffer to post */
2051		if (cnt > 1) {
2052			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2053			if (mp2)
2054				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2055							    &mp2->phys);
2056			if (!mp2 || !mp2->virt) {
2057				kfree(mp2);
2058				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2059				kfree(mp1);
2060				lpfc_sli_release_iocbq(phba, iocb);
2061				pring->missbufcnt = cnt;
2062				return cnt;
2063			}
2064
2065			INIT_LIST_HEAD(&mp2->list);
2066		} else {
2067			mp2 = NULL;
2068		}
2069
2070		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2071		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2072		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2073		icmd->ulpBdeCount = 1;
2074		cnt--;
2075		if (mp2) {
2076			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2077			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2078			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2079			cnt--;
2080			icmd->ulpBdeCount = 2;
2081		}
2082
2083		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2084		icmd->ulpLe = 1;
2085
2086		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2087		    IOCB_ERROR) {
2088			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2089			kfree(mp1);
2090			cnt++;
2091			if (mp2) {
2092				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2093				kfree(mp2);
2094				cnt++;
2095			}
2096			lpfc_sli_release_iocbq(phba, iocb);
2097			pring->missbufcnt = cnt;
2098			return cnt;
2099		}
2100		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2101		if (mp2)
2102			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2103	}
2104	pring->missbufcnt = 0;
2105	return 0;
2106}
2107
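/*
 * Layout note (illustrative): each CMD_QUE_RING_BUF64_CN IOCB built by
 * lpfc_post_buffer() carries up to two 64-bit buffer descriptor entries,
 * so posting N buffers consumes roughly N/2 IOCBs:
 *
 *   icmd->un.cont64[0] = { addrHigh/addrLow of mp1, bdeSize = FCELSSIZE }
 *   icmd->un.cont64[1] = { addrHigh/addrLow of mp2, bdeSize = FCELSSIZE }
 *   icmd->ulpBdeCount  = 1 or 2
 */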
2108/**
2109 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2110 * @phba: pointer to lpfc hba data structure.
2111 *
2112 * This routine posts the initial receive IOCB buffers to the ELS ring. The
2113 * number of initial receive buffers to post, LPFC_BUF_RING0, is currently
2114 * set to 64 IOCBs.
2115 *
2116 * Return codes
2117 *   0 - success (currently always success)
2118 **/
2119static int
2120lpfc_post_rcv_buf(struct lpfc_hba *phba)
2121{
2122	struct lpfc_sli *psli = &phba->sli;
2123
2124	/* Ring 0, ELS / CT buffers */
2125	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2126	/* Ring 2 - FCP no buffers needed */
2127
2128	return 0;
2129}
2130
2131#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
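/*
 * S(N, V) is a 32-bit rotate-left of V by N bits for the uint32_t values
 * used below, e.g. S(1, 0x80000000) == 0x00000001.
 */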
2132
2133/**
2134 * lpfc_sha_init - Set up initial array of hash table entries
2135 * @HashResultPointer: pointer to an array as hash table.
2136 *
2137 * This routine sets up the initial values to the array of hash table entries
2138 * for the LC HBAs.
2139 **/
2140static void
2141lpfc_sha_init(uint32_t * HashResultPointer)
2142{
2143	HashResultPointer[0] = 0x67452301;
2144	HashResultPointer[1] = 0xEFCDAB89;
2145	HashResultPointer[2] = 0x98BADCFE;
2146	HashResultPointer[3] = 0x10325476;
2147	HashResultPointer[4] = 0xC3D2E1F0;
2148}
2149
2150/**
2151 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2152 * @HashResultPointer: pointer to an initial/result hash table.
2153 * @HashWorkingPointer: pointer to a working hash table.
2154 *
2155 * This routine iterates the initial hash table pointed to by
2156 * @HashResultPointer with the values from the working hash table pointed
2157 * to by @HashWorkingPointer. The results are put back into the initial
2158 * hash table and returned through @HashResultPointer.
2159 **/
2160static void
2161lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2162{
2163	int t;
2164	uint32_t TEMP;
2165	uint32_t A, B, C, D, E;
2166	t = 16;
2167	do {
2168		HashWorkingPointer[t] =
2169		    S(1,
2170		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2171								     8] ^
2172		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2173	} while (++t <= 79);
2174	t = 0;
2175	A = HashResultPointer[0];
2176	B = HashResultPointer[1];
2177	C = HashResultPointer[2];
2178	D = HashResultPointer[3];
2179	E = HashResultPointer[4];
2180
2181	do {
2182		if (t < 20) {
2183			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2184		} else if (t < 40) {
2185			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2186		} else if (t < 60) {
2187			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2188		} else {
2189			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2190		}
2191		TEMP += S(5, A) + E + HashWorkingPointer[t];
2192		E = D;
2193		D = C;
2194		C = S(30, B);
2195		B = A;
2196		A = TEMP;
2197	} while (++t <= 79);
2198
2199	HashResultPointer[0] += A;
2200	HashResultPointer[1] += B;
2201	HashResultPointer[2] += C;
2202	HashResultPointer[3] += D;
2203	HashResultPointer[4] += E;
2204
2205}
2206
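/*
 * Note (illustrative): lpfc_sha_iterate() is the standard SHA-1 message
 * schedule plus one 80-round compression pass; for rounds 0-19, e.g.,
 *
 *   TEMP = rotl(A, 5) + ((B & C) | (~B & D)) + E + W[t] + 0x5A827999
 *
 * with W[16..79] = rotl(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1).
 */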
2207/**
2208 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2209 * @RandomChallenge: pointer to the entry of host challenge random number array.
2210 * @HashWorking: pointer to the entry of the working hash array.
2211 *
2212 * This routine calculates the working hash array entry referred to by
2213 * @HashWorking from the challenge random number associated with the host,
2214 * referred to by @RandomChallenge. The result is put into the entry of the
2215 * working hash array and returned by reference through @HashWorking.
2216 **/
2217static void
2218lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2219{
2220	*HashWorking = (*RandomChallenge ^ *HashWorking);
2221}
2222
2223/**
2224 * lpfc_hba_init - Perform special handling for LC HBA initialization
2225 * @phba: pointer to lpfc hba data structure.
2226 * @hbainit: pointer to an array of unsigned 32-bit integers.
2227 *
2228 * This routine performs the special handling for LC HBA initialization.
2229 **/
2230void
2231lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2232{
2233	int t;
2234	uint32_t *HashWorking;
2235	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2236
2237	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2238	if (!HashWorking)
2239		return;
2240
2241	HashWorking[0] = HashWorking[78] = *pwwnn++;
2242	HashWorking[1] = HashWorking[79] = *pwwnn;
2243
2244	for (t = 0; t < 7; t++)
2245		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2246
2247	lpfc_sha_init(hbainit);
2248	lpfc_sha_iterate(hbainit, HashWorking);
2249	kfree(HashWorking);
2250}
2251
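/*
 * Flow sketch (illustrative): hbainit[] serves as the five-word SHA-1
 * state.  The 80-word schedule is seeded from the WWNN, its first seven
 * words are XORed with the host RandomData challenge, and a single
 * compression pass produces the response the LC HBA expects:
 *
 *   lpfc_sha_init(hbainit);                  A..E = SHA-1 IV
 *   lpfc_sha_iterate(hbainit, HashWorking);  one 80-round pass
 */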
2252/**
2253 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2254 * @vport: pointer to a virtual N_Port data structure.
2255 *
2256 * This routine performs the necessary cleanups before deleting the @vport.
2257 * It invokes the discovery state machine to perform necessary state
2258 * transitions and to release the ndlps associated with the @vport. Note,
2259 * the physical port is treated as @vport 0.
2260 **/
2261void
2262lpfc_cleanup(struct lpfc_vport *vport)
2263{
2264	struct lpfc_hba   *phba = vport->phba;
2265	struct lpfc_nodelist *ndlp, *next_ndlp;
2266	int i = 0;
2267
2268	if (phba->link_state > LPFC_LINK_DOWN)
2269		lpfc_port_link_failure(vport);
2270
2271	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2272		if (!NLP_CHK_NODE_ACT(ndlp)) {
2273			ndlp = lpfc_enable_node(vport, ndlp,
2274						NLP_STE_UNUSED_NODE);
2275			if (!ndlp)
2276				continue;
2277			spin_lock_irq(&phba->ndlp_lock);
2278			NLP_SET_FREE_REQ(ndlp);
2279			spin_unlock_irq(&phba->ndlp_lock);
2280			/* Trigger the release of the ndlp memory */
2281			lpfc_nlp_put(ndlp);
2282			continue;
2283		}
2284		spin_lock_irq(&phba->ndlp_lock);
2285		if (NLP_CHK_FREE_REQ(ndlp)) {
2286			/* The ndlp should not be in memory free mode already */
2287			spin_unlock_irq(&phba->ndlp_lock);
2288			continue;
2289		} else
2290			/* Indicate request for freeing ndlp memory */
2291			NLP_SET_FREE_REQ(ndlp);
2292		spin_unlock_irq(&phba->ndlp_lock);
2293
2294		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2295		    ndlp->nlp_DID == Fabric_DID) {
2296			/* Just free up ndlp with Fabric_DID for vports */
2297			lpfc_nlp_put(ndlp);
2298			continue;
2299		}
2300
2301		if (ndlp->nlp_type & NLP_FABRIC)
2302			lpfc_disc_state_machine(vport, ndlp, NULL,
2303					NLP_EVT_DEVICE_RECOVERY);
2304
2305		lpfc_disc_state_machine(vport, ndlp, NULL,
2306					     NLP_EVT_DEVICE_RM);
2307
2308	}
2309
2310	/* At this point, ALL ndlp's should be gone
2311	 * because of the previous NLP_EVT_DEVICE_RM.
2312	 * Let's wait for this to happen, if needed.
2313	 */
2314	while (!list_empty(&vport->fc_nodes)) {
2315		if (i++ > 3000) {
2316			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2317				"0233 Nodelist not empty\n");
2318			list_for_each_entry_safe(ndlp, next_ndlp,
2319						&vport->fc_nodes, nlp_listp) {
2320				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2321						LOG_NODE,
2322						"0282 did:x%x ndlp:x%p "
2323						"usgmap:x%x refcnt:%d\n",
2324						ndlp->nlp_DID, (void *)ndlp,
2325						ndlp->nlp_usg_map,
2326						atomic_read(
2327							&ndlp->kref.refcount));
2328			}
2329			break;
2330		}
2331
2332		/* Wait for any activity on ndlps to settle */
2333		msleep(10);
2334	}
2335	lpfc_cleanup_vports_rrqs(vport, NULL);
2336}
2337
2338/**
2339 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2340 * @vport: pointer to a virtual N_Port data structure.
2341 *
2342 * This routine stops all the timers associated with a @vport. This function
2343 * is invoked before disabling or deleting a @vport. Note that the physical
2344 * port is treated as @vport 0.
2345 **/
2346void
2347lpfc_stop_vport_timers(struct lpfc_vport *vport)
2348{
2349	del_timer_sync(&vport->els_tmofunc);
2350	del_timer_sync(&vport->fc_fdmitmo);
2351	del_timer_sync(&vport->delayed_disc_tmo);
2352	lpfc_can_disctmo(vport);
2353	return;
2354}
2355
2356/**
2357 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2358 * @phba: pointer to lpfc hba data structure.
2359 *
2360 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2361 * caller of this routine should already hold the host lock.
2362 **/
2363void
2364__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2365{
2366	/* Clear pending FCF rediscovery wait flag */
2367	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2368
2369	/* Now, try to stop the timer */
2370	del_timer(&phba->fcf.redisc_wait);
2371}
2372
2373/**
2374 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2375 * @phba: pointer to lpfc hba data structure.
2376 *
2377 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2378 * checks whether the FCF rediscovery wait timer is pending with the host
2379 * lock held before proceeding with disabling the timer and clearing the
2380 * wait timer pending flag.
2381 **/
2382void
2383lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2384{
2385	spin_lock_irq(&phba->hbalock);
2386	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2387		/* FCF rediscovery timer already fired or stopped */
2388		spin_unlock_irq(&phba->hbalock);
2389		return;
2390	}
2391	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2392	/* Clear failover in progress flags */
2393	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2394	spin_unlock_irq(&phba->hbalock);
2395}
2396
2397/**
2398 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2399 * @phba: pointer to lpfc hba data structure.
2400 *
2401 * This routine stops all the timers associated with a HBA. This function is
2402 * invoked before either putting a HBA offline or unloading the driver.
2403 **/
2404void
2405lpfc_stop_hba_timers(struct lpfc_hba *phba)
2406{
2407	lpfc_stop_vport_timers(phba->pport);
2408	del_timer_sync(&phba->sli.mbox_tmo);
2409	del_timer_sync(&phba->fabric_block_timer);
2410	del_timer_sync(&phba->eratt_poll);
2411	del_timer_sync(&phba->hb_tmofunc);
2412	if (phba->sli_rev == LPFC_SLI_REV4) {
2413		del_timer_sync(&phba->rrq_tmr);
2414		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2415	}
2416	phba->hb_outstanding = 0;
2417
2418	switch (phba->pci_dev_grp) {
2419	case LPFC_PCI_DEV_LP:
2420		/* Stop any LightPulse device specific driver timers */
2421		del_timer_sync(&phba->fcp_poll_timer);
2422		break;
2423	case LPFC_PCI_DEV_OC:
2424		/* Stop any OneConnect device specific driver timers */
2425		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2426		break;
2427	default:
2428		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2429				"0297 Invalid device group (x%x)\n",
2430				phba->pci_dev_grp);
2431		break;
2432	}
2433	return;
2434}
2435
2436/**
2437 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2438 * @phba: pointer to lpfc hba data structure.
2439 *
2440 * This routine marks an HBA's management interface as blocked. Once the
2441 * management interface is marked as blocked, all user space access to the
2442 * HBA, whether through the sysfs interface or the libdfc interface, is
2443 * blocked. The HBA is set to block the management interface when the
2444 * driver prepares the HBA interface for online or offline operation.
2445 **/
2446static void
2447lpfc_block_mgmt_io(struct lpfc_hba * phba)
2448{
2449	unsigned long iflag;
2450	uint8_t actcmd = MBX_HEARTBEAT;
2451	unsigned long timeout;
2452
2453	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2454	spin_lock_irqsave(&phba->hbalock, iflag);
2455	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2456	if (phba->sli.mbox_active) {
2457		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2458		/* Determine how long we might wait for the active mailbox
2459		 * command to be gracefully completed by firmware.
2460		 */
2461		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2462				phba->sli.mbox_active) * 1000) + jiffies;
2463	}
2464	spin_unlock_irqrestore(&phba->hbalock, iflag);
2465
2466	/* Wait for the outstanding mailbox command to complete */
2467	while (phba->sli.mbox_active) {
2468		/* Check active mailbox complete status every 2ms */
2469		msleep(2);
2470		if (time_after(jiffies, timeout)) {
2471			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2472				"2813 Mgmt IO is Blocked %x "
2473				"- mbox cmd %x still active\n",
2474				phba->sli.sli_flag, actcmd);
2475			break;
2476		}
2477	}
2478}
2479
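/*
 * Pattern note (illustrative): the wait loop above is the usual
 * jiffies-based poll-with-deadline idiom:
 *
 *   timeout = msecs_to_jiffies(tmo_ms) + jiffies;
 *   while (condition) {
 *           msleep(2);
 *           if (time_after(jiffies, timeout))
 *                   break;        log and give up waiting
 *   }
 *
 * (tmo_ms here stands in for the mailbox timeout actually computed above.)
 */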
2480/**
2481 * lpfc_online - Initialize and bring a HBA online
2482 * @phba: pointer to lpfc hba data structure.
2483 *
2484 * This routine initializes the HBA and brings a HBA online. During this
2485 * process, the management interface is blocked to prevent user space access
2486 * to the HBA interfering with the driver initialization.
2487 *
2488 * Return codes
2489 *   0 - successful
2490 *   1 - failed
2491 **/
2492int
2493lpfc_online(struct lpfc_hba *phba)
2494{
2495	struct lpfc_vport *vport;
2496	struct lpfc_vport **vports;
2497	int i;
2498
2499	if (!phba)
2500		return 0;
2501	vport = phba->pport;
2502
2503	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2504		return 0;
2505
2506	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2507			"0458 Bring Adapter online\n");
2508
2509	lpfc_block_mgmt_io(phba);
2510
2511	if (!lpfc_sli_queue_setup(phba)) {
2512		lpfc_unblock_mgmt_io(phba);
2513		return 1;
2514	}
2515
2516	if (phba->sli_rev == LPFC_SLI_REV4) {
2517		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2518			lpfc_unblock_mgmt_io(phba);
2519			return 1;
2520		}
2521	} else {
2522		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2523			lpfc_unblock_mgmt_io(phba);
2524			return 1;
2525		}
2526	}
2527
2528	vports = lpfc_create_vport_work_array(phba);
2529	if (vports != NULL)
2530		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2531			struct Scsi_Host *shost;
2532			shost = lpfc_shost_from_vport(vports[i]);
2533			spin_lock_irq(shost->host_lock);
2534			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2535			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2536				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2537			if (phba->sli_rev == LPFC_SLI_REV4)
2538				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2539			spin_unlock_irq(shost->host_lock);
2540		}
2541	lpfc_destroy_vport_work_array(phba, vports);
2542
2543	lpfc_unblock_mgmt_io(phba);
2544	return 0;
2545}
2546
2547/**
2548 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2549 * @phba: pointer to lpfc hba data structure.
2550 *
2551 * This routine marks an HBA's management interface as not blocked. Once the
2552 * management interface is marked as not blocked, all user space access to
2553 * the HBA, whether through the sysfs interface or the libdfc interface, is
2554 * allowed again. The driver blocks the management interface while it
2555 * prepares the HBA interface for online or offline operation and unblocks
2556 * it afterwards.
2557 **/
2558void
2559lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2560{
2561	unsigned long iflag;
2562
2563	spin_lock_irqsave(&phba->hbalock, iflag);
2564	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2565	spin_unlock_irqrestore(&phba->hbalock, iflag);
2566}
2567
2568/**
2569 * lpfc_offline_prep - Prepare a HBA to be brought offline
2570 * @phba: pointer to lpfc hba data structure.
2571 *
2572 * This routine is invoked to prepare a HBA to be brought offline. It issues
2573 * an unreg_login for all the nodes on all vports and flushes the mailbox
2574 * queue to make it ready to be brought offline.
2575 **/
2576void
2577lpfc_offline_prep(struct lpfc_hba * phba)
2578{
2579	struct lpfc_vport *vport = phba->pport;
2580	struct lpfc_nodelist  *ndlp, *next_ndlp;
2581	struct lpfc_vport **vports;
2582	struct Scsi_Host *shost;
2583	int i;
2584
2585	if (vport->fc_flag & FC_OFFLINE_MODE)
2586		return;
2587
2588	lpfc_block_mgmt_io(phba);
2589
2590	lpfc_linkdown(phba);
2591
2592	/* Issue an unreg_login to all nodes on all vports */
2593	vports = lpfc_create_vport_work_array(phba);
2594	if (vports != NULL) {
2595		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2596			if (vports[i]->load_flag & FC_UNLOADING)
2597				continue;
2598			shost = lpfc_shost_from_vport(vports[i]);
2599			spin_lock_irq(shost->host_lock);
2600			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2601			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2602			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2603			spin_unlock_irq(shost->host_lock);
2604
2606			list_for_each_entry_safe(ndlp, next_ndlp,
2607						 &vports[i]->fc_nodes,
2608						 nlp_listp) {
2609				if (!NLP_CHK_NODE_ACT(ndlp))
2610					continue;
2611				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2612					continue;
2613				if (ndlp->nlp_type & NLP_FABRIC) {
2614					lpfc_disc_state_machine(vports[i], ndlp,
2615						NULL, NLP_EVT_DEVICE_RECOVERY);
2616					lpfc_disc_state_machine(vports[i], ndlp,
2617						NULL, NLP_EVT_DEVICE_RM);
2618				}
2619				spin_lock_irq(shost->host_lock);
2620				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2621				spin_unlock_irq(shost->host_lock);
2622				lpfc_unreg_rpi(vports[i], ndlp);
2623			}
2624		}
2625	}
2626	lpfc_destroy_vport_work_array(phba, vports);
2627
2628	lpfc_sli_mbox_sys_shutdown(phba);
2629}
2630
2631/**
2632 * lpfc_offline - Bring a HBA offline
2633 * @phba: pointer to lpfc hba data structure.
2634 *
2635 * This routine actually brings a HBA offline. It stops all the timers
2636 * associated with the HBA, brings down the SLI layer, and eventually
2637 * marks the HBA as in offline state for the upper layer protocol.
2638 **/
2639void
2640lpfc_offline(struct lpfc_hba *phba)
2641{
2642	struct Scsi_Host  *shost;
2643	struct lpfc_vport **vports;
2644	int i;
2645
2646	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2647		return;
2648
2649	/* stop port and all timers associated with this hba */
2650	lpfc_stop_port(phba);
2651	vports = lpfc_create_vport_work_array(phba);
2652	if (vports != NULL)
2653		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2654			lpfc_stop_vport_timers(vports[i]);
2655	lpfc_destroy_vport_work_array(phba, vports);
2656	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2657			"0460 Bring Adapter offline\n");
2658	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2659	   now.  */
2660	lpfc_sli_hba_down(phba);
2661	spin_lock_irq(&phba->hbalock);
2662	phba->work_ha = 0;
2663	spin_unlock_irq(&phba->hbalock);
2664	vports = lpfc_create_vport_work_array(phba);
2665	if (vports != NULL)
2666		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2667			shost = lpfc_shost_from_vport(vports[i]);
2668			spin_lock_irq(shost->host_lock);
2669			vports[i]->work_port_events = 0;
2670			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2671			spin_unlock_irq(shost->host_lock);
2672		}
2673	lpfc_destroy_vport_work_array(phba, vports);
2674}
2675
2676/**
2677 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2678 * @phba: pointer to lpfc hba data structure.
2679 *
2680 * This routine is to free all the SCSI buffers and IOCBs from the driver
2681 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2682 * the internal resources before the device is removed from the system.
2683 *
2684 * Return codes
2685 *   0 - successful (for now, it always returns 0)
2686 **/
2687static int
2688lpfc_scsi_free(struct lpfc_hba *phba)
2689{
2690	struct lpfc_scsi_buf *sb, *sb_next;
2691	struct lpfc_iocbq *io, *io_next;
2692
2693	spin_lock_irq(&phba->hbalock);
2694	/* Release all the lpfc_scsi_bufs maintained by this host. */
2695	spin_lock(&phba->scsi_buf_list_lock);
2696	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2697		list_del(&sb->list);
2698		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2699			      sb->dma_handle);
2700		kfree(sb);
2701		phba->total_scsi_bufs--;
2702	}
2703	spin_unlock(&phba->scsi_buf_list_lock);
2704
2705	/* Release all the lpfc_iocbq entries maintained by this host. */
2706	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2707		list_del(&io->list);
2708		kfree(io);
2709		phba->total_iocbq_bufs--;
2710	}
2711
2712	spin_unlock_irq(&phba->hbalock);
2713	return 0;
2714}
2715
2716/**
2717 * lpfc_create_port - Create an FC port
2718 * @phba: pointer to lpfc hba data structure.
2719 * @instance: a unique integer ID to this FC port.
2720 * @dev: pointer to the device data structure.
2721 *
2722 * This routine creates an FC port for the upper layer protocol. The FC port
2723 * can be created on top of either a physical port or a virtual port provided
2724 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2725 * and associates it with the FC port created before adding the shost to the
2726 * SCSI layer.
2727 *
2728 * Return codes
2729 *   @vport - pointer to the virtual N_Port data structure.
2730 *   NULL - port create failed.
2731 **/
2732struct lpfc_vport *
2733lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2734{
2735	struct lpfc_vport *vport;
2736	struct Scsi_Host  *shost;
2737	int error = 0;
2738
2739	if (dev != &phba->pcidev->dev)
2740		shost = scsi_host_alloc(&lpfc_vport_template,
2741					sizeof(struct lpfc_vport));
2742	else
2743		shost = scsi_host_alloc(&lpfc_template,
2744					sizeof(struct lpfc_vport));
2745	if (!shost)
2746		goto out;
2747
2748	vport = (struct lpfc_vport *) shost->hostdata;
2749	vport->phba = phba;
2750	vport->load_flag |= FC_LOADING;
2751	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2752	vport->fc_rscn_flush = 0;
2753
2754	lpfc_get_vport_cfgparam(vport);
2755	shost->unique_id = instance;
2756	shost->max_id = LPFC_MAX_TARGET;
2757	shost->max_lun = vport->cfg_max_luns;
2758	shost->this_id = -1;
2759	shost->max_cmd_len = 16;
2760	if (phba->sli_rev == LPFC_SLI_REV4) {
2761		shost->dma_boundary =
2762			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2763		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2764	}
2765
2766	/*
2767	 * Set initial can_queue value since 0 is no longer supported and
2768	 * scsi_add_host will fail. This will be adjusted later based on the
2769	 * max xri value determined in hba setup.
2770	 */
2771	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2772	if (dev != &phba->pcidev->dev) {
2773		shost->transportt = lpfc_vport_transport_template;
2774		vport->port_type = LPFC_NPIV_PORT;
2775	} else {
2776		shost->transportt = lpfc_transport_template;
2777		vport->port_type = LPFC_PHYSICAL_PORT;
2778	}
2779
2780	/* Initialize all internally managed lists. */
2781	INIT_LIST_HEAD(&vport->fc_nodes);
2782	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2783	spin_lock_init(&vport->work_port_lock);
2784
2785	init_timer(&vport->fc_disctmo);
2786	vport->fc_disctmo.function = lpfc_disc_timeout;
2787	vport->fc_disctmo.data = (unsigned long)vport;
2788
2789	init_timer(&vport->fc_fdmitmo);
2790	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2791	vport->fc_fdmitmo.data = (unsigned long)vport;
2792
2793	init_timer(&vport->els_tmofunc);
2794	vport->els_tmofunc.function = lpfc_els_timeout;
2795	vport->els_tmofunc.data = (unsigned long)vport;
2796
2797	init_timer(&vport->delayed_disc_tmo);
2798	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
2799	vport->delayed_disc_tmo.data = (unsigned long)vport;
2800
2801	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2802	if (error)
2803		goto out_put_shost;
2804
2805	spin_lock_irq(&phba->hbalock);
2806	list_add_tail(&vport->listentry, &phba->port_list);
2807	spin_unlock_irq(&phba->hbalock);
2808	return vport;
2809
2810out_put_shost:
2811	scsi_host_put(shost);
2812out:
2813	return NULL;
2814}
2815
2816/**
2817 * destroy_port -  destroy an FC port
2818 * @vport: pointer to an lpfc virtual N_Port data structure.
2819 *
2820 * This routine destroys a FC port from the upper layer protocol. All the
2821 * resources associated with the port are released.
2822 **/
2823void
2824destroy_port(struct lpfc_vport *vport)
2825{
2826	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2827	struct lpfc_hba  *phba = vport->phba;
2828
2829	lpfc_debugfs_terminate(vport);
2830	fc_remove_host(shost);
2831	scsi_remove_host(shost);
2832
2833	spin_lock_irq(&phba->hbalock);
2834	list_del_init(&vport->listentry);
2835	spin_unlock_irq(&phba->hbalock);
2836
2837	lpfc_cleanup(vport);
2838	return;
2839}
2840
2841/**
2842 * lpfc_get_instance - Get a unique integer ID
2843 *
2844 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2845 * uses the kernel idr facility to perform the task.
2846 *
2847 * Return codes:
2848 *   instance - a unique integer ID allocated as the new instance.
2849 *   -1 - lpfc get instance failed.
2850 **/
2851int
2852lpfc_get_instance(void)
2853{
2854	int instance = 0;
2855
2856	/* Assign an unused number */
2857	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2858		return -1;
2859	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2860		return -1;
2861	return instance;
2862}
2863
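/*
 * Usage note (illustrative): idr_pre_get()/idr_get_new() is the classic
 * two-step idr allocation, where only the preload step may sleep
 * (GFP_KERNEL).  The instance is expected to be released with
 * idr_remove(&lpfc_hba_index, instance) when the host goes away
 * (assumption: the release call lives outside this section).
 */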
2864/**
2865 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2866 * @shost: pointer to SCSI host data structure.
2867 * @time: elapsed time of the scan in jiffies.
2868 *
2869 * This routine is called by the SCSI layer with a SCSI host to determine
2870 * whether the scan host is finished.
2871 *
2872 * Note: there is no scan_start function as adapter initialization will have
2873 * asynchronously kicked off the link initialization.
2874 *
2875 * Return codes
2876 *   0 - SCSI host scan is not over yet.
2877 *   1 - SCSI host scan is over.
2878 **/
2879int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2880{
2881	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2882	struct lpfc_hba   *phba = vport->phba;
2883	int stat = 0;
2884
2885	spin_lock_irq(shost->host_lock);
2886
2887	if (vport->load_flag & FC_UNLOADING) {
2888		stat = 1;
2889		goto finished;
2890	}
2891	if (time >= 30 * HZ) {
2892		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2893				"0461 Scanning longer than 30 "
2894				"seconds.  Continuing initialization\n");
2895		stat = 1;
2896		goto finished;
2897	}
2898	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2899		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2900				"0465 Link down longer than 15 "
2901				"seconds.  Continuing initialization\n");
2902		stat = 1;
2903		goto finished;
2904	}
2905
2906	if (vport->port_state != LPFC_VPORT_READY)
2907		goto finished;
2908	if (vport->num_disc_nodes || vport->fc_prli_sent)
2909		goto finished;
2910	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2911		goto finished;
2912	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2913		goto finished;
2914
2915	stat = 1;
2916
2917finished:
2918	spin_unlock_irq(shost->host_lock);
2919	return stat;
2920}
2921
2922/**
2923 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2924 * @shost: pointer to SCSI host data structure.
2925 *
2926 * This routine initializes a given SCSI host attributes on a FC port. The
2927 * SCSI host can be either on top of a physical port or a virtual port.
2928 **/
2929void lpfc_host_attrib_init(struct Scsi_Host *shost)
2930{
2931	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2932	struct lpfc_hba   *phba = vport->phba;
2933	/*
2934	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2935	 */
2936
2937	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2938	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2939	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2940
2941	memset(fc_host_supported_fc4s(shost), 0,
2942	       sizeof(fc_host_supported_fc4s(shost)));
2943	fc_host_supported_fc4s(shost)[2] = 1;
2944	fc_host_supported_fc4s(shost)[7] = 1;
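	/* Annotation: bytes 2 and 7, bit 0, of the 256-bit FC-4 TYPEs bitmap
	 * correspond to TYPE 0x08 (SCSI-FCP) and TYPE 0x20 (CT) in the FC-GS
	 * encoding (interpretation added for clarity).
	 */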
2945
2946	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2947				 sizeof fc_host_symbolic_name(shost));
2948
2949	fc_host_supported_speeds(shost) = 0;
2950	if (phba->lmt & LMT_16Gb)
2951		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
2952	if (phba->lmt & LMT_10Gb)
2953		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2954	if (phba->lmt & LMT_8Gb)
2955		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2956	if (phba->lmt & LMT_4Gb)
2957		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2958	if (phba->lmt & LMT_2Gb)
2959		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2960	if (phba->lmt & LMT_1Gb)
2961		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2962
2963	fc_host_maxframe_size(shost) =
2964		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2965		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2966
2967	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2968
2969	/* This value is also unchanging */
2970	memset(fc_host_active_fc4s(shost), 0,
2971	       sizeof(fc_host_active_fc4s(shost)));
2972	fc_host_active_fc4s(shost)[2] = 1;
2973	fc_host_active_fc4s(shost)[7] = 1;
2974
2975	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2976	spin_lock_irq(shost->host_lock);
2977	vport->load_flag &= ~FC_LOADING;
2978	spin_unlock_irq(shost->host_lock);
2979}
2980
2981/**
2982 * lpfc_stop_port_s3 - Stop SLI3 device port
2983 * @phba: pointer to lpfc hba data structure.
2984 *
2985 * This routine is invoked to stop an SLI3 device port. It stops the device
2986 * from generating interrupts and stops the device driver's timers for the
2987 * device.
2988 **/
2989static void
2990lpfc_stop_port_s3(struct lpfc_hba *phba)
2991{
2992	/* Clear all interrupt enable conditions */
2993	writel(0, phba->HCregaddr);
2994	readl(phba->HCregaddr); /* flush */
2995	/* Clear all pending interrupts */
2996	writel(0xffffffff, phba->HAregaddr);
2997	readl(phba->HAregaddr); /* flush */
2998
2999	/* Reset some HBA SLI setup states */
3000	lpfc_stop_hba_timers(phba);
3001	phba->pport->work_port_events = 0;
3002}
3003
3004/**
3005 * lpfc_stop_port_s4 - Stop SLI4 device port
3006 * @phba: pointer to lpfc hba data structure.
3007 *
3008 * This routine is invoked to stop an SLI4 device port. It stops the device
3009 * from generating interrupts and stops the device driver's timers for the
3010 * device.
3011 **/
3012static void
3013lpfc_stop_port_s4(struct lpfc_hba *phba)
3014{
3015	/* Reset some HBA SLI4 setup states */
3016	lpfc_stop_hba_timers(phba);
3017	phba->pport->work_port_events = 0;
3018	phba->sli4_hba.intr_enable = 0;
3019}
3020
3021/**
3022 * lpfc_stop_port - Wrapper function for stopping hba port
3023 * @phba: Pointer to HBA context object.
3024 *
3025 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
3026 * the API jump table function pointer from the lpfc_hba struct.
3027 **/
3028void
3029lpfc_stop_port(struct lpfc_hba *phba)
3030{
3031	phba->lpfc_stop_port(phba);
3032}
3033
3034/**
3035 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3036 * @phba: Pointer to hba for which this call is being executed.
3037 *
3038 * This routine starts the timer waiting for the FCF rediscovery to complete.
3039 **/
3040void
3041lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3042{
3043	unsigned long fcf_redisc_wait_tmo =
3044		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3045	/* Start fcf rediscovery wait period timer */
3046	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3047	spin_lock_irq(&phba->hbalock);
3048	/* Allow action to new fcf asynchronous event */
3049	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3050	/* Mark the FCF rediscovery pending state */
3051	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3052	spin_unlock_irq(&phba->hbalock);
3053}
3054
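/*
 * Race note (illustrative): FCF_REDISC_PEND is what keeps the arm and
 * cancel paths coherent.  Both the timeout handler below and the stop
 * routines above test the flag under hbalock, so a timer that fires after
 * lpfc_sli4_stop_fcf_redisc_wait_timer() has cleared the flag simply
 * returns without queueing a rediscovery event.
 */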
3055/**
3056 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3057 * @ptr: unsigned long holding the pointer to the lpfc hba data structure.
3058 *
3059 * This routine is invoked when the wait for FCF table rediscovery has
3060 * timed out. If new FCF record(s) have been discovered during the wait
3061 * period, a new FCF event is added to the FCoE async event list and the
3062 * worker thread is woken up for processing in the worker thread context.
3064 **/
3065void
3066lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3067{
3068	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3069
3070	/* Don't send FCF rediscovery event if timer cancelled */
3071	spin_lock_irq(&phba->hbalock);
3072	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3073		spin_unlock_irq(&phba->hbalock);
3074		return;
3075	}
3076	/* Clear FCF rediscovery timer pending flag */
3077	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3078	/* FCF rediscovery event to worker thread */
3079	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3080	spin_unlock_irq(&phba->hbalock);
3081	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3082			"2776 FCF rediscover quiescent timer expired\n");
3083	/* wake up worker thread */
3084	lpfc_worker_wake_up(phba);
3085}
3086
3087/**
3088 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3089 * @phba: pointer to lpfc hba data structure.
3090 * @acqe_link: pointer to the async link completion queue entry.
3091 *
3092 * This routine is to parse the SLI4 link-attention link fault code and
3093 * translate it into the base driver's read link attention mailbox command
3094 * status.
3095 *
3096 * Return: Link-attention status in terms of base driver's coding.
3097 **/
3098static uint16_t
3099lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3100			   struct lpfc_acqe_link *acqe_link)
3101{
3102	uint16_t latt_fault;
3103
3104	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3105	case LPFC_ASYNC_LINK_FAULT_NONE:
3106	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3107	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3108		latt_fault = 0;
3109		break;
3110	default:
3111		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3112				"0398 Invalid link fault code: x%x\n",
3113				bf_get(lpfc_acqe_link_fault, acqe_link));
3114		latt_fault = MBXERR_ERROR;
3115		break;
3116	}
3117	return latt_fault;
3118}
3119
3120/**
3121 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3122 * @phba: pointer to lpfc hba data structure.
3123 * @acqe_link: pointer to the async link completion queue entry.
3124 *
3125 * This routine is to parse the SLI4 link attention type and translate it
3126 * into the base driver's link attention type coding.
3127 *
3128 * Return: Link attention type in terms of base driver's coding.
3129 **/
3130static uint8_t
3131lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3132			  struct lpfc_acqe_link *acqe_link)
3133{
3134	uint8_t att_type;
3135
3136	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3137	case LPFC_ASYNC_LINK_STATUS_DOWN:
3138	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3139		att_type = LPFC_ATT_LINK_DOWN;
3140		break;
3141	case LPFC_ASYNC_LINK_STATUS_UP:
3142		/* Ignore physical link up events - wait for logical link up */
3143		att_type = LPFC_ATT_RESERVED;
3144		break;
3145	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3146		att_type = LPFC_ATT_LINK_UP;
3147		break;
3148	default:
3149		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3150				"0399 Invalid link attention type: x%x\n",
3151				bf_get(lpfc_acqe_link_status, acqe_link));
3152		att_type = LPFC_ATT_RESERVED;
3153		break;
3154	}
3155	return att_type;
3156}
3157
3158/**
3159 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3160 * @phba: pointer to lpfc hba data structure.
3161 * @acqe_link: pointer to the async link completion queue entry.
3162 *
3163 * This routine is to parse the SLI4 link-attention link speed and translate
3164 * it into the base driver's link-attention link speed coding.
3165 *
3166 * Return: Link-attention link speed in terms of base driver's coding.
3167 **/
3168static uint8_t
3169lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3170				struct lpfc_acqe_link *acqe_link)
3171{
3172	uint8_t link_speed;
3173
3174	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3175	case LPFC_ASYNC_LINK_SPEED_ZERO:
3176	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3177	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3178		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3179		break;
3180	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3181		link_speed = LPFC_LINK_SPEED_1GHZ;
3182		break;
3183	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3184		link_speed = LPFC_LINK_SPEED_10GHZ;
3185		break;
3186	default:
3187		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3188				"0483 Invalid link-attention link speed: x%x\n",
3189				bf_get(lpfc_acqe_link_speed, acqe_link));
3190		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3191		break;
3192	}
3193	return link_speed;
3194}
3195
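/*
 * Translation sketch (illustrative): the three parsers above map raw FCoE
 * link ACQE fields into the SLI3-era READ_TOPOLOGY vocabulary, e.g.
 *
 *   LPFC_ASYNC_LINK_STATUS_LOGICAL_UP  ->  LPFC_ATT_LINK_UP
 *   LPFC_ASYNC_LINK_SPEED_10GBPS       ->  LPFC_LINK_SPEED_10GHZ
 *   unrecognized fault code            ->  MBXERR_ERROR
 *
 * so that lpfc_mbx_cmpl_read_topology() can be reused unchanged for FCoE.
 */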
3196/**
3197 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3198 * @phba: pointer to lpfc hba data structure.
3199 * @acqe_link: pointer to the async link completion queue entry.
3200 *
3201 * This routine is to handle the SLI4 asynchronous FCoE link event.
3202 **/
3203static void
3204lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3205			 struct lpfc_acqe_link *acqe_link)
3206{
3207	struct lpfc_dmabuf *mp;
3208	LPFC_MBOXQ_t *pmb;
3209	MAILBOX_t *mb;
3210	struct lpfc_mbx_read_top *la;
3211	uint8_t att_type;
3212	int rc;
3213
3214	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3215	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3216		return;
3217	phba->fcoe_eventtag = acqe_link->event_tag;
3218	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3219	if (!pmb) {
3220		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3221				"0395 The mboxq allocation failed\n");
3222		return;
3223	}
3224	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3225	if (!mp) {
3226		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3227				"0396 The lpfc_dmabuf allocation failed\n");
3228		goto out_free_pmb;
3229	}
3230	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3231	if (!mp->virt) {
3232		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3233				"0397 The mbuf allocation failed\n");
3234		goto out_free_dmabuf;
3235	}
3236
3237	/* Cleanup any outstanding ELS commands */
3238	lpfc_els_flush_all_cmd(phba);
3239
3240	/* Block ELS IOCBs until we are done processing the link event */
3241	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3242
3243	/* Update link event statistics */
3244	phba->sli.slistat.link_event++;
3245
3246	/* Create lpfc_handle_latt mailbox command from link ACQE */
3247	lpfc_read_topology(phba, pmb, mp);
3248	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3249	pmb->vport = phba->pport;
3250
3251	/* Keep the link status for extra SLI4 state machine reference */
3252	phba->sli4_hba.link_state.speed =
3253				bf_get(lpfc_acqe_link_speed, acqe_link);
3254	phba->sli4_hba.link_state.duplex =
3255				bf_get(lpfc_acqe_link_duplex, acqe_link);
3256	phba->sli4_hba.link_state.status =
3257				bf_get(lpfc_acqe_link_status, acqe_link);
3258	phba->sli4_hba.link_state.type =
3259				bf_get(lpfc_acqe_link_type, acqe_link);
3260	phba->sli4_hba.link_state.number =
3261				bf_get(lpfc_acqe_link_number, acqe_link);
3262	phba->sli4_hba.link_state.fault =
3263				bf_get(lpfc_acqe_link_fault, acqe_link);
3264	phba->sli4_hba.link_state.logical_speed =
3265			bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3266	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3267			"2900 Async FC/FCoE Link event - Speed:%dGBit "
3268			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3269			"Logical speed:%dMbps Fault:%d\n",
3270			phba->sli4_hba.link_state.speed,
3271			phba->sli4_hba.link_state.duplex,
3272			phba->sli4_hba.link_state.status,
3273			phba->sli4_hba.link_state.type,
3274			phba->sli4_hba.link_state.number,
3275			phba->sli4_hba.link_state.logical_speed * 10,
3276			phba->sli4_hba.link_state.fault);
3277	/*
3278	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3279	 * topology info. Note: Optional for non FC-AL ports.
3280	 */
3281	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3282		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3283		if (rc == MBX_NOT_FINISHED) {
			/* out_free_dmabuf frees only mp; release the mbuf here */
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
3284			goto out_free_dmabuf;
		}
3285		return;
3286	}
3287	/*
3288	 * For FCoE Mode: fill in all the topology information we need and call
3289	 * the READ_TOPOLOGY completion routine to continue without actually
3290	 * sending the READ_TOPOLOGY mailbox command to the port.
3291	 */
3292	/* Parse and translate status field */
3293	mb = &pmb->u.mb;
3294	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3295
3296	/* Parse and translate link attention fields */
3297	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3298	la->eventTag = acqe_link->event_tag;
3299	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3300	bf_set(lpfc_mbx_read_top_link_spd, la,
3301	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3302
3303	/* Fake the following irrelevant fields */
3304	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3305	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3306	bf_set(lpfc_mbx_read_top_il, la, 0);
3307	bf_set(lpfc_mbx_read_top_pb, la, 0);
3308	bf_set(lpfc_mbx_read_top_fa, la, 0);
3309	bf_set(lpfc_mbx_read_top_mm, la, 0);
3310
3311	/* Invoke the lpfc_handle_latt mailbox command callback function */
3312	lpfc_mbx_cmpl_read_topology(phba, pmb);
3313
3314	return;
3315
3316out_free_dmabuf:
3317	kfree(mp);
3318out_free_pmb:
3319	mempool_free(pmb, phba->mbox_mem_pool);
3320}
3321
3322/**
3323 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3324 * @phba: pointer to lpfc hba data structure.
3325 * @acqe_fc: pointer to the async fc completion queue entry.
3326 *
3327 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3328 * that the event was received and then issue a read_topology mailbox command so
3329 * that the rest of the driver will treat it the same as SLI3.
3330 **/
3331static void
3332lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3333{
3334	struct lpfc_dmabuf *mp;
3335	LPFC_MBOXQ_t *pmb;
3336	int rc;
3337
3338	if (bf_get(lpfc_trailer_type, acqe_fc) !=
3339	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3340		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3341				"2895 Non FC link Event detected.(%d)\n",
3342				bf_get(lpfc_trailer_type, acqe_fc));
3343		return;
3344	}
3345	/* Keep the link status for extra SLI4 state machine reference */
3346	phba->sli4_hba.link_state.speed =
3347				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3348	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3349	phba->sli4_hba.link_state.topology =
3350				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3351	phba->sli4_hba.link_state.status =
3352				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3353	phba->sli4_hba.link_state.type =
3354				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3355	phba->sli4_hba.link_state.number =
3356				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3357	phba->sli4_hba.link_state.fault =
3358				bf_get(lpfc_acqe_link_fault, acqe_fc);
3359	phba->sli4_hba.link_state.logical_speed =
3360				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3361	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3362			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
3363			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3364			"%dMbps Fault:%d\n",
3365			phba->sli4_hba.link_state.speed,
3366			phba->sli4_hba.link_state.topology,
3367			phba->sli4_hba.link_state.status,
3368			phba->sli4_hba.link_state.type,
3369			phba->sli4_hba.link_state.number,
3370			phba->sli4_hba.link_state.logical_speed * 10,
3371			phba->sli4_hba.link_state.fault);
3372	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3373	if (!pmb) {
3374		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3375				"2897 The mboxq allocation failed\n");
3376		return;
3377	}
3378	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3379	if (!mp) {
3380		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3381				"2898 The lpfc_dmabuf allocation failed\n");
3382		goto out_free_pmb;
3383	}
3384	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3385	if (!mp->virt) {
3386		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3387				"2899 The mbuf allocation failed\n");
3388		goto out_free_dmabuf;
3389	}
3390
3391	/* Cleanup any outstanding ELS commands */
3392	lpfc_els_flush_all_cmd(phba);
3393
3394	/* Block ELS IOCBs until we are done processing the link event */
3395	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3396
3397	/* Update link event statistics */
3398	phba->sli.slistat.link_event++;
3399
3400	/* Create lpfc_handle_latt mailbox command from link ACQE */
3401	lpfc_read_topology(phba, pmb, mp);
3402	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3403	pmb->vport = phba->pport;
3404
3405	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3406	if (rc == MBX_NOT_FINISHED)
3407		goto out_free_dmabuf;
3408	return;
3409
3410out_free_dmabuf:
3411	kfree(mp);
3412out_free_pmb:
3413	mempool_free(pmb, phba->mbox_mem_pool);
3414}
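
/*
 * The ACQE decoding above goes through the driver's bf_get()/bf_set()
 * bit-field accessors.  The stand-alone sketch below shows the same
 * shift-and-mask idea; the field position used here is hypothetical
 * and is not the real ACQE layout.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdint.h>
#include <stdio.h>

#define EX_FIELD_SHIFT	16		/* hypothetical bit offset */
#define EX_FIELD_MASK	0x000000ff	/* hypothetical 8-bit field */

static uint32_t ex_get(uint32_t word)
{
	return (word >> EX_FIELD_SHIFT) & EX_FIELD_MASK;
}

static uint32_t ex_set(uint32_t word, uint32_t val)
{
	word &= ~(EX_FIELD_MASK << EX_FIELD_SHIFT);
	return word | ((val & EX_FIELD_MASK) << EX_FIELD_SHIFT);
}

int main(void)
{
	uint32_t w = ex_set(0, 0x5a);

	printf("field = 0x%x\n", ex_get(w));	/* prints field = 0x5a */
	return 0;
}
#endif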
3415
3416/**
3417 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
3418 * @phba: pointer to lpfc hba data structure.
3419 * @acqe_sli: pointer to the async SLI completion queue entry.
3420 *
3421 * This routine is to handle the SLI4 asynchronous SLI events.
3422 **/
3423static void
3424lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3425{
3426	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3427			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
3428			"x%08x SLI Event Type:%d\n",
3429			acqe_sli->event_data1, acqe_sli->event_data2,
3430			bf_get(lpfc_trailer_type, acqe_sli));
3431	return;
3432}
3433
3434/**
3435 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3436 * @vport: pointer to vport data structure.
3437 *
3438 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3439 * response to a CVL event.
3440 *
3441 * Return the pointer to the vport's Fabric ndlp if successful, otherwise
3442 * return NULL.
3443 **/
3444static struct lpfc_nodelist *
3445lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3446{
3447	struct lpfc_nodelist *ndlp;
3448	struct Scsi_Host *shost;
3449	struct lpfc_hba *phba;
3450
3451	if (!vport)
3452		return NULL;
3453	phba = vport->phba;
3454	if (!phba)
3455		return NULL;
3456	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3457	if (!ndlp) {
3458		/* Cannot find existing Fabric ndlp, so allocate a new one */
3459		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3460		if (!ndlp)
3461			return NULL;
3462		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3463		/* Set the node type */
3464		ndlp->nlp_type |= NLP_FABRIC;
3465		/* Put ndlp onto node list */
3466		lpfc_enqueue_node(vport, ndlp);
3467	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3468		/* re-setup ndlp without removing from node list */
3469		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3470		if (!ndlp)
3471			return NULL;
3472	}
3473	if ((phba->pport->port_state < LPFC_FLOGI) &&
3474		(phba->pport->port_state != LPFC_VPORT_FAILED))
3475		return NULL;
3476	/* If the virtual link is not yet instantiated, ignore the CVL */
3477	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3478		&& (vport->port_state != LPFC_VPORT_FAILED))
3479		return NULL;
3480	shost = lpfc_shost_from_vport(vport);
3481	if (!shost)
3482		return NULL;
3483	lpfc_linkdown_port(vport);
3484	lpfc_cleanup_pending_mbox(vport);
3485	spin_lock_irq(shost->host_lock);
3486	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3487	spin_unlock_irq(shost->host_lock);
3488
3489	return ndlp;
3490}
3491
3492/**
3493 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3494 * @phba: pointer to lpfc hba data structure.
3495 *
3496 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3497 * response to a FCF dead event.
3498 **/
3499static void
3500lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3501{
3502	struct lpfc_vport **vports;
3503	int i;
3504
3505	vports = lpfc_create_vport_work_array(phba);
3506	if (vports)
3507		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3508			lpfc_sli4_perform_vport_cvl(vports[i]);
3509	lpfc_destroy_vport_work_array(phba, vports);
3510}
3511
3512/**
3513 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3514 * @phba: pointer to lpfc hba data structure.
3515 * @acqe_fip: pointer to the async fcoe completion queue entry.
3516 *
3517 * This routine is to handle the SLI4 asynchronous fcoe event.
3518 **/
3519static void
3520lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3521			struct lpfc_acqe_fip *acqe_fip)
3522{
3523	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3524	int rc;
3525	struct lpfc_vport *vport;
3526	struct lpfc_nodelist *ndlp;
3527	struct Scsi_Host  *shost;
3528	int active_vlink_present;
3529	struct lpfc_vport **vports;
3530	int i;
3531
3532	phba->fc_eventTag = acqe_fip->event_tag;
3533	phba->fcoe_eventtag = acqe_fip->event_tag;
3534	switch (event_type) {
3535	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3536	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3537		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3538			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3539					LOG_DISCOVERY,
3540					"2546 New FCF event, evt_tag:x%x, "
3541					"index:x%x\n",
3542					acqe_fip->event_tag,
3543					acqe_fip->index);
3544		else
3545			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3546					LOG_DISCOVERY,
3547					"2788 FCF param modified event, "
3548					"evt_tag:x%x, index:x%x\n",
3549					acqe_fip->event_tag,
3550					acqe_fip->index);
3551		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3552			/*
3553			 * During period of FCF discovery, read the FCF
3554			 * table record indexed by the event to update
3555			 * FCF roundrobin failover eligible FCF bmask.
3556			 */
3557			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3558					LOG_DISCOVERY,
3559					"2779 Read FCF (x%x) for updating "
3560					"roundrobin FCF failover bmask\n",
3561					acqe_fip->index);
3562			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3563		}
3564
3565		/* If the FCF discovery is in progress, do nothing. */
3566		spin_lock_irq(&phba->hbalock);
3567		if (phba->hba_flag & FCF_TS_INPROG) {
3568			spin_unlock_irq(&phba->hbalock);
3569			break;
3570		}
3571		/* If fast FCF failover rescan event is pending, do nothing */
3572		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3573			spin_unlock_irq(&phba->hbalock);
3574			break;
3575		}
3576
3577		/* If the FCF is already in the discovered state, do nothing. */
3578		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3579			spin_unlock_irq(&phba->hbalock);
3580			break;
3581		}
3582		spin_unlock_irq(&phba->hbalock);
3583
3584		/* Otherwise, scan the entire FCF table and re-discover SAN */
3585		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3586				"2770 Start FCF table scan per async FCF "
3587				"event, evt_tag:x%x, index:x%x\n",
3588				acqe_fip->event_tag, acqe_fip->index);
3589		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3590						     LPFC_FCOE_FCF_GET_FIRST);
3591		if (rc)
3592			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3593					"2547 Issue FCF scan read FCF mailbox "
3594					"command failed (x%x)\n", rc);
3595		break;
3596
3597	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3598		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3599			"2548 FCF Table full count 0x%x tag 0x%x\n",
3600			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3601			acqe_fip->event_tag);
3602		break;
3603
3604	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3605		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3606			"2549 FCF (x%x) disconnected from network, "
3607			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3608		/*
3609		 * If we are in the middle of FCF failover process, clear
3610		 * the corresponding FCF bit in the roundrobin bitmap.
3611		 */
3612		spin_lock_irq(&phba->hbalock);
3613		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3614			spin_unlock_irq(&phba->hbalock);
3615			/* Update FLOGI FCF failover eligible FCF bmask */
3616			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3617			break;
3618		}
3619		spin_unlock_irq(&phba->hbalock);
3620
3621		/* If the event is not for the currently used FCF, do nothing */
3622		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3623			break;
3624
3625		/*
3626		 * Otherwise, request the port to rediscover the entire FCF
3627		 * table for a fast recovery from the case that the current FCF
3628		 * is no longer valid, as we are not already in the middle of
3629		 * the FCF failover process.
3630		 */
3631		spin_lock_irq(&phba->hbalock);
3632		/* Mark the fast failover process in progress */
3633		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3634		spin_unlock_irq(&phba->hbalock);
3635
3636		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3637				"2771 Start FCF fast failover process due to "
3638				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3639				"\n", acqe_fip->event_tag, acqe_fip->index);
3640		rc = lpfc_sli4_redisc_fcf_table(phba);
3641		if (rc) {
3642			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3643					LOG_DISCOVERY,
3644					"2772 Issue FCF rediscover mailbox "
3645					"command failed, fail through to FCF "
3646					"dead event\n");
3647			spin_lock_irq(&phba->hbalock);
3648			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3649			spin_unlock_irq(&phba->hbalock);
3650			/*
3651			 * Last resort will fail over by treating this
3652			 * as a link down to FCF registration.
3653			 */
3654			lpfc_sli4_fcf_dead_failthrough(phba);
3655		} else {
3656			/* Reset FCF roundrobin bmask for new discovery */
3657			lpfc_sli4_clear_fcf_rr_bmask(phba);
3658			/*
3659			 * Handling fast FCF failover to a DEAD FCF event is
3660			 * considered equivalent to receiving CVL to all vports.
3661			 */
3662			lpfc_sli4_perform_all_vport_cvl(phba);
3663		}
3664		break;
3665	case LPFC_FIP_EVENT_TYPE_CVL:
3666		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3667			"2718 Clear Virtual Link Received for VPI 0x%x"
3668			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3669
3670		vport = lpfc_find_vport_by_vpid(phba,
3671						acqe_fip->index);
3672		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3673		if (!ndlp)
3674			break;
3675		active_vlink_present = 0;
3676
3677		vports = lpfc_create_vport_work_array(phba);
3678		if (vports) {
3679			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3680					i++) {
3681				if ((!(vports[i]->fc_flag &
3682					FC_VPORT_CVL_RCVD)) &&
3683					(vports[i]->port_state > LPFC_FDISC)) {
3684					active_vlink_present = 1;
3685					break;
3686				}
3687			}
3688			lpfc_destroy_vport_work_array(phba, vports);
3689		}
3690
3691		if (active_vlink_present) {
3692			/*
3693			 * If there are other active VLinks present,
3694			 * re-instantiate the Vlink using FDISC.
3695			 */
3696			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3697			shost = lpfc_shost_from_vport(vport);
3698			spin_lock_irq(shost->host_lock);
3699			ndlp->nlp_flag |= NLP_DELAY_TMO;
3700			spin_unlock_irq(shost->host_lock);
3701			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3702			vport->port_state = LPFC_FDISC;
3703		} else {
3704			/*
3705			 * Otherwise, request the port to rediscover
3706			 * the entire FCF table for a fast recovery
3707			 * from the possible case that the current FCF
3708			 * is no longer valid, if we are not already
3709			 * in the FCF failover process.
3710			 */
3711			spin_lock_irq(&phba->hbalock);
3712			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3713				spin_unlock_irq(&phba->hbalock);
3714				break;
3715			}
3716			/* Mark the fast failover process in progress */
3717			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3718			spin_unlock_irq(&phba->hbalock);
3719			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3720					LOG_DISCOVERY,
3721					"2773 Start FCF failover per CVL, "
3722					"evt_tag:x%x\n", acqe_fip->event_tag);
3723			rc = lpfc_sli4_redisc_fcf_table(phba);
3724			if (rc) {
3725				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3726						LOG_DISCOVERY,
3727						"2774 Issue FCF rediscover "
3728						"mailbox command failed, fail "
3729						"through to CVL event\n");
3730				spin_lock_irq(&phba->hbalock);
3731				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3732				spin_unlock_irq(&phba->hbalock);
3733				/*
3734				 * Last resort will be to retry on the
3735				 * currently registered FCF entry.
3736				 */
3737				lpfc_retry_pport_discovery(phba);
3738			} else
3739				/*
3740				 * Reset FCF roundrobin bmask for new
3741				 * discovery.
3742				 */
3743				lpfc_sli4_clear_fcf_rr_bmask(phba);
3744		}
3745		break;
3746	default:
3747		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3748			"0288 Unknown FCoE event type 0x%x event tag "
3749			"0x%x\n", event_type, acqe_fip->event_tag);
3750		break;
3751	}
3752}
3753
3754/**
3755 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3756 * @phba: pointer to lpfc hba data structure.
3757 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3758 *
3759 * This routine is to handle the SLI4 asynchronous dcbx event.
3760 **/
3761static void
3762lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3763			 struct lpfc_acqe_dcbx *acqe_dcbx)
3764{
3765	phba->fc_eventTag = acqe_dcbx->event_tag;
3766	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3767			"0290 The SLI4 DCBX asynchronous event is not "
3768			"handled yet\n");
3769}
3770
3771/**
3772 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3773 * @phba: pointer to lpfc hba data structure.
3774 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3775 *
3776 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3777 * is an asynchronous notification of a logical link speed change.  The Port
3778 * reports the logical link speed in units of 10Mbps.
3779 **/
3780static void
3781lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3782			 struct lpfc_acqe_grp5 *acqe_grp5)
3783{
3784	uint16_t prev_ll_spd;
3785
3786	phba->fc_eventTag = acqe_grp5->event_tag;
3787	phba->fcoe_eventtag = acqe_grp5->event_tag;
3788	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3789	phba->sli4_hba.link_state.logical_speed =
3790		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3791	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3792			"2789 GRP5 Async Event: Updating logical link speed "
3793			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3794			(phba->sli4_hba.link_state.logical_speed * 10));
3795}
3796
3797/**
3798 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3799 * @phba: pointer to lpfc hba data structure.
3800 *
3801 * This routine is invoked by the worker thread to process all the pending
3802 * SLI4 asynchronous events.
3803 **/
3804void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3805{
3806	struct lpfc_cq_event *cq_event;
3807
3808	/* First, declare the async event has been handled */
3809	spin_lock_irq(&phba->hbalock);
3810	phba->hba_flag &= ~ASYNC_EVENT;
3811	spin_unlock_irq(&phba->hbalock);
3812	/* Now, handle all the async events */
3813	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3814		/* Get the first event from the head of the event queue */
3815		spin_lock_irq(&phba->hbalock);
3816		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3817				 cq_event, struct lpfc_cq_event, list);
3818		spin_unlock_irq(&phba->hbalock);
3819		/* Process the asynchronous event */
3820		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3821		case LPFC_TRAILER_CODE_LINK:
3822			lpfc_sli4_async_link_evt(phba,
3823						 &cq_event->cqe.acqe_link);
3824			break;
3825		case LPFC_TRAILER_CODE_FCOE:
3826			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3827			break;
3828		case LPFC_TRAILER_CODE_DCBX:
3829			lpfc_sli4_async_dcbx_evt(phba,
3830						 &cq_event->cqe.acqe_dcbx);
3831			break;
3832		case LPFC_TRAILER_CODE_GRP5:
3833			lpfc_sli4_async_grp5_evt(phba,
3834						 &cq_event->cqe.acqe_grp5);
3835			break;
3836		case LPFC_TRAILER_CODE_FC:
3837			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3838			break;
3839		case LPFC_TRAILER_CODE_SLI:
3840			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3841			break;
3842		default:
3843			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3844					"1804 Invalid asynchronous event code: "
3845					"x%x\n", bf_get(lpfc_trailer_code,
3846					&cq_event->cqe.mcqe_cmpl));
3847			break;
3848		}
3849		/* Free the completion event processed to the free pool */
3850		lpfc_sli4_cq_event_release(phba, cq_event);
3851	}
3852}
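
/*
 * lpfc_sli4_async_event_proc() above holds hbalock only long enough to
 * unlink one event, then processes it with the lock dropped.  A minimal
 * pthread sketch of that pop-then-process pattern follows; the types are
 * hypothetical stand-ins, not the driver's lpfc_cq_event.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int code;
};

static struct event *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlink one event under the lock; handle it with the lock dropped. */
static void drain_events(void)
{
	struct event *ev;

	for (;;) {
		pthread_mutex_lock(&queue_lock);
		ev = queue_head;
		if (ev)
			queue_head = ev->next;
		pthread_mutex_unlock(&queue_lock);
		if (!ev)
			break;
		printf("handling event %d\n", ev->code);
		free(ev);
	}
}

int main(void)
{
	struct event *ev = calloc(1, sizeof(*ev));

	if (!ev)
		return 1;
	ev->code = 42;
	queue_head = ev;
	drain_events();
	return 0;
}
#endif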
3853
3854/**
3855 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3856 * @phba: pointer to lpfc hba data structure.
3857 *
3858 * This routine is invoked by the worker thread to process FCF table
3859 * rediscovery pending completion event.
3860 **/
3861void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3862{
3863	int rc;
3864
3865	spin_lock_irq(&phba->hbalock);
3866	/* Clear FCF rediscovery timeout event */
3867	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3868	/* Clear driver fast failover FCF record flag */
3869	phba->fcf.failover_rec.flag = 0;
3870	/* Set state for FCF fast failover */
3871	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3872	spin_unlock_irq(&phba->hbalock);
3873
3874	/* Scan FCF table from the first entry to re-discover SAN */
3875	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3876			"2777 Start post-quiescent FCF table scan\n");
3877	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3878	if (rc)
3879		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3880				"2747 Issue FCF scan read FCF mailbox "
3881				"command failed 0x%x\n", rc);
3882}
3883
3884/**
3885 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3886 * @phba: pointer to lpfc hba data structure.
3887 * @dev_grp: The HBA PCI-Device group number.
3888 *
3889 * This routine is invoked to set up the per HBA PCI-Device group function
3890 * API jump table entries.
3891 *
3892 * Return: 0 if success, otherwise -ENODEV
3893 **/
3894int
3895lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3896{
3897	int rc;
3898
3899	/* Set up lpfc PCI-device group */
3900	phba->pci_dev_grp = dev_grp;
3901
3902	/* The LPFC_PCI_DEV_OC uses SLI4 */
3903	if (dev_grp == LPFC_PCI_DEV_OC)
3904		phba->sli_rev = LPFC_SLI_REV4;
3905
3906	/* Set up device INIT API function jump table */
3907	rc = lpfc_init_api_table_setup(phba, dev_grp);
3908	if (rc)
3909		return -ENODEV;
3910	/* Set up SCSI API function jump table */
3911	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3912	if (rc)
3913		return -ENODEV;
3914	/* Set up SLI API function jump table */
3915	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3916	if (rc)
3917		return -ENODEV;
3918	/* Set up MBOX API function jump table */
3919	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3920	if (rc)
3921		return -ENODEV;
3922
3923	return 0;
3924}
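
/*
 * A reduced sketch of the per-device-group jump table that the four
 * *_api_table_setup() calls above populate.  The names here are
 * hypothetical; only the dispatch shape matches the driver.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdio.h>

enum dev_grp { DEV_GRP_LP, DEV_GRP_OC };

struct hba_ops {
	void (*stop_port)(void);
};

static void stop_port_s3(void) { puts("SLI-3 stop"); }
static void stop_port_s4(void) { puts("SLI-4 stop"); }

/* Bind the group-specific implementation once at setup time. */
static int ops_setup(struct hba_ops *ops, enum dev_grp grp)
{
	switch (grp) {
	case DEV_GRP_LP:
		ops->stop_port = stop_port_s3;
		break;
	case DEV_GRP_OC:
		ops->stop_port = stop_port_s4;
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	struct hba_ops ops;

	if (!ops_setup(&ops, DEV_GRP_OC))
		ops.stop_port();	/* dispatches to stop_port_s4() */
	return 0;
}
#endif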
3925
3926/**
3927 * lpfc_log_intr_mode - Log the active interrupt mode
3928 * @phba: pointer to lpfc hba data structure.
3929 * @intr_mode: active interrupt mode adopted.
3930 *
3931 * This routine is invoked to log the currently used active interrupt mode
3932 * to the device.
3933 **/
3934static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3935{
3936	switch (intr_mode) {
3937	case 0:
3938		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3939				"0470 Enabled INTx interrupt mode.\n");
3940		break;
3941	case 1:
3942		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3943				"0481 Enabled MSI interrupt mode.\n");
3944		break;
3945	case 2:
3946		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3947				"0480 Enabled MSI-X interrupt mode.\n");
3948		break;
3949	default:
3950		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3951				"0482 Illegal interrupt mode.\n");
3952		break;
3953	}
3954	return;
3955}
3956
3957/**
3958 * lpfc_enable_pci_dev - Enable a generic PCI device.
3959 * @phba: pointer to lpfc hba data structure.
3960 *
3961 * This routine is invoked to perform the PCI device enabling steps that are
3962 * common to all lpfc HBA PCI device types.
3963 *
3964 * Return codes
3965 * 	0 - successful
3966 * 	other values - error
3967 **/
3968static int
3969lpfc_enable_pci_dev(struct lpfc_hba *phba)
3970{
3971	struct pci_dev *pdev;
3972	int bars = 0;
3973
3974	/* Obtain PCI device reference */
3975	if (!phba->pcidev)
3976		goto out_error;
3977	else
3978		pdev = phba->pcidev;
3979	/* Select PCI BARs */
3980	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3981	/* Enable PCI device */
3982	if (pci_enable_device_mem(pdev))
3983		goto out_error;
3984	/* Request PCI resource for the device */
3985	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3986		goto out_disable_device;
3987	/* Set up device as PCI master and save state for EEH */
3988	pci_set_master(pdev);
3989	pci_try_set_mwi(pdev);
3990	pci_save_state(pdev);
3991
3992	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3993	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
3994		pdev->needs_freset = 1;
3995
3996	return 0;
3997
3998out_disable_device:
3999	pci_disable_device(pdev);
4000out_error:
4001	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4002			"1401 Failed to enable pci device, bars:x%x\n", bars);
4003	return -ENODEV;
4004}
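
/*
 * lpfc_enable_pci_dev() above uses the usual goto-unwind idiom: acquire
 * resources in order, release them in reverse order on failure.  A
 * self-contained sketch of the same pattern with plain malloc() stand-ins:
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdio.h>
#include <stdlib.h>

static int setup_resources(void)
{
	char *a, *b;

	a = malloc(64);
	if (!a)
		goto out_error;
	b = malloc(64);
	if (!b)
		goto out_free_a;

	/* ... use a and b ... */
	free(b);
	free(a);
	return 0;

out_free_a:
	free(a);
out_error:
	fprintf(stderr, "setup failed\n");
	return -1;
}

int main(void)
{
	return setup_resources() ? 1 : 0;
}
#endif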
4005
4006/**
4007 * lpfc_disable_pci_dev - Disable a generic PCI device.
4008 * @phba: pointer to lpfc hba data structure.
4009 *
4010 * This routine is invoked to perform the PCI device disabling steps that are
4011 * common to all lpfc HBA PCI device types.
4012 **/
4013static void
4014lpfc_disable_pci_dev(struct lpfc_hba *phba)
4015{
4016	struct pci_dev *pdev;
4017	int bars;
4018
4019	/* Obtain PCI device reference */
4020	if (!phba->pcidev)
4021		return;
4022	else
4023		pdev = phba->pcidev;
4024	/* Select PCI BARs */
4025	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4026	/* Release PCI resource and disable PCI device */
4027	pci_release_selected_regions(pdev, bars);
4028	pci_disable_device(pdev);
4029	/* Null out PCI private reference to driver */
4030	pci_set_drvdata(pdev, NULL);
4031
4032	return;
4033}
4034
4035/**
4036 * lpfc_reset_hba - Reset a hba
4037 * @phba: pointer to lpfc hba data structure.
4038 *
4039 * This routine is invoked to reset a hba device. It brings the HBA
4040 * offline, performs a board restart, and then brings the board back
4041 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4042 * outstanding mailbox commands.
4043 **/
4044void
4045lpfc_reset_hba(struct lpfc_hba *phba)
4046{
4047	/* If resets are disabled then set error state and return. */
4048	if (!phba->cfg_enable_hba_reset) {
4049		phba->link_state = LPFC_HBA_ERROR;
4050		return;
4051	}
4052	lpfc_offline_prep(phba);
4053	lpfc_offline(phba);
4054	lpfc_sli_brdrestart(phba);
4055	lpfc_online(phba);
4056	lpfc_unblock_mgmt_io(phba);
4057}
4058
4059/**
4060 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4061 * @phba: pointer to lpfc hba data structure.
4062 *
4063 * This function reads the PCI SR-IOV extended capability of the physical
4064 * function and returns the total number of virtual functions the device
4065 * supports, or 0 if the device does not have the SR-IOV capability.
4068 **/
4069uint16_t
4070lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4071{
4072	struct pci_dev *pdev = phba->pcidev;
4073	uint16_t nr_virtfn;
4074	int pos;
4075
4076	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4077	if (pos == 0)
4078		return 0;
4079
4080	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4081	return nr_virtfn;
4082}
4083
4084/**
4085 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4086 * @phba: pointer to lpfc hba data structure.
4087 * @nr_vfn: number of virtual functions to be enabled.
4088 *
4089 * This function enables the PCI SR-IOV virtual functions to a physical
4090 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4091 * enable the number of virtual functions to the physical function. As
4092 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4093 * API call is not considered an error condition for most devices.
4094 **/
4095int
4096lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4097{
4098	struct pci_dev *pdev = phba->pcidev;
4099	uint16_t max_nr_vfn;
4100	int rc;
4101
4102	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4103	if (nr_vfn > max_nr_vfn) {
4104		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4105				"3057 Requested vfs (%d) greater than "
4106				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
4107		return -EINVAL;
4108	}
4109
4110	rc = pci_enable_sriov(pdev, nr_vfn);
4111	if (rc) {
4112		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4113				"2806 Failed to enable sriov on this device "
4114				"with vfn number nr_vf:%d, rc:%d\n",
4115				nr_vfn, rc);
4116	} else
4117		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4118				"2807 Successfully enabled sriov on this device "
4119				"with vfn number nr_vf:%d\n", nr_vfn);
4120	return rc;
4121}
4122
4123/**
4124 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4125 * @phba: pointer to lpfc hba data structure.
4126 *
4127 * This routine is invoked to set up the driver internal resources specific to
4128 * support the SLI-3 HBA device it attached to.
4129 * support the SLI-3 HBA device it is attached to.
4130 * Return codes
4131 * 	0 - successful
4132 * 	other values - error
4133 **/
4134static int
4135lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4136{
4137	struct lpfc_sli *psli;
4138	int rc;
4139
4140	/*
4141	 * Initialize timers used by driver
4142	 */
4143
4144	/* Heartbeat timer */
4145	init_timer(&phba->hb_tmofunc);
4146	phba->hb_tmofunc.function = lpfc_hb_timeout;
4147	phba->hb_tmofunc.data = (unsigned long)phba;
4148
4149	psli = &phba->sli;
4150	/* MBOX heartbeat timer */
4151	init_timer(&psli->mbox_tmo);
4152	psli->mbox_tmo.function = lpfc_mbox_timeout;
4153	psli->mbox_tmo.data = (unsigned long) phba;
4154	/* FCP polling mode timer */
4155	init_timer(&phba->fcp_poll_timer);
4156	phba->fcp_poll_timer.function = lpfc_poll_timeout;
4157	phba->fcp_poll_timer.data = (unsigned long) phba;
4158	/* Fabric block timer */
4159	init_timer(&phba->fabric_block_timer);
4160	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4161	phba->fabric_block_timer.data = (unsigned long) phba;
4162	/* EA polling mode timer */
4163	init_timer(&phba->eratt_poll);
4164	phba->eratt_poll.function = lpfc_poll_eratt;
4165	phba->eratt_poll.data = (unsigned long) phba;
4166
4167	/* Host attention work mask setup */
4168	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4169	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4170
4171	/* Get all the module params for configuring this host */
4172	lpfc_get_cfgparam(phba);
4173	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4174		phba->menlo_flag |= HBA_MENLO_SUPPORT;
4175		/* check for menlo minimum sg count */
4176		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4177			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4178	}
4179
4180	/*
4181	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4182	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4183	 * 2 segments are added since the IOCB needs a command and response bde.
4184	 */
4185	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4186		sizeof(struct fcp_rsp) +
4187			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4188
4189	if (phba->cfg_enable_bg) {
4190		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4191		phba->cfg_sg_dma_buf_size +=
4192			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4193	}
4194
4195	/* Also reinitialize the host templates with new values. */
4196	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4197	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4198
4199	phba->max_vpi = LPFC_MAX_VPI;
4200	/* This will be set to correct value after config_port mbox */
4201	phba->max_vports = 0;
4202
4203	/*
4204	 * Initialize the SLI Layer to run with lpfc HBAs.
4205	 */
4206	lpfc_sli_setup(phba);
4207	lpfc_sli_queue_setup(phba);
4208
4209	/* Allocate device driver memory */
4210	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4211		return -ENOMEM;
4212
4213	/*
4214	 * Enable sr-iov virtual functions if supported and configured
4215	 * through the module parameter.
4216	 */
4217	if (phba->cfg_sriov_nr_virtfn > 0) {
4218		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4219						 phba->cfg_sriov_nr_virtfn);
4220		if (rc) {
4221			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4222					"2808 Requested number of SR-IOV "
4223					"virtual functions (%d) is not "
4224					"supported\n",
4225					phba->cfg_sriov_nr_virtfn);
4226			phba->cfg_sriov_nr_virtfn = 0;
4227		}
4228	}
4229
4230	return 0;
4231}
4232
4233/**
4234 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4235 * @phba: pointer to lpfc hba data structure.
4236 *
4237 * This routine is invoked to unset the driver internal resources set up
4238 * specific for supporting the SLI-3 HBA device it is attached to.
4239 **/
4240static void
4241lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4242{
4243	/* Free device driver memory allocated */
4244	lpfc_mem_free_all(phba);
4245
4246	return;
4247}
4248
4249/**
4250 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4251 * @phba: pointer to lpfc hba data structure.
4252 *
4253 * This routine is invoked to set up the driver internal resources specific to
4254 * support the SLI-4 HBA device it is attached to.
4255 *
4256 * Return codes
4257 * 	0 - successful
4258 * 	other values - error
4259 **/
4260static int
4261lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4262{
4263	struct lpfc_sli *psli;
4264	LPFC_MBOXQ_t *mboxq;
4265	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4266	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4267	struct lpfc_mqe *mqe;
4268	int longs, sli_family;
4269
4270	/* Before proceeding, wait for POST to complete and the device to be ready */
4271	rc = lpfc_sli4_post_status_check(phba);
4272	if (rc)
4273		return -ENODEV;
4274
4275	/*
4276	 * Initialize timers used by driver
4277	 */
4278
4279	/* Heartbeat timer */
4280	init_timer(&phba->hb_tmofunc);
4281	phba->hb_tmofunc.function = lpfc_hb_timeout;
4282	phba->hb_tmofunc.data = (unsigned long)phba;
4283	init_timer(&phba->rrq_tmr);
4284	phba->rrq_tmr.function = lpfc_rrq_timeout;
4285	phba->rrq_tmr.data = (unsigned long)phba;
4286
4287	psli = &phba->sli;
4288	/* MBOX heartbeat timer */
4289	init_timer(&psli->mbox_tmo);
4290	psli->mbox_tmo.function = lpfc_mbox_timeout;
4291	psli->mbox_tmo.data = (unsigned long) phba;
4292	/* Fabric block timer */
4293	init_timer(&phba->fabric_block_timer);
4294	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4295	phba->fabric_block_timer.data = (unsigned long) phba;
4296	/* EA polling mode timer */
4297	init_timer(&phba->eratt_poll);
4298	phba->eratt_poll.function = lpfc_poll_eratt;
4299	phba->eratt_poll.data = (unsigned long) phba;
4300	/* FCF rediscover timer */
4301	init_timer(&phba->fcf.redisc_wait);
4302	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4303	phba->fcf.redisc_wait.data = (unsigned long)phba;
4304
4305	/*
4306	 * Control structure for handling external multi-buffer mailbox
4307	 * command pass-through.
4308	 */
4309	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4310		sizeof(struct lpfc_mbox_ext_buf_ctx));
4311	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4312
4313	/*
4314	 * We need to do a READ_CONFIG mailbox command here before
4315	 * calling lpfc_get_cfgparam. For VFs this will report the
4316	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4317	 * All of the resources allocated
4318	 * for this Port are tied to these values.
4319	 */
4320	/* Get all the module params for configuring this host */
4321	lpfc_get_cfgparam(phba);
4322	phba->max_vpi = LPFC_MAX_VPI;
4323	/* This will be set to correct value after the read_config mbox */
4324	phba->max_vports = 0;
4325
4326	/* Program the default value of vlan_id and fc_map */
4327	phba->valid_vlan = 0;
4328	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4329	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4330	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4331
4332	/*
4333	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4334	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4335	 * 2 segments are added since the IOCB needs a command and response bde.
4336	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4337	 * sgl sizes that are a power of 2 are used.
4338	 */
4339	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4340		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4341
4342	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4343	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4344	switch (sli_family) {
4345	case LPFC_SLI_INTF_FAMILY_BE2:
4346	case LPFC_SLI_INTF_FAMILY_BE3:
4347		/* There is a single hint for BE - 2 pages per BPL. */
4348		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4349		    LPFC_SLI_INTF_SLI_HINT1_1)
4350			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4351		break;
4352	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4353	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4354	default:
4355		break;
4356	}
4357	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4358	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4359	     dma_buf_size = dma_buf_size << 1)
4360		;
4361	if (dma_buf_size == max_buf_size)
4362		phba->cfg_sg_seg_cnt = (dma_buf_size -
4363			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4364			(2 * sizeof(struct sli4_sge))) /
4365				sizeof(struct sli4_sge);
4366	phba->cfg_sg_dma_buf_size = dma_buf_size;
4367
4368	/* Initialize buffer queue management fields */
4369	hbq_count = lpfc_sli_hbq_count();
4370	for (i = 0; i < hbq_count; ++i)
4371		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4372	INIT_LIST_HEAD(&phba->rb_pend_list);
4373	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4374	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4375
4376	/*
4377	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4378	 */
4379	/* Initialize the Abort scsi buffer list used by driver */
4380	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4381	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4382	/* This abort list used by worker thread */
4383	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4384
4385	/*
4386	 * Initialize driver internal slow-path work queues
4387	 */
4388
4389	/* Driver internal slow-path CQ Event pool */
4390	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4391	/* Response IOCB work queue list */
4392	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4393	/* Asynchronous event CQ Event work queue list */
4394	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4395	/* Fast-path XRI aborted CQ Event work queue list */
4396	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4397	/* Slow-path XRI aborted CQ Event work queue list */
4398	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4399	/* Receive queue CQ Event work queue list */
4400	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4401
4402	/* Initialize extent block lists. */
4403	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4404	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4405	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4406	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4407
4408	/* Initialize the driver internal SLI layer lists. */
4409	lpfc_sli_setup(phba);
4410	lpfc_sli_queue_setup(phba);
4411
4412	/* Allocate device driver memory */
4413	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4414	if (rc)
4415		return -ENOMEM;
4416
4417	/* IF Type 2 ports get initialized now. */
4418	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4419	    LPFC_SLI_INTF_IF_TYPE_2) {
4420		rc = lpfc_pci_function_reset(phba);
4421		if (unlikely(rc))
4422			return -ENODEV;
4423	}
4424
4425	/* Create the bootstrap mailbox command */
4426	rc = lpfc_create_bootstrap_mbox(phba);
4427	if (unlikely(rc))
4428		goto out_free_mem;
4429
4430	/* Set up the host's endian order with the device. */
4431	rc = lpfc_setup_endian_order(phba);
4432	if (unlikely(rc))
4433		goto out_free_bsmbx;
4434
4435	/* Set up the hba's configuration parameters. */
4436	rc = lpfc_sli4_read_config(phba);
4437	if (unlikely(rc))
4438		goto out_free_bsmbx;
4439
4440	/* IF Type 0 ports get initialized now. */
4441	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4442	    LPFC_SLI_INTF_IF_TYPE_0) {
4443		rc = lpfc_pci_function_reset(phba);
4444		if (unlikely(rc))
4445			goto out_free_bsmbx;
4446	}
4447
4448	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4449						       GFP_KERNEL);
4450	if (!mboxq) {
4451		rc = -ENOMEM;
4452		goto out_free_bsmbx;
4453	}
4454
4455	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4456	lpfc_supported_pages(mboxq);
4457	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4458	if (!rc) {
4459		mqe = &mboxq->u.mqe;
4460		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4461		       LPFC_MAX_SUPPORTED_PAGES);
4462		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4463			switch (pn_page[i]) {
4464			case LPFC_SLI4_PARAMETERS:
4465				phba->sli4_hba.pc_sli4_params.supported = 1;
4466				break;
4467			default:
4468				break;
4469			}
4470		}
4471		/* Read the port's SLI4 Parameters capabilities if supported. */
4472		if (phba->sli4_hba.pc_sli4_params.supported)
4473			rc = lpfc_pc_sli4_params_get(phba, mboxq);
4474		if (rc) {
4475			mempool_free(mboxq, phba->mbox_mem_pool);
4476			rc = -EIO;
4477			goto out_free_bsmbx;
4478		}
4479	}
4480	/*
4481	 * Get sli4 parameters that override parameters from Port capabilities.
4482	 * If this call fails, it isn't critical unless the SLI4 parameters come
4483	 * back in conflict.
4484	 */
4485	rc = lpfc_get_sli4_parameters(phba, mboxq);
4486	if (rc) {
4487		if (phba->sli4_hba.extents_in_use &&
4488		    phba->sli4_hba.rpi_hdrs_in_use) {
4489			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4490				"2999 Unsupported SLI4 Parameters "
4491				"Extents and RPI headers enabled.\n");
4492			goto out_free_bsmbx;
4493		}
4494	}
4495	mempool_free(mboxq, phba->mbox_mem_pool);
4496	/* Create all the SLI4 queues */
4497	rc = lpfc_sli4_queue_create(phba);
4498	if (rc)
4499		goto out_free_bsmbx;
4500
4501	/* Create driver internal CQE event pool */
4502	rc = lpfc_sli4_cq_event_pool_create(phba);
4503	if (rc)
4504		goto out_destroy_queue;
4505
4506	/* Initialize and populate the iocb list per host */
4507	rc = lpfc_init_sgl_list(phba);
4508	if (rc) {
4509		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4510				"1400 Failed to initialize sgl list.\n");
4511		goto out_destroy_cq_event_pool;
4512	}
4513	rc = lpfc_init_active_sgl_array(phba);
4514	if (rc) {
4515		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4516				"1430 Failed to initialize sgl list.\n");
4517		goto out_free_sgl_list;
4518	}
4519	rc = lpfc_sli4_init_rpi_hdrs(phba);
4520	if (rc) {
4521		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4522				"1432 Failed to initialize rpi headers.\n");
4523		goto out_free_active_sgl;
4524	}
4525
4526	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
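	/* Round bits up to whole longs: one bit per FCF table index */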
4527	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4528	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4529					 GFP_KERNEL);
4530	if (!phba->fcf.fcf_rr_bmask) {
4531		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4532				"2759 Failed allocate memory for FCF round "
4533				"robin failover bmask\n");
4534		rc = -ENOMEM;
4535		goto out_remove_rpi_hdrs;
4536	}
4537
4538	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4539				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4540	if (!phba->sli4_hba.fcp_eq_hdl) {
4541		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4542				"2572 Failed allocate memory for fast-path "
4543				"per-EQ handle array\n");
4544		rc = -ENOMEM;
4545		goto out_free_fcf_rr_bmask;
4546	}
4547
4548	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4549				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4550	if (!phba->sli4_hba.msix_entries) {
4551		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4552				"2573 Failed allocate memory for msi-x "
4553				"interrupt vector entries\n");
4554		rc = -ENOMEM;
4555		goto out_free_fcp_eq_hdl;
4556	}
4557
4558	/*
4559	 * Enable sr-iov virtual functions if supported and configured
4560	 * through the module parameter.
4561	 */
4562	if (phba->cfg_sriov_nr_virtfn > 0) {
4563		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4564						 phba->cfg_sriov_nr_virtfn);
4565		if (rc) {
4566			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4567					"3020 Requested number of SR-IOV "
4568					"virtual functions (%d) is not "
4569					"supported\n",
4570					phba->cfg_sriov_nr_virtfn);
4571			phba->cfg_sriov_nr_virtfn = 0;
4572		}
4573	}
4574
4575	return 0;
4576
4577out_free_fcp_eq_hdl:
4578	kfree(phba->sli4_hba.fcp_eq_hdl);
4579out_free_fcf_rr_bmask:
4580	kfree(phba->fcf.fcf_rr_bmask);
4581out_remove_rpi_hdrs:
4582	lpfc_sli4_remove_rpi_hdrs(phba);
4583out_free_active_sgl:
4584	lpfc_free_active_sgl(phba);
4585out_free_sgl_list:
4586	lpfc_free_sgl_list(phba);
4587out_destroy_cq_event_pool:
4588	lpfc_sli4_cq_event_pool_destroy(phba);
4589out_destroy_queue:
4590	lpfc_sli4_queue_destroy(phba);
4591out_free_bsmbx:
4592	lpfc_destroy_bootstrap_mbox(phba);
4593out_free_mem:
4594	lpfc_mem_free(phba);
4595	return rc;
4596}
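
/*
 * The dma_buf_size loop in the routine above doubles the pool entry size
 * until it covers the computed payload, keeping it a power of 2 so a scsi
 * sgl never straddles a 4k boundary.  A stand-alone sketch with
 * hypothetical bounds (the driver's real limits are LPFC_SLI4_MIN_BUF_SIZE
 * and LPFC_SLI4_MAX_BUF_SIZE):
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdio.h>

#define MIN_BUF	1024	/* hypothetical lower bound */
#define MAX_BUF	8192	/* hypothetical upper bound */

/* Double from MIN_BUF until the size covers need, capped at MAX_BUF. */
static int round_buf_size(int need)
{
	int sz;

	for (sz = MIN_BUF; sz < MAX_BUF && need > sz; sz <<= 1)
		;
	return sz;
}

int main(void)
{
	printf("%d -> %d\n", 2600, round_buf_size(2600));	/* 2600 -> 4096 */
	return 0;
}
#endif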
4597
4598/**
4599 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4600 * @phba: pointer to lpfc hba data structure.
4601 *
4602 * This routine is invoked to unset the driver internal resources set up
4603 * specific for supporting the SLI-4 HBA device it is attached to.
4604 **/
4605static void
4606lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4607{
4608	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4609
4610	/* Free memory allocated for msi-x interrupt vector entries */
4611	kfree(phba->sli4_hba.msix_entries);
4612
4613	/* Free memory allocated for fast-path work queue handles */
4614	kfree(phba->sli4_hba.fcp_eq_hdl);
4615
4616	/* Free the allocated rpi headers. */
4617	lpfc_sli4_remove_rpi_hdrs(phba);
4618	lpfc_sli4_remove_rpis(phba);
4619
4620	/* Free eligible FCF index bmask */
4621	kfree(phba->fcf.fcf_rr_bmask);
4622
4623	/* Free the ELS sgl list */
4624	lpfc_free_active_sgl(phba);
4625	lpfc_free_sgl_list(phba);
4626
4627	/* Free the SCSI sgl management array */
4628	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4629
4630	/* Free the SLI4 queues */
4631	lpfc_sli4_queue_destroy(phba);
4632
4633	/* Free the completion queue EQ event pool */
4634	lpfc_sli4_cq_event_release_all(phba);
4635	lpfc_sli4_cq_event_pool_destroy(phba);
4636
4637	/* Release resource identifiers. */
4638	lpfc_sli4_dealloc_resource_identifiers(phba);
4639
4640	/* Free the bsmbx region. */
4641	lpfc_destroy_bootstrap_mbox(phba);
4642
4643	/* Free the SLI Layer memory with SLI4 HBAs */
4644	lpfc_mem_free_all(phba);
4645
4646	/* Free the current connect table */
4647	list_for_each_entry_safe(conn_entry, next_conn_entry,
4648		&phba->fcf_conn_rec_list, list) {
4649		list_del_init(&conn_entry->list);
4650		kfree(conn_entry);
4651	}
4652
4653	return;
4654}
4655
4656/**
4657 * lpfc_init_api_table_setup - Set up init api function jump table
4658 * @phba: The hba struct for which this call is being executed.
4659 * @dev_grp: The HBA PCI-Device group number.
4660 *
4661 * This routine sets up the device INIT interface API function jump table
4662 * in @phba struct.
4663 *
4664 * Returns: 0 - success, -ENODEV - failure.
4665 **/
4666int
4667lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4668{
4669	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4670	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4671	phba->lpfc_selective_reset = lpfc_selective_reset;
4672	switch (dev_grp) {
4673	case LPFC_PCI_DEV_LP:
4674		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4675		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4676		phba->lpfc_stop_port = lpfc_stop_port_s3;
4677		break;
4678	case LPFC_PCI_DEV_OC:
4679		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4680		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4681		phba->lpfc_stop_port = lpfc_stop_port_s4;
4682		break;
4683	default:
4684		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4685				"1431 Invalid HBA PCI-device group: 0x%x\n",
4686				dev_grp);
4687		return -ENODEV;
4688		break;
4689	}
4690	return 0;
4691}
4692
4693/**
4694 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4695 * @phba: pointer to lpfc hba data structure.
4696 *
4697 * This routine is invoked to set up the driver internal resources before the
4698 * device specific resource setup to support the HBA device it is attached to.
4699 *
4700 * Return codes
4701 *	0 - successful
4702 *	other values - error
4703 **/
4704static int
4705lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4706{
4707	/*
4708	 * Driver resources common to all SLI revisions
4709	 */
4710	atomic_set(&phba->fast_event_count, 0);
4711	spin_lock_init(&phba->hbalock);
4712
4713	/* Initialize ndlp management spinlock */
4714	spin_lock_init(&phba->ndlp_lock);
4715
4716	INIT_LIST_HEAD(&phba->port_list);
4717	INIT_LIST_HEAD(&phba->work_list);
4718	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4719
4720	/* Initialize the wait queue head for the kernel thread */
4721	init_waitqueue_head(&phba->work_waitq);
4722
4723	/* Initialize the scsi buffer list used by driver for scsi IO */
4724	spin_lock_init(&phba->scsi_buf_list_lock);
4725	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4726
4727	/* Initialize the fabric iocb list */
4728	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4729
4730	/* Initialize list to save ELS buffers */
4731	INIT_LIST_HEAD(&phba->elsbuf);
4732
4733	/* Initialize FCF connection rec list */
4734	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4735
4736	return 0;
4737}
4738
4739/**
4740 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4741 * @phba: pointer to lpfc hba data structure.
4742 *
4743 * This routine is invoked to set up the driver internal resources after the
4744 * device specific resource setup to support the HBA device it is attached to.
4745 *
4746 * Return codes
4747 * 	0 - successful
4748 * 	other values - error
4749 **/
4750static int
4751lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4752{
4753	int error;
4754
4755	/* Startup the kernel thread for this host adapter. */
4756	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4757					  "lpfc_worker_%d", phba->brd_no);
4758	if (IS_ERR(phba->worker_thread)) {
4759		error = PTR_ERR(phba->worker_thread);
4760		return error;
4761	}
4762
4763	return 0;
4764}
4765
4766/**
4767 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4768 * @phba: pointer to lpfc hba data structure.
4769 *
4770 * This routine is invoked to unset the driver internal resources set up after
4771 * the device specific resource setup for supporting the HBA device it
4772 * is attached to.
4773 **/
4774static void
4775lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4776{
4777	/* Stop kernel worker thread */
4778	kthread_stop(phba->worker_thread);
4779}
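
/*
 * Phase2 setup/unset above bracket the worker thread's lifetime with
 * kthread_run() and kthread_stop().  A user-space pthread sketch of the
 * same start/stop handshake; the atomic stop flag stands in for
 * kthread_should_stop().
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <pthread.h>
#include <stdio.h>

static int stop_requested;

static void *worker(void *arg)
{
	(void)arg;
	while (!__atomic_load_n(&stop_requested, __ATOMIC_ACQUIRE))
		;	/* a real worker would sleep and process work here */
	return NULL;
}

int main(void)
{
	pthread_t thr;

	if (pthread_create(&thr, NULL, worker, NULL))
		return 1;
	__atomic_store_n(&stop_requested, 1, __ATOMIC_RELEASE);
	pthread_join(thr, NULL);
	puts("worker stopped");
	return 0;
}
#endif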
4780
4781/**
4782 * lpfc_free_iocb_list - Free iocb list.
4783 * @phba: pointer to lpfc hba data structure.
4784 *
4785 * This routine is invoked to free the driver's IOCB list and memory.
4786 **/
4787static void
4788lpfc_free_iocb_list(struct lpfc_hba *phba)
4789{
4790	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4791
4792	spin_lock_irq(&phba->hbalock);
4793	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4794				 &phba->lpfc_iocb_list, list) {
4795		list_del(&iocbq_entry->list);
4796		kfree(iocbq_entry);
4797		phba->total_iocbq_bufs--;
4798	}
4799	spin_unlock_irq(&phba->hbalock);
4800
4801	return;
4802}
4803
4804/**
4805 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4806 * @phba: pointer to lpfc hba data structure.
4807 *
4808 * This routine is invoked to allocate and initialize the driver's IOCB
4809 * list of @iocb_count entries and set up the IOCB tag array accordingly.
4810 *
4811 * Return codes
4812 *	0 - successful
4813 *	other values - error
4814 **/
4815static int
4816lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4817{
4818	struct lpfc_iocbq *iocbq_entry = NULL;
4819	uint16_t iotag;
4820	int i;
4821
4822	/* Initialize and populate the iocb list per host.  */
4823	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4824	for (i = 0; i < iocb_count; i++) {
4825		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4826		if (iocbq_entry == NULL) {
4827			printk(KERN_ERR "%s: only allocated %d iocbs of "
4828				"expected %d count. Unloading driver.\n",
4829				__func__, i, iocb_count);
4830			goto out_free_iocbq;
4831		}
4832
4833		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4834		if (iotag == 0) {
4835			kfree(iocbq_entry);
4836			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4837				"Unloading driver.\n", __func__);
4838			goto out_free_iocbq;
4839		}
4840		iocbq_entry->sli4_lxritag = NO_XRI;
4841		iocbq_entry->sli4_xritag = NO_XRI;
4842
4843		spin_lock_irq(&phba->hbalock);
4844		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4845		phba->total_iocbq_bufs++;
4846		spin_unlock_irq(&phba->hbalock);
4847	}
4848
4849	return 0;
4850
4851out_free_iocbq:
4852	lpfc_free_iocb_list(phba);
4853
4854	return -ENOMEM;
4855}
4856
4857/**
4858 * lpfc_free_sgl_list - Free sgl list.
4859 * @phba: pointer to lpfc hba data structure.
4860 *
4861 * This routine is invoked to free the driver's sgl list and memory.
4862 **/
4863static void
4864lpfc_free_sgl_list(struct lpfc_hba *phba)
4865{
4866	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4867	LIST_HEAD(sglq_list);
4868
4869	spin_lock_irq(&phba->hbalock);
4870	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4871	spin_unlock_irq(&phba->hbalock);
4872
4873	list_for_each_entry_safe(sglq_entry, sglq_next,
4874				 &sglq_list, list) {
4875		list_del(&sglq_entry->list);
4876		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4877		kfree(sglq_entry);
4878		phba->sli4_hba.total_sglq_bufs--;
4879	}
4880	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4881}
4882
4883/**
4884 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4885 * @phba: pointer to lpfc hba data structure.
4886 *
4887 * This routine is invoked to allocate the driver's active sgl memory.
4888 * This array will hold the sglq_entry's for active IOs.
4889 **/
4890static int
4891lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4892{
4893	int size;
4894	size = sizeof(struct lpfc_sglq *);
4895	size *= phba->sli4_hba.max_cfg_param.max_xri;
4896
4897	phba->sli4_hba.lpfc_sglq_active_list =
4898		kzalloc(size, GFP_KERNEL);
4899	if (!phba->sli4_hba.lpfc_sglq_active_list)
4900		return -ENOMEM;
4901	return 0;
4902}
4903
4904/**
4905 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4906 * @phba: pointer to lpfc hba data structure.
4907 *
4908 * This routine is invoked to walk through the array of active sglq entries
4909 * and free all of the resources.
4910 * This is just a place holder for now.
4911 **/
4912static void
4913lpfc_free_active_sgl(struct lpfc_hba *phba)
4914{
4915	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4916}
4917
4918/**
4919 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4920 * @phba: pointer to lpfc hba data structure.
4921 *
4922 * This routine is invoked to allocate and initialize the driver's sgl
4923 * list and set up the sgl xritag tag array accordingly.
4924 *
4925 * Return codes
4926 *	0 - successful
4927 *	other values - error
4928 **/
4929static int
4930lpfc_init_sgl_list(struct lpfc_hba *phba)
4931{
4932	struct lpfc_sglq *sglq_entry = NULL;
4933	int i;
4934	int els_xri_cnt;
4935
4936	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4937	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4938				"2400 ELS XRI count %d.\n",
4939				els_xri_cnt);
4940	/* Initialize and populate the sglq list per host/VF. */
4941	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4942	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4943
4944	/* Sanity check on XRI management */
4945	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4946		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4947				"2562 No room left for SCSI XRI allocation: "
4948				"max_xri=%d, els_xri=%d\n",
4949				phba->sli4_hba.max_cfg_param.max_xri,
4950				els_xri_cnt);
4951		return -ENOMEM;
4952	}
4953
4954	/* Allocate memory for the ELS XRI management array */
4955	phba->sli4_hba.lpfc_els_sgl_array =
4956			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4957			GFP_KERNEL);
4958
4959	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4960		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4961				"2401 Failed to allocate memory for ELS "
4962				"XRI management array of size %d.\n",
4963				els_xri_cnt);
4964		return -ENOMEM;
4965	}
4966
4967	/* Keep the SCSI XRI into the XRI management array */
4968	phba->sli4_hba.scsi_xri_max =
4969			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4970	phba->sli4_hba.scsi_xri_cnt = 0;
4971	phba->sli4_hba.lpfc_scsi_psb_array =
4972			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4973			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4974
4975	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4976		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4977				"2563 Failed to allocate memory for SCSI "
4978				"XRI management array of size %d.\n",
4979				phba->sli4_hba.scsi_xri_max);
4980		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4981		return -ENOMEM;
4982	}
4983
4984	for (i = 0; i < els_xri_cnt; i++) {
4985		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4986		if (sglq_entry == NULL) {
4987			printk(KERN_ERR "%s: only allocated %d sgls of "
4988				"expected %d count. Unloading driver.\n",
4989				__func__, i, els_xri_cnt);
4990			goto out_free_mem;
4991		}
4992
4993		sglq_entry->buff_type = GEN_BUFF_TYPE;
4994		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4995		if (sglq_entry->virt == NULL) {
4996			kfree(sglq_entry);
4997			printk(KERN_ERR "%s: failed to allocate mbuf. "
4998				"Unloading driver.\n", __func__);
4999			goto out_free_mem;
5000		}
5001		sglq_entry->sgl = sglq_entry->virt;
5002		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5003
5004		/* The list order is used by later block SGL registration */
5005		spin_lock_irq(&phba->hbalock);
5006		sglq_entry->state = SGL_FREED;
5007		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5008		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5009		phba->sli4_hba.total_sglq_bufs++;
5010		spin_unlock_irq(&phba->hbalock);
5011	}
5012	return 0;
5013
5014out_free_mem:
5015	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5016	lpfc_free_sgl_list(phba);
5017	return -ENOMEM;
5018}
5019
5020/**
5021 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5022 * @phba: pointer to lpfc hba data structure.
5023 *
5024 * This routine is invoked to post rpi header templates to the
5025 * port for those SLI4 ports that do not support extents.  This routine
5026 * posts a PAGE_SIZE memory region to the port to hold up to
5027 * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine
5028 * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
5029 *
5030 * Return codes
5031 * 	0 - successful
5032 *	-ERROR - otherwise.
5033 **/
5034int
5035lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5036{
5037	int rc = 0;
5038	struct lpfc_rpi_hdr *rpi_hdr;
5039
5040	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5041	/*
5042	 * If the SLI4 port supports extents, posting the rpi header isn't
5043	 * required.  Set the expected maximum count and let the actual value
5044	 * get set when extents are fully allocated.
5045	 */
5046	if (!phba->sli4_hba.rpi_hdrs_in_use) {
5047		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5048		return rc;
5049	}
5050	if (phba->sli4_hba.extents_in_use)
5051		return -EIO;
5052
5053	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5054	if (!rpi_hdr) {
5055		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5056				"0391 Error during rpi post operation\n");
5057		lpfc_sli4_remove_rpis(phba);
5058		rc = -ENODEV;
5059	}
5060
5061	return rc;
5062}
5063
5064/**
5065 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5066 * @phba: pointer to lpfc hba data structure.
5067 *
5068 * This routine is invoked to allocate a single 4KB memory region to
5069 * support rpis and stores them in the phba.  This single region
5070 * provides support for up to 64 rpis.  The region is used globally
5071 * by the device.
5072 *
5073 * Returns:
5074 *   A valid rpi hdr on success.
5075 *   A NULL pointer on any failure.
5076 **/
5077struct lpfc_rpi_hdr *
5078lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5079{
5080	uint16_t rpi_limit, curr_rpi_range;
5081	struct lpfc_dmabuf *dmabuf;
5082	struct lpfc_rpi_hdr *rpi_hdr;
5083	uint32_t rpi_count;
5084
5085	/*
5086	 * If the SLI4 port supports extents, posting the rpi header isn't
5087	 * required.  Set the expected maximum count and let the actual value
5088	 * get set when extents are fully allocated.
5089	 */
5090	if (!phba->sli4_hba.rpi_hdrs_in_use)
5091		return NULL;
5092	if (phba->sli4_hba.extents_in_use)
5093		return NULL;
5094
5095	/* The limit on the logical index is just the max_rpi count. */
5096	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5097		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
5098
5099	spin_lock_irq(&phba->hbalock);
5100	/*
5101	 * Establish the starting RPI in this header block.  The starting
5102	 * rpi is normalized to a zero base because the physical rpi is
5103	 * port based.
5104	 */
5105	curr_rpi_range = phba->sli4_hba.next_rpi -
5106		phba->sli4_hba.max_cfg_param.rpi_base;
5107	spin_unlock_irq(&phba->hbalock);
5108
5109	/*
5110	 * The port has a limited number of rpis. The increment here
5111	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5112	 * and to allow the full max_rpi range per port.
5113	 */
5114	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5115		rpi_count = rpi_limit - curr_rpi_range;
5116	else
5117		rpi_count = LPFC_RPI_HDR_COUNT;
5118
5119	if (!rpi_count)
5120		return NULL;
5121	/*
5122	 * First allocate the protocol header region for the port.  The
5123	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5124	 */
5125	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5126	if (!dmabuf)
5127		return NULL;
5128
5129	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5130					  LPFC_HDR_TEMPLATE_SIZE,
5131					  &dmabuf->phys,
5132					  GFP_KERNEL);
5133	if (!dmabuf->virt) {
5134		rpi_hdr = NULL;
5135		goto err_free_dmabuf;
5136	}
5137
5138	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5139	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5140		rpi_hdr = NULL;
5141		goto err_free_coherent;
5142	}
5143
5144	/* Save the rpi header data for cleanup later. */
5145	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5146	if (!rpi_hdr)
5147		goto err_free_coherent;
5148
5149	rpi_hdr->dmabuf = dmabuf;
5150	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5151	rpi_hdr->page_count = 1;
5152	spin_lock_irq(&phba->hbalock);
5153
5154	/* The rpi_hdr stores the logical index only. */
5155	rpi_hdr->start_rpi = curr_rpi_range;
5156	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5157
5158	/*
5159	 * The next_rpi stores the next logical modulo-64 rpi value used
5160	 * to post physical rpis in subsequent rpi postings.
5161	 */
5162	phba->sli4_hba.next_rpi += rpi_count;
5163	spin_unlock_irq(&phba->hbalock);
5164	return rpi_hdr;
5165
5166 err_free_coherent:
5167	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5168			  dmabuf->virt, dmabuf->phys);
5169 err_free_dmabuf:
5170	kfree(dmabuf);
5171	return NULL;
5172}
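
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the region allocated above is LPFC_HDR_TEMPLATE_SIZE (4KB) and must
 * be aligned to its own size.  Assuming the 64-byte rpi context size
 * implied by the comments above, its capacity is the 64 rpis quoted:
 */
static inline uint32_t lpfc_example_rpi_hdr_capacity(void)
{
	/* 4096-byte template page / 64 bytes per rpi context = 64 rpis */
	return LPFC_HDR_TEMPLATE_SIZE / 64;
}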
5173
5174/**
5175 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5176 * @phba: pointer to lpfc hba data structure.
5177 *
5178 * This routine is invoked to remove all memory resources allocated
5179 * to support rpis for SLI4 ports not supporting extents. This routine
5180 * presumes the caller has released all rpis consumed by fabric or port
5181 * logins and is prepared to have the header pages removed.
5182 **/
5183void
5184lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5185{
5186	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5187
5188	if (!phba->sli4_hba.rpi_hdrs_in_use)
5189		goto exit;
5190
5191	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5192				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5193		list_del(&rpi_hdr->list);
5194		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5195				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5196		kfree(rpi_hdr->dmabuf);
5197		kfree(rpi_hdr);
5198	}
5199 exit:
5200	/* There are no rpis available to the port now. */
5201	phba->sli4_hba.next_rpi = 0;
5202}
5203
5204/**
5205 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5206 * @pdev: pointer to pci device data structure.
5207 *
5208 * This routine is invoked to allocate the driver hba data structure for an
5209 * HBA device. If the allocation is successful, the phba reference to the
5210 * PCI device data structure is set.
5211 *
5212 * Return codes
5213 *      pointer to @phba - successful
5214 *      NULL - error
5215 **/
5216static struct lpfc_hba *
5217lpfc_hba_alloc(struct pci_dev *pdev)
5218{
5219	struct lpfc_hba *phba;
5220
5221	/* Allocate memory for HBA structure */
5222	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5223	if (!phba) {
5224		dev_err(&pdev->dev, "failed to allocate hba struct\n");
5225		return NULL;
5226	}
5227
5228	/* Set reference to PCI device in HBA structure */
5229	phba->pcidev = pdev;
5230
5231	/* Assign an unused board number */
5232	phba->brd_no = lpfc_get_instance();
5233	if (phba->brd_no < 0) {
5234		kfree(phba);
5235		return NULL;
5236	}
5237
5238	spin_lock_init(&phba->ct_ev_lock);
5239	INIT_LIST_HEAD(&phba->ct_ev_waiters);
5240
5241	return phba;
5242}
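
/*
 * Illustrative sketch: lpfc_get_instance() (defined earlier in this
 * file) hands out board numbers from the lpfc_hba_index idr that
 * lpfc_hba_free() releases below.  A minimal allocator using the idr
 * API of this era would look roughly like this (hypothetical name,
 * not the driver's actual implementation):
 */
static inline int lpfc_example_get_instance(void)
{
	int instance = 0;

	/* Pre-load the idr, then take the next free id */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}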
5243
5244/**
5245 * lpfc_hba_free - Free driver hba data structure with a device.
5246 * @phba: pointer to lpfc hba data structure.
5247 *
5248 * This routine is invoked to free the driver hba data structure with an
5249 * HBA device.
5250 **/
5251static void
5252lpfc_hba_free(struct lpfc_hba *phba)
5253{
5254	/* Release the driver assigned board number */
5255	idr_remove(&lpfc_hba_index, phba->brd_no);
5256
5257	kfree(phba);
5258	return;
5259}
5260
5261/**
5262 * lpfc_create_shost - Create hba physical port with associated scsi host.
5263 * @phba: pointer to lpfc hba data structure.
5264 *
5265 * This routine is invoked to create HBA physical port and associate a SCSI
5266 * host with it.
5267 *
5268 * Return codes
5269 *      0 - successful
5270 *      other values - error
5271 **/
5272static int
5273lpfc_create_shost(struct lpfc_hba *phba)
5274{
5275	struct lpfc_vport *vport;
5276	struct Scsi_Host  *shost;
5277
5278	/* Initialize HBA FC structure */
5279	phba->fc_edtov = FF_DEF_EDTOV;
5280	phba->fc_ratov = FF_DEF_RATOV;
5281	phba->fc_altov = FF_DEF_ALTOV;
5282	phba->fc_arbtov = FF_DEF_ARBTOV;
5283
5284	atomic_set(&phba->sdev_cnt, 0);
5285	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5286	if (!vport)
5287		return -ENODEV;
5288
5289	shost = lpfc_shost_from_vport(vport);
5290	phba->pport = vport;
5291	lpfc_debugfs_initialize(vport);
5292	/* Put reference to SCSI host to driver's device private data */
5293	pci_set_drvdata(phba->pcidev, shost);
5294
5295	return 0;
5296}
5297
5298/**
5299 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5300 * @phba: pointer to lpfc hba data structure.
5301 *
5302 * This routine is invoked to destroy HBA physical port and the associated
5303 * SCSI host.
5304 **/
5305static void
5306lpfc_destroy_shost(struct lpfc_hba *phba)
5307{
5308	struct lpfc_vport *vport = phba->pport;
5309
5310	/* Destroy physical port that associated with the SCSI host */
5311	destroy_port(vport);
5312
5313	return;
5314}
5315
5316/**
5317 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5318 * @phba: pointer to lpfc hba data structure.
5319 * @shost: the shost to be used to detect Block guard settings.
5320 *
5321 * This routine sets up the local Block guard protocol settings for @shost.
5322 * This routine also allocates memory for debugging bg buffers.
5323 **/
5324static void
5325lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5326{
5327	int pagecnt = 10;
5328	if (lpfc_prot_mask && lpfc_prot_guard) {
5329		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5330				"1478 Registering BlockGuard with the "
5331				"SCSI layer\n");
5332		scsi_host_set_prot(shost, lpfc_prot_mask);
5333		scsi_host_set_guard(shost, lpfc_prot_guard);
5334	}
5335	if (!_dump_buf_data) {
5336		spin_lock_init(&_dump_buf_lock);
5337		while (pagecnt) {
5338			_dump_buf_data =
5339				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5340			if (_dump_buf_data) {
5341				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5342					"9043 BLKGRD: allocated %d pages for "
5343				       "_dump_buf_data at 0x%p\n",
5344				       (1 << pagecnt), _dump_buf_data);
5345				_dump_buf_data_order = pagecnt;
5346				memset(_dump_buf_data, 0,
5347				       ((1 << PAGE_SHIFT) << pagecnt));
5348				break;
5349			} else
5350				--pagecnt;
5351		}
5352		if (!_dump_buf_data_order)
5353			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5354				"9044 BLKGRD: ERROR unable to allocate "
5355			       "memory for hexdump\n");
5356	} else
5357		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5358			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5359		       "\n", _dump_buf_data);
5360	if (!_dump_buf_dif) {
5361		while (pagecnt) {
5362			_dump_buf_dif =
5363				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5364			if (_dump_buf_dif) {
5365				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5366					"9046 BLKGRD: allocated %d pages for "
5367				       "_dump_buf_dif at 0x%p\n",
5368				       (1 << pagecnt), _dump_buf_dif);
5369				_dump_buf_dif_order = pagecnt;
5370				memset(_dump_buf_dif, 0,
5371				       ((1 << PAGE_SHIFT) << pagecnt));
5372				break;
5373			} else
5374				--pagecnt;
5375		}
5376		if (!_dump_buf_dif_order)
5377			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5378			"9047 BLKGRD: ERROR unable to allocate "
5379			       "memory for hexdump\n");
5380	} else
5381		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5382			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5383		       _dump_buf_dif);
5384}
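
/*
 * Illustrative sketch (hypothetical helper): both dump buffers above use
 * the same best-effort pattern -- request a high allocation order and
 * step down until __get_free_pages() succeeds, accepting a smaller
 * buffer rather than failing outright:
 */
static inline char *lpfc_example_alloc_dump_buf(unsigned long *order)
{
	char *buf = NULL;

	while (*order) {
		buf = (char *)__get_free_pages(GFP_KERNEL, *order);
		if (buf)
			break;		/* got 2^(*order) pages */
		--(*order);		/* halve the request and retry */
	}
	return buf;
}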
5385
5386/**
5387 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5388 * @phba: pointer to lpfc hba data structure.
5389 *
5390 * This routine is invoked to perform all the necessary post initialization
5391 * setup for the device.
5392 **/
5393static void
5394lpfc_post_init_setup(struct lpfc_hba *phba)
5395{
5396	struct Scsi_Host  *shost;
5397	struct lpfc_adapter_event_header adapter_event;
5398
5399	/* Get the default values for Model Name and Description */
5400	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5401
5402	/*
5403	 * hba setup may have changed the hba_queue_depth so we need to
5404	 * adjust the value of can_queue.
5405	 */
5406	shost = pci_get_drvdata(phba->pcidev);
5407	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5408	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5409		lpfc_setup_bg(phba, shost);
5410
5411	lpfc_host_attrib_init(shost);
5412
5413	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5414		spin_lock_irq(shost->host_lock);
5415		lpfc_poll_start_timer(phba);
5416		spin_unlock_irq(shost->host_lock);
5417	}
5418
5419	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5420			"0428 Perform SCSI scan\n");
5421	/* Send board arrival event to upper layer */
5422	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5423	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5424	fc_host_post_vendor_event(shost, fc_get_event_number(),
5425				  sizeof(adapter_event),
5426				  (char *) &adapter_event,
5427				  LPFC_NL_VENDOR_ID);
5428	return;
5429}
5430
5431/**
5432 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5433 * @phba: pointer to lpfc hba data structure.
5434 *
5435 * This routine is invoked to set up the PCI device memory space for device
5436 * with SLI-3 interface spec.
5437 *
5438 * Return codes
5439 * 	0 - successful
5440 * 	other values - error
5441 **/
5442static int
5443lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5444{
5445	struct pci_dev *pdev;
5446	unsigned long bar0map_len, bar2map_len;
5447	int i, hbq_count;
5448	void *ptr;
5449	int error = -ENODEV;
5450
5451	/* Obtain PCI device reference */
5452	if (!phba->pcidev)
5453		return error;
5454	else
5455		pdev = phba->pcidev;
5456
5457	/* Set the device DMA mask size */
5458	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5459	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5460		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5461		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5462			return error;
5463		}
5464	}
5465
5466	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5467	 * required by each mapping.
5468	 */
5469	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5470	bar0map_len = pci_resource_len(pdev, 0);
5471
5472	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5473	bar2map_len = pci_resource_len(pdev, 2);
5474
5475	/* Map HBA SLIM to a kernel virtual address. */
5476	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5477	if (!phba->slim_memmap_p) {
5478		dev_printk(KERN_ERR, &pdev->dev,
5479			   "ioremap failed for SLIM memory.\n");
5480		goto out;
5481	}
5482
5483	/* Map HBA Control Registers to a kernel virtual address. */
5484	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5485	if (!phba->ctrl_regs_memmap_p) {
5486		dev_printk(KERN_ERR, &pdev->dev,
5487			   "ioremap failed for HBA control registers.\n");
5488		goto out_iounmap_slim;
5489	}
5490
5491	/* Allocate memory for SLI-2 structures */
5492	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5493					       SLI2_SLIM_SIZE,
5494					       &phba->slim2p.phys,
5495					       GFP_KERNEL);
5496	if (!phba->slim2p.virt)
5497		goto out_iounmap;
5498
5499	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5500	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5501	phba->mbox_ext = (phba->slim2p.virt +
5502		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5503	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5504	phba->IOCBs = (phba->slim2p.virt +
5505		       offsetof(struct lpfc_sli2_slim, IOCBs));
5506
5507	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5508						 lpfc_sli_hbq_size(),
5509						 &phba->hbqslimp.phys,
5510						 GFP_KERNEL);
5511	if (!phba->hbqslimp.virt)
5512		goto out_free_slim;
5513
5514	hbq_count = lpfc_sli_hbq_count();
5515	ptr = phba->hbqslimp.virt;
5516	for (i = 0; i < hbq_count; ++i) {
5517		phba->hbqs[i].hbq_virt = ptr;
5518		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5519		ptr += (lpfc_hbq_defs[i]->entry_count *
5520			sizeof(struct lpfc_hbq_entry));
5521	}
5522	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5523	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5524
5525	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5526
5527	INIT_LIST_HEAD(&phba->rb_pend_list);
5528
5529	phba->MBslimaddr = phba->slim_memmap_p;
5530	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5531	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5532	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5533	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5534
5535	return 0;
5536
5537out_free_slim:
5538	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5539			  phba->slim2p.virt, phba->slim2p.phys);
5540out_iounmap:
5541	iounmap(phba->ctrl_regs_memmap_p);
5542out_iounmap_slim:
5543	iounmap(phba->slim_memmap_p);
5544out:
5545	return error;
5546}
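
/*
 * Illustrative sketch (hypothetical helper): the DMA mask negotiation at
 * the top of lpfc_sli_pci_mem_setup() is the standard 64-bit-first,
 * 32-bit-fallback PCI pattern, shown here in isolation:
 */
static inline int lpfc_example_set_dma_masks(struct pci_dev *pdev)
{
	/* Prefer full 64-bit streaming and coherent DMA */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	/* Otherwise fall back to 32-bit addressing */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;
	return -ENODEV;		/* no usable DMA configuration */
}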
5547
5548/**
5549 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5550 * @phba: pointer to lpfc hba data structure.
5551 *
5552 * This routine is invoked to unset the PCI device memory space for device
5553 * with SLI-3 interface spec.
5554 **/
5555static void
5556lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5557{
5558	struct pci_dev *pdev;
5559
5560	/* Obtain PCI device reference */
5561	if (!phba->pcidev)
5562		return;
5563	else
5564		pdev = phba->pcidev;
5565
5566	/* Free coherent DMA memory allocated */
5567	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5568			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5569	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5570			  phba->slim2p.virt, phba->slim2p.phys);
5571
5572	/* I/O memory unmap */
5573	iounmap(phba->ctrl_regs_memmap_p);
5574	iounmap(phba->slim_memmap_p);
5575
5576	return;
5577}
5578
5579/**
5580 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5581 * @phba: pointer to lpfc hba data structure.
5582 *
5583 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5584 * done and check status.
5585 *
5586 * Return 0 if successful, otherwise -ENODEV.
5587 **/
5588int
5589lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5590{
5591	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5592	struct lpfc_register reg_data;
5593	int i, port_error = 0;
5594	uint32_t if_type;
5595
5596	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5597	memset(&reg_data, 0, sizeof(reg_data));
5598	if (!phba->sli4_hba.PSMPHRregaddr)
5599		return -ENODEV;
5600
5601	/* Wait up to 30 seconds for SLI port POST completion and readiness */
5602	for (i = 0; i < 3000; i++) {
5603		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5604			&portsmphr_reg.word0) ||
5605			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5606			/* Port has a fatal POST error, break out */
5607			port_error = -ENODEV;
5608			break;
5609		}
5610		if (LPFC_POST_STAGE_PORT_READY ==
5611		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5612			break;
5613		msleep(10);
5614	}
5615
5616	/*
5617	 * If there was a port error during POST, then don't proceed with
5618	 * other register reads as the data may not be valid.  Just exit.
5619	 */
5620	if (port_error) {
5621		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5622			"1408 Port Failed POST - portsmphr=0x%x, "
5623			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5624			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5625			portsmphr_reg.word0,
5626			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5627			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5628			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5629			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5630			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5631			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5632			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5633			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5634	} else {
5635		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5636				"2534 Device Info: SLIFamily=0x%x, "
5637				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5638				"SLIHint_2=0x%x, FT=0x%x\n",
5639				bf_get(lpfc_sli_intf_sli_family,
5640				       &phba->sli4_hba.sli_intf),
5641				bf_get(lpfc_sli_intf_slirev,
5642				       &phba->sli4_hba.sli_intf),
5643				bf_get(lpfc_sli_intf_if_type,
5644				       &phba->sli4_hba.sli_intf),
5645				bf_get(lpfc_sli_intf_sli_hint1,
5646				       &phba->sli4_hba.sli_intf),
5647				bf_get(lpfc_sli_intf_sli_hint2,
5648				       &phba->sli4_hba.sli_intf),
5649				bf_get(lpfc_sli_intf_func_type,
5650				       &phba->sli4_hba.sli_intf));
5651		/*
5652		 * Check for other Port errors during the initialization
5653		 * process.  Fail the load if the port did not come up
5654		 * correctly.
5655		 */
5656		if_type = bf_get(lpfc_sli_intf_if_type,
5657				 &phba->sli4_hba.sli_intf);
5658		switch (if_type) {
5659		case LPFC_SLI_INTF_IF_TYPE_0:
5660			phba->sli4_hba.ue_mask_lo =
5661			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5662			phba->sli4_hba.ue_mask_hi =
5663			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5664			uerrlo_reg.word0 =
5665			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5666			uerrhi_reg.word0 =
5667				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5668			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5669			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5670				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5671						"1422 Unrecoverable Error "
5672						"Detected during POST "
5673						"uerr_lo_reg=0x%x, "
5674						"uerr_hi_reg=0x%x, "
5675						"ue_mask_lo_reg=0x%x, "
5676						"ue_mask_hi_reg=0x%x\n",
5677						uerrlo_reg.word0,
5678						uerrhi_reg.word0,
5679						phba->sli4_hba.ue_mask_lo,
5680						phba->sli4_hba.ue_mask_hi);
5681				port_error = -ENODEV;
5682			}
5683			break;
5684		case LPFC_SLI_INTF_IF_TYPE_2:
5685			/* Final checks.  The port status should be clean. */
5686			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5687				&reg_data.word0) ||
5688				(bf_get(lpfc_sliport_status_err, &reg_data) &&
5689				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5690				phba->work_status[0] =
5691					readl(phba->sli4_hba.u.if_type2.
5692					      ERR1regaddr);
5693				phba->work_status[1] =
5694					readl(phba->sli4_hba.u.if_type2.
5695					      ERR2regaddr);
5696				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5697					"2888 Port Error Detected "
5698					"during POST: "
5699					"port status reg 0x%x, "
5700					"port_smphr reg 0x%x, "
5701					"error 1=0x%x, error 2=0x%x\n",
5702					reg_data.word0,
5703					portsmphr_reg.word0,
5704					phba->work_status[0],
5705					phba->work_status[1]);
5706				port_error = -ENODEV;
5707			}
5708			break;
5709		case LPFC_SLI_INTF_IF_TYPE_1:
5710		default:
5711			break;
5712		}
5713	}
5714	return port_error;
5715}
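
/*
 * Illustrative sketch (hypothetical helper): the POST wait above is a
 * bounded poll -- sample the semaphore register, bail out on a fatal
 * error bit, and sleep 10ms between samples for at most 3000 iterations
 * (the 30-second budget).  Unlike the routine above, which proceeds to
 * deeper checks after the loop, this condensed form treats a timeout as
 * fatal:
 */
static inline int lpfc_example_wait_post_ready(struct lpfc_hba *phba)
{
	struct lpfc_register smphr;
	int i;

	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, &smphr.word0) ||
		    bf_get(lpfc_port_smphr_perr, &smphr))
			return -ENODEV;	/* read failure or fatal POST error */
		if (bf_get(lpfc_port_smphr_port_status, &smphr) ==
		    LPFC_POST_STAGE_PORT_READY)
			return 0;	/* port came ready in time */
		msleep(10);
	}
	return -ENODEV;			/* timed out waiting for POST */
}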
5716
5717/**
5718 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5719 * @phba: pointer to lpfc hba data structure.
5720 * @if_type:  The SLI4 interface type getting configured.
5721 *
5722 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5723 * memory map.
5724 **/
5725static void
5726lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5727{
5728	switch (if_type) {
5729	case LPFC_SLI_INTF_IF_TYPE_0:
5730		phba->sli4_hba.u.if_type0.UERRLOregaddr =
5731			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5732		phba->sli4_hba.u.if_type0.UERRHIregaddr =
5733			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5734		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5735			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5736		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5737			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5738		phba->sli4_hba.SLIINTFregaddr =
5739			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5740		break;
5741	case LPFC_SLI_INTF_IF_TYPE_2:
5742		phba->sli4_hba.u.if_type2.ERR1regaddr =
5743			phba->sli4_hba.conf_regs_memmap_p +
5744						LPFC_CTL_PORT_ER1_OFFSET;
5745		phba->sli4_hba.u.if_type2.ERR2regaddr =
5746			phba->sli4_hba.conf_regs_memmap_p +
5747						LPFC_CTL_PORT_ER2_OFFSET;
5748		phba->sli4_hba.u.if_type2.CTRLregaddr =
5749			phba->sli4_hba.conf_regs_memmap_p +
5750						LPFC_CTL_PORT_CTL_OFFSET;
5751		phba->sli4_hba.u.if_type2.STATUSregaddr =
5752			phba->sli4_hba.conf_regs_memmap_p +
5753						LPFC_CTL_PORT_STA_OFFSET;
5754		phba->sli4_hba.SLIINTFregaddr =
5755			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5756		phba->sli4_hba.PSMPHRregaddr =
5757			phba->sli4_hba.conf_regs_memmap_p +
5758						LPFC_CTL_PORT_SEM_OFFSET;
5759		phba->sli4_hba.RQDBregaddr =
5760			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5761		phba->sli4_hba.WQDBregaddr =
5762			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5763		phba->sli4_hba.EQCQDBregaddr =
5764			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5765		phba->sli4_hba.MQDBregaddr =
5766			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5767		phba->sli4_hba.BMBXregaddr =
5768			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5769		break;
5770	case LPFC_SLI_INTF_IF_TYPE_1:
5771	default:
5772		dev_printk(KERN_ERR, &phba->pcidev->dev,
5773			   "FATAL - unsupported SLI4 interface type - %d\n",
5774			   if_type);
5775		break;
5776	}
5777}
5778
5779/**
5780 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5781 * @phba: pointer to lpfc hba data structure.
5782 *
5783 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5784 * memory map.
5785 **/
5786static void
5787lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5788{
5789	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5790		LPFC_SLIPORT_IF0_SMPHR;
5791	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5792		LPFC_HST_ISR0;
5793	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5794		LPFC_HST_IMR0;
5795	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5796		LPFC_HST_ISCR0;
5797}
5798
5799/**
5800 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5801 * @phba: pointer to lpfc hba data structure.
5802 * @vf: virtual function number
5803 *
5804 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5805 * based on the given virtual function number, @vf.
5806 *
5807 * Return 0 if successful, otherwise -ENODEV.
5808 **/
5809static int
5810lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5811{
5812	if (vf > LPFC_VIR_FUNC_MAX)
5813		return -ENODEV;
5814
5815	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5816				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5817	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5818				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5819	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5820				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5821	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5822				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5823	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5824				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5825	return 0;
5826}
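
/*
 * Illustrative sketch (hypothetical helper): each virtual function owns
 * one LPFC_VFR_PAGE_SIZE page of doorbells in BAR2, so every register
 * address computed above is simply base + vf * page_size + offset:
 */
static inline void __iomem *lpfc_example_vf_doorbell(struct lpfc_hba *phba,
						     uint32_t vf,
						     uint32_t reg_offset)
{
	return phba->sli4_hba.drbl_regs_memmap_p +
	       vf * LPFC_VFR_PAGE_SIZE + reg_offset;
}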
5827
5828/**
5829 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5830 * @phba: pointer to lpfc hba data structure.
5831 *
5832 * This routine is invoked to create the bootstrap mailbox
5833 * region consistent with the SLI-4 interface spec.  This
5834 * routine allocates all memory necessary to communicate
5835 * mailbox commands to the port and sets up all alignment
5836 * needs.  No locks are expected to be held when calling
5837 * this routine.
5838 *
5839 * Return codes
5840 * 	0 - successful
5841 * 	-ENOMEM - could not allocate memory.
5842 **/
5843static int
5844lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5845{
5846	uint32_t bmbx_size;
5847	struct lpfc_dmabuf *dmabuf;
5848	struct dma_address *dma_address;
5849	uint32_t pa_addr;
5850	uint64_t phys_addr;
5851
5852	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5853	if (!dmabuf)
5854		return -ENOMEM;
5855
5856	/*
5857	 * The bootstrap mailbox region consists of 2 parts
5858	 * plus a 16-byte alignment restriction.
5859	 */
5860	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5861	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5862					  bmbx_size,
5863					  &dmabuf->phys,
5864					  GFP_KERNEL);
5865	if (!dmabuf->virt) {
5866		kfree(dmabuf);
5867		return -ENOMEM;
5868	}
5869	memset(dmabuf->virt, 0, bmbx_size);
5870
5871	/*
5872	 * Initialize the bootstrap mailbox pointers now so that the register
5873	 * operations are simple later.  The mailbox dma address is required
5874	 * to be 16-byte aligned.  Also align the virtual memory as each
5875	 * mailbox is copied into the bmbx mailbox region before issuing the
5876	 * command to the port.
5877	 */
5878	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5879	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5880
5881	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5882					      LPFC_ALIGN_16_BYTE);
5883	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5884					      LPFC_ALIGN_16_BYTE);
5885
5886	/*
5887	 * Set the high and low physical addresses now.  The SLI4 alignment
5888	 * requirement is 16 bytes and the mailbox is posted to the port
5889	 * as two 30-bit addresses.  A flag bit in each word marks whether
5890	 * it carries the high or the low half of the address.
5891	 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
5892	 * cleanly on 32-bit machines.
5893	 */
5894	dma_address = &phba->sli4_hba.bmbx.dma_address;
5895	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5896	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5897	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5898					   LPFC_BMBX_BIT1_ADDR_HI);
5899
5900	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5901	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5902					   LPFC_BMBX_BIT1_ADDR_LO);
5903	return 0;
5904}
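
/*
 * Illustrative sketch (hypothetical helper): the address split above,
 * restated.  A 16-byte-aligned physical address travels to the port as
 * two 30-bit words, each shifted left by 2 with bit 1 flagging whether
 * the word holds the high (bits 63:34) or low (bits 33:4) part:
 */
static inline void lpfc_example_bmbx_split(uint64_t aphys,
					   uint32_t *addr_hi,
					   uint32_t *addr_lo)
{
	*addr_hi = (uint32_t)(((aphys >> 34) & 0x3fffffff) << 2) |
		   LPFC_BMBX_BIT1_ADDR_HI;
	*addr_lo = (uint32_t)(((aphys >> 4) & 0x3fffffff) << 2) |
		   LPFC_BMBX_BIT1_ADDR_LO;
}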
5905
5906/**
5907 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5908 * @phba: pointer to lpfc hba data structure.
5909 *
5910 * This routine is invoked to teardown the bootstrap mailbox
5911 * region and release all host resources. This routine requires
5912 * the caller to ensure that all outstanding mailbox commands have
5913 * been recovered, that no additional mailbox commands are sent, and
5914 * that interrupts are disabled before calling this routine.
5915 *
5916 **/
5917static void
5918lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5919{
5920	dma_free_coherent(&phba->pcidev->dev,
5921			  phba->sli4_hba.bmbx.bmbx_size,
5922			  phba->sli4_hba.bmbx.dmabuf->virt,
5923			  phba->sli4_hba.bmbx.dmabuf->phys);
5924
5925	kfree(phba->sli4_hba.bmbx.dmabuf);
5926	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5927}
5928
5929/**
5930 * lpfc_sli4_read_config - Get the config parameters.
5931 * @phba: pointer to lpfc hba data structure.
5932 *
5933 * This routine is invoked to read the configuration parameters from the HBA.
5934 * The configuration parameters are used to set the base and maximum values
5935 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5936 * allocation for the port.
5937 *
5938 * Return codes
5939 * 	0 - successful
5940 * 	-ENOMEM - No available memory
5941 *      -EIO - The mailbox failed to complete successfully.
5942 **/
5943static int
5944lpfc_sli4_read_config(struct lpfc_hba *phba)
5945{
5946	LPFC_MBOXQ_t *pmb;
5947	struct lpfc_mbx_read_config *rd_config;
5948	union  lpfc_sli4_cfg_shdr *shdr;
5949	uint32_t shdr_status, shdr_add_status;
5950	struct lpfc_mbx_get_func_cfg *get_func_cfg;
5951	struct lpfc_rsrc_desc_fcfcoe *desc;
5952	uint32_t desc_count;
5953	int length, i, rc = 0;
5954
5955	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5956	if (!pmb) {
5957		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5958				"2011 Unable to allocate memory for issuing "
5959				"SLI_CONFIG_SPECIAL mailbox command\n");
5960		return -ENOMEM;
5961	}
5962
5963	lpfc_read_config(phba, pmb);
5964
5965	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5966	if (rc != MBX_SUCCESS) {
5967		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5968			"2012 Mailbox failed , mbxCmd x%x "
5969			"READ_CONFIG, mbxStatus x%x\n",
5970			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5971			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5972		rc = -EIO;
5973	} else {
5974		rd_config = &pmb->u.mqe.un.rd_config;
5975		phba->sli4_hba.extents_in_use =
5976			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
5977		phba->sli4_hba.max_cfg_param.max_xri =
5978			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5979		phba->sli4_hba.max_cfg_param.xri_base =
5980			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5981		phba->sli4_hba.max_cfg_param.max_vpi =
5982			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5983		phba->sli4_hba.max_cfg_param.vpi_base =
5984			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5985		phba->sli4_hba.max_cfg_param.max_rpi =
5986			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5987		phba->sli4_hba.max_cfg_param.rpi_base =
5988			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5989		phba->sli4_hba.max_cfg_param.max_vfi =
5990			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5991		phba->sli4_hba.max_cfg_param.vfi_base =
5992			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5993		phba->sli4_hba.max_cfg_param.max_fcfi =
5994			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5995		phba->sli4_hba.max_cfg_param.max_eq =
5996			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5997		phba->sli4_hba.max_cfg_param.max_rq =
5998			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5999		phba->sli4_hba.max_cfg_param.max_wq =
6000			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6001		phba->sli4_hba.max_cfg_param.max_cq =
6002			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6003		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6004		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6005		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6006		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6007		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
6008		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6009				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6010		phba->max_vports = phba->max_vpi;
6011		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6012				"2003 cfg params Extents? %d "
6013				"XRI(B:%d M:%d), "
6014				"VPI(B:%d M:%d) "
6015				"VFI(B:%d M:%d) "
6016				"RPI(B:%d M:%d) "
6017				"FCFI(Count:%d)\n",
6018				phba->sli4_hba.extents_in_use,
6019				phba->sli4_hba.max_cfg_param.xri_base,
6020				phba->sli4_hba.max_cfg_param.max_xri,
6021				phba->sli4_hba.max_cfg_param.vpi_base,
6022				phba->sli4_hba.max_cfg_param.max_vpi,
6023				phba->sli4_hba.max_cfg_param.vfi_base,
6024				phba->sli4_hba.max_cfg_param.max_vfi,
6025				phba->sli4_hba.max_cfg_param.rpi_base,
6026				phba->sli4_hba.max_cfg_param.max_rpi,
6027				phba->sli4_hba.max_cfg_param.max_fcfi);
6028	}
6029
6030	if (rc)
6031		goto read_cfg_out;
6032
6033	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
6034	if (phba->cfg_hba_queue_depth >
6035		(phba->sli4_hba.max_cfg_param.max_xri -
6036			lpfc_sli4_get_els_iocb_cnt(phba)))
6037		phba->cfg_hba_queue_depth =
6038			phba->sli4_hba.max_cfg_param.max_xri -
6039				lpfc_sli4_get_els_iocb_cnt(phba);
6040
6041	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6042	    LPFC_SLI_INTF_IF_TYPE_2)
6043		goto read_cfg_out;
6044
6045	/* get the pf# and vf# for SLI4 if_type 2 port */
6046	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6047		  sizeof(struct lpfc_sli4_cfg_mhdr));
6048	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6049			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6050			 length, LPFC_SLI4_MBX_EMBED);
6051
6052	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6053	shdr = (union lpfc_sli4_cfg_shdr *)
6054				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6055	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6056	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6057	if (rc || shdr_status || shdr_add_status) {
6058		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6059				"3026 Mailbox failed , mbxCmd x%x "
6060				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6061				bf_get(lpfc_mqe_command, &pmb->u.mqe),
6062				bf_get(lpfc_mqe_status, &pmb->u.mqe));
6063		rc = -EIO;
6064		goto read_cfg_out;
6065	}
6066
6067	/* search for the fc_fcoe resource descriptor */
6068	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6069	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6070
6071	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6072		desc = (struct lpfc_rsrc_desc_fcfcoe *)
6073			&get_func_cfg->func_cfg.desc[i];
6074		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6075		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6076			phba->sli4_hba.iov.pf_number =
6077				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6078			phba->sli4_hba.iov.vf_number =
6079				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6080			break;
6081		}
6082	}
6083
6084	if (i < LPFC_RSRC_DESC_MAX_NUM)
6085		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6086				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6087				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6088				phba->sli4_hba.iov.vf_number);
6089	else {
6090		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6091				"3028 GET_FUNCTION_CONFIG: failed to find "
6092				"Resrouce Descriptor:x%x\n",
6093				LPFC_RSRC_DESC_TYPE_FCFCOE);
6094		rc = -EIO;
6095	}
6096
6097read_cfg_out:
6098	mempool_free(pmb, phba->mbox_mem_pool);
6099	return rc;
6100}
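
/*
 * Illustrative sketch (hypothetical helper): the queue-depth adjustment
 * above reserves XRIs for ELS traffic before sizing the FCP queue
 * depth -- a simple clamp:
 */
static inline uint32_t lpfc_example_clamp_queue_depth(uint32_t requested,
						      uint32_t max_xri,
						      uint32_t els_xri_cnt)
{
	uint32_t ceiling = max_xri - els_xri_cnt;	/* XRIs left for FCP */

	return (requested > ceiling) ? ceiling : requested;
}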
6101
6102/**
6103 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6104 * @phba: pointer to lpfc hba data structure.
6105 *
6106 * This routine is invoked to setup the port-side endian order when
6107 * the port if_type is 0.  This routine has no function for other
6108 * if_types.
6109 *
6110 * Return codes
6111 * 	0 - successful
6112 * 	-ENOMEM - No available memory
6113 *      -EIO - The mailbox failed to complete successfully.
6114 **/
6115static int
6116lpfc_setup_endian_order(struct lpfc_hba *phba)
6117{
6118	LPFC_MBOXQ_t *mboxq;
6119	uint32_t if_type, rc = 0;
6120	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6121				      HOST_ENDIAN_HIGH_WORD1};
6122
6123	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6124	switch (if_type) {
6125	case LPFC_SLI_INTF_IF_TYPE_0:
6126		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6127						       GFP_KERNEL);
6128		if (!mboxq) {
6129			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6130					"0492 Unable to allocate memory for "
6131					"issuing SLI_CONFIG_SPECIAL mailbox "
6132					"command\n");
6133			return -ENOMEM;
6134		}
6135
6136		/*
6137		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6138		 * two words to contain special data values and no other data.
6139		 */
6140		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6141		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6142		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6143		if (rc != MBX_SUCCESS) {
6144			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6145					"0493 SLI_CONFIG_SPECIAL mailbox "
6146					"failed with status x%x\n",
6147					rc);
6148			rc = -EIO;
6149		}
6150		mempool_free(mboxq, phba->mbox_mem_pool);
6151		break;
6152	case LPFC_SLI_INTF_IF_TYPE_2:
6153	case LPFC_SLI_INTF_IF_TYPE_1:
6154	default:
6155		break;
6156	}
6157	return rc;
6158}
6159
6160/**
6161 * lpfc_sli4_queue_create - Create all the SLI4 queues
6162 * @phba: pointer to lpfc hba data structure.
6163 *
6164 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6165 * operation. For each SLI4 queue type, the parameters such as queue entry
6166 * count (queue depth) shall be taken from the module parameter. For now,
6167 * we just use some constant number as place holder.
6168 *
6169 * Return codes
6170 *      0 - successful
6171 *      -ENOMEM - No available memory
6172 *      -EIO - The mailbox failed to complete successfully.
6173 **/
6174static int
6175lpfc_sli4_queue_create(struct lpfc_hba *phba)
6176{
6177	struct lpfc_queue *qdesc;
6178	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6179	int cfg_fcp_wq_count;
6180	int cfg_fcp_eq_count;
6181
6182	/*
6183	 * Sanity check for configured queue parameters against the run-time
6184	 * device parameters
6185	 */
6186
6187	/* Sanity check on FCP fast-path WQ parameters */
6188	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
6189	if (cfg_fcp_wq_count >
6190	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
6191		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
6192				   LPFC_SP_WQN_DEF;
6193		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
6194			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6195					"2581 Not enough WQs (%d) from "
6196					"the pci function for supporting "
6197					"FCP WQs (%d)\n",
6198					phba->sli4_hba.max_cfg_param.max_wq,
6199					phba->cfg_fcp_wq_count);
6200			goto out_error;
6201		}
6202		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6203				"2582 Not enough WQs (%d) from the pci "
6204				"function for supporting the requested "
6205				"FCP WQs (%d), the actual FCP WQs can "
6206				"be supported: %d\n",
6207				phba->sli4_hba.max_cfg_param.max_wq,
6208				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6209	}
6210	/* The actual number of FCP work queues adopted */
6211	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6212
6213	/* Sanity check on FCP fast-path EQ parameters */
6214	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6215	if (cfg_fcp_eq_count >
6216	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6217		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6218				   LPFC_SP_EQN_DEF;
6219		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6220			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6221					"2574 Not enough EQs (%d) from the "
6222					"pci function for supporting FCP "
6223					"EQs (%d)\n",
6224					phba->sli4_hba.max_cfg_param.max_eq,
6225					phba->cfg_fcp_eq_count);
6226			goto out_error;
6227		}
6228		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6229				"2575 Not enough EQs (%d) from the pci "
6230				"function for supporting the requested "
6231				"FCP EQs (%d), the actual FCP EQs can "
6232				"be supported: %d\n",
6233				phba->sli4_hba.max_cfg_param.max_eq,
6234				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
6235	}
6236	/* It does not make sense to have more EQs than WQs */
6237	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6238		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6239				"2593 The FCP EQ count(%d) cannot be greater "
6240				"than the FCP WQ count(%d), limiting the "
6241				"FCP EQ count to %d\n", cfg_fcp_eq_count,
6242				phba->cfg_fcp_wq_count,
6243				phba->cfg_fcp_wq_count);
6244		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6245	}
6246	/* The actual number of FCP event queues adopted */
6247	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
6248	/* The overall number of event queues used */
6249	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
6250
6251	/*
6252	 * Create Event Queues (EQs)
6253	 */
6254
6255	/* Get EQ depth from module parameter, fake the default for now */
6256	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6257	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6258
6259	/* Create slow path event queue */
6260	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6261				      phba->sli4_hba.eq_ecount);
6262	if (!qdesc) {
6263		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6264				"0496 Failed allocate slow-path EQ\n");
6265		goto out_error;
6266	}
6267	phba->sli4_hba.sp_eq = qdesc;
6268
6269	/* Create fast-path FCP Event Queue(s) */
6270	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6271			       phba->cfg_fcp_eq_count), GFP_KERNEL);
6272	if (!phba->sli4_hba.fp_eq) {
6273		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6274				"2576 Failed allocate memory for fast-path "
6275				"EQ record array\n");
6276		goto out_free_sp_eq;
6277	}
6278	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6279		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6280					      phba->sli4_hba.eq_ecount);
6281		if (!qdesc) {
6282			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6283					"0497 Failed allocate fast-path EQ\n");
6284			goto out_free_fp_eq;
6285		}
6286		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6287	}
6288
6289	/*
6290	 * Create Complete Queues (CQs)
6291	 */
6292
6293	/* Get CQ depth from module parameter, fake the default for now */
6294	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6295	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6296
6297	/* Create slow-path Mailbox Command Complete Queue */
6298	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6299				      phba->sli4_hba.cq_ecount);
6300	if (!qdesc) {
6301		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6302				"0500 Failed allocate slow-path mailbox CQ\n");
6303		goto out_free_fp_eq;
6304	}
6305	phba->sli4_hba.mbx_cq = qdesc;
6306
6307	/* Create slow-path ELS Complete Queue */
6308	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6309				      phba->sli4_hba.cq_ecount);
6310	if (!qdesc) {
6311		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6312				"0501 Failed allocate slow-path ELS CQ\n");
6313		goto out_free_mbx_cq;
6314	}
6315	phba->sli4_hba.els_cq = qdesc;
6316
6318	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
6319	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6320				phba->cfg_fcp_eq_count), GFP_KERNEL);
6321	if (!phba->sli4_hba.fcp_cq) {
6322		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6323				"2577 Failed allocate memory for fast-path "
6324				"CQ record array\n");
6325		goto out_free_els_cq;
6326	}
6327	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6328		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6329					      phba->sli4_hba.cq_ecount);
6330		if (!qdesc) {
6331			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6332					"0499 Failed allocate fast-path FCP "
6333					"CQ (%d)\n", fcp_cqidx);
6334			goto out_free_fcp_cq;
6335		}
6336		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6337	}
6338
6339	/* Create Mailbox Command Queue */
6340	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6341	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6342
6343	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6344				      phba->sli4_hba.mq_ecount);
6345	if (!qdesc) {
6346		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6347				"0505 Failed allocate slow-path MQ\n");
6348		goto out_free_fcp_cq;
6349	}
6350	phba->sli4_hba.mbx_wq = qdesc;
6351
6352	/*
6353	 * Create all the Work Queues (WQs)
6354	 */
6355	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6356	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6357
6358	/* Create slow-path ELS Work Queue */
6359	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6360				      phba->sli4_hba.wq_ecount);
6361	if (!qdesc) {
6362		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6363				"0504 Failed allocate slow-path ELS WQ\n");
6364		goto out_free_mbx_wq;
6365	}
6366	phba->sli4_hba.els_wq = qdesc;
6367
6368	/* Create fast-path FCP Work Queue(s) */
6369	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6370				phba->cfg_fcp_wq_count), GFP_KERNEL);
6371	if (!phba->sli4_hba.fcp_wq) {
6372		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6373				"2578 Failed allocate memory for fast-path "
6374				"WQ record array\n");
6375		goto out_free_els_wq;
6376	}
6377	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6378		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6379					      phba->sli4_hba.wq_ecount);
6380		if (!qdesc) {
6381			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6382					"0503 Failed allocate fast-path FCP "
6383					"WQ (%d)\n", fcp_wqidx);
6384			goto out_free_fcp_wq;
6385		}
6386		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6387	}
6388
6389	/*
6390	 * Create Receive Queue (RQ)
6391	 */
6392	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6393	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6394
6395	/* Create Receive Queue for header */
6396	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6397				      phba->sli4_hba.rq_ecount);
6398	if (!qdesc) {
6399		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6400				"0506 Failed allocate receive HRQ\n");
6401		goto out_free_fcp_wq;
6402	}
6403	phba->sli4_hba.hdr_rq = qdesc;
6404
6405	/* Create Receive Queue for data */
6406	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6407				      phba->sli4_hba.rq_ecount);
6408	if (!qdesc) {
6409		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6410				"0507 Failed allocate receive DRQ\n");
6411		goto out_free_hdr_rq;
6412	}
6413	phba->sli4_hba.dat_rq = qdesc;
6414
6415	return 0;
6416
6417out_free_hdr_rq:
6418	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6419	phba->sli4_hba.hdr_rq = NULL;
6420out_free_fcp_wq:
6421	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6422		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6423		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6424	}
6425	kfree(phba->sli4_hba.fcp_wq);
6426out_free_els_wq:
6427	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6428	phba->sli4_hba.els_wq = NULL;
6429out_free_mbx_wq:
6430	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6431	phba->sli4_hba.mbx_wq = NULL;
6432out_free_fcp_cq:
6433	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6434		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6435		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6436	}
6437	kfree(phba->sli4_hba.fcp_cq);
6438out_free_els_cq:
6439	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6440	phba->sli4_hba.els_cq = NULL;
6441out_free_mbx_cq:
6442	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6443	phba->sli4_hba.mbx_cq = NULL;
6444out_free_fp_eq:
6445	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6446		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6447		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6448	}
6449	kfree(phba->sli4_hba.fp_eq);
6450out_free_sp_eq:
6451	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6452	phba->sli4_hba.sp_eq = NULL;
6453out_error:
6454	return -ENOMEM;
6455}
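
/*
 * Illustrative sketch (hypothetical helper): the WQ and EQ sanity checks
 * above share one shape -- subtract the slow-path reservation from the
 * hardware maximum, fail if even the minimum fast-path count does not
 * fit, and otherwise clamp the request:
 */
static inline int lpfc_example_clamp_fp_count(int requested, int hw_max,
					      int sp_reserved, int fp_min)
{
	int avail = hw_max - sp_reserved;	/* queues left for fast path */

	if (requested <= avail)
		return requested;	/* request fits as-is */
	if (avail < fp_min)
		return -1;		/* cannot support the fast path */
	return avail;			/* clamp to what the function offers */
}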
6456
6457/**
6458 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6459 * @phba: pointer to lpfc hba data structure.
6460 *
6461 * This routine is invoked to release all the SLI4 queues used for the FCoE
6462 * HBA operation.
6468 **/
6469static void
6470lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6471{
6472	int fcp_qidx;
6473
6474	/* Release mailbox command work queue */
6475	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6476	phba->sli4_hba.mbx_wq = NULL;
6477
6478	/* Release ELS work queue */
6479	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6480	phba->sli4_hba.els_wq = NULL;
6481
6482	/* Release FCP work queue */
6483	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6484		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6485	kfree(phba->sli4_hba.fcp_wq);
6486	phba->sli4_hba.fcp_wq = NULL;
6487
6488	/* Release unsolicited receive queue */
6489	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6490	phba->sli4_hba.hdr_rq = NULL;
6491	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6492	phba->sli4_hba.dat_rq = NULL;
6493
6494	/* Release ELS complete queue */
6495	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6496	phba->sli4_hba.els_cq = NULL;
6497
6498	/* Release mailbox command complete queue */
6499	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6500	phba->sli4_hba.mbx_cq = NULL;
6501
6502	/* Release FCP response complete queue */
6503	fcp_qidx = 0;
6504	do {
6505		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6506	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
6507	kfree(phba->sli4_hba.fcp_cq);
6508	phba->sli4_hba.fcp_cq = NULL;
6509
6510	/* Release fast-path event queue */
6511	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6512		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6513	kfree(phba->sli4_hba.fp_eq);
6514	phba->sli4_hba.fp_eq = NULL;
6515
6516	/* Release slow-path event queue */
6517	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6518	phba->sli4_hba.sp_eq = NULL;
6519
6520	return;
6521}
6522
6523/**
6524 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6525 * @phba: pointer to lpfc hba data structure.
6526 *
6527 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6528 * operation.
6529 *
6530 * Return codes
6531 *      0 - successful
6532 *      -ENOMEM - No available memory
6533 *      -EIO - The mailbox failed to complete successfully.
6534 **/
6535int
6536lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6537{
6538	int rc = -ENOMEM;
6539	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6540	int fcp_cq_index = 0;
6541
6542	/*
6543	 * Set up Event Queues (EQs)
6544	 */
6545
6546	/* Set up slow-path event queue */
6547	if (!phba->sli4_hba.sp_eq) {
6548		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6549				"0520 Slow-path EQ not allocated\n");
6550		goto out_error;
6551	}
6552	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6553			    LPFC_SP_DEF_IMAX);
6554	if (rc) {
6555		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6556				"0521 Failed setup of slow-path EQ: "
6557				"rc = 0x%x\n", rc);
6558		goto out_error;
6559	}
6560	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6561			"2583 Slow-path EQ setup: queue-id=%d\n",
6562			phba->sli4_hba.sp_eq->queue_id);
6563
6564	/* Set up fast-path event queue */
6565	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6566		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6567			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6568					"0522 Fast-path EQ (%d) not "
6569					"allocated\n", fcp_eqidx);
6570			goto out_destroy_fp_eq;
6571		}
6572		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6573				    phba->cfg_fcp_imax);
6574		if (rc) {
6575			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6576					"0523 Failed setup of fast-path EQ "
6577					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6578			goto out_destroy_fp_eq;
6579		}
6580		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6581				"2584 Fast-path EQ setup: "
6582				"queue[%d]-id=%d\n", fcp_eqidx,
6583				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6584	}
6585
6586	/*
6587	 * Set up Complete Queues (CQs)
6588	 */
6589
6590	/* Set up slow-path MBOX Complete Queue as the first CQ */
6591	if (!phba->sli4_hba.mbx_cq) {
6592		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6593				"0528 Mailbox CQ not allocated\n");
6594		goto out_destroy_fp_eq;
6595	}
6596	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6597			    LPFC_MCQ, LPFC_MBOX);
6598	if (rc) {
6599		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6600				"0529 Failed setup of slow-path mailbox CQ: "
6601				"rc = 0x%x\n", rc);
6602		goto out_destroy_fp_eq;
6603	}
6604	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6605			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6606			phba->sli4_hba.mbx_cq->queue_id,
6607			phba->sli4_hba.sp_eq->queue_id);
6608
6609	/* Set up slow-path ELS Complete Queue */
6610	if (!phba->sli4_hba.els_cq) {
6611		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6612				"0530 ELS CQ not allocated\n");
6613		goto out_destroy_mbx_cq;
6614	}
6615	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6616			    LPFC_WCQ, LPFC_ELS);
6617	if (rc) {
6618		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6619				"0531 Failed setup of slow-path ELS CQ: "
6620				"rc = 0x%x\n", rc);
6621		goto out_destroy_mbx_cq;
6622	}
6623	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6624			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6625			phba->sli4_hba.els_cq->queue_id,
6626			phba->sli4_hba.sp_eq->queue_id);
6627
6628	/* Set up fast-path FCP Response Complete Queue */
6629	fcp_cqidx = 0;
6630	do {
6631		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6632			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6633					"0526 Fast-path FCP CQ (%d) not "
6634					"allocated\n", fcp_cqidx);
6635			goto out_destroy_fcp_cq;
6636		}
6637		if (phba->cfg_fcp_eq_count)
6638			rc = lpfc_cq_create(phba,
6639					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6640					    phba->sli4_hba.fp_eq[fcp_cqidx],
6641					    LPFC_WCQ, LPFC_FCP);
6642		else
6643			rc = lpfc_cq_create(phba,
6644					    phba->sli4_hba.fcp_cq[fcp_cqidx],
6645					    phba->sli4_hba.sp_eq,
6646					    LPFC_WCQ, LPFC_FCP);
6647		if (rc) {
6648			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6649					"0527 Failed setup of fast-path FCP "
6650					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6651			goto out_destroy_fcp_cq;
6652		}
6653		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6654				"2588 FCP CQ setup: cq[%d]-id=%d, "
6655				"parent %seq[%d]-id=%d\n",
6656				fcp_cqidx,
6657				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6658				(phba->cfg_fcp_eq_count) ? "" : "sp_",
6659				fcp_cqidx,
6660				(phba->cfg_fcp_eq_count) ?
6661				   phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6662				   phba->sli4_hba.sp_eq->queue_id);
6663	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6664
6665	/*
6666	 * Set up all the Work Queues (WQs)
6667	 */
6668
6669	/* Set up Mailbox Command Queue */
6670	if (!phba->sli4_hba.mbx_wq) {
6671		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6672				"0538 Slow-path MQ not allocated\n");
6673		goto out_destroy_fcp_cq;
6674	}
6675	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6676			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6677	if (rc) {
6678		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6679				"0539 Failed setup of slow-path MQ: "
6680				"rc = 0x%x\n", rc);
6681		goto out_destroy_fcp_cq;
6682	}
6683	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6684			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6685			phba->sli4_hba.mbx_wq->queue_id,
6686			phba->sli4_hba.mbx_cq->queue_id);
6687
6688	/* Set up slow-path ELS Work Queue */
6689	if (!phba->sli4_hba.els_wq) {
6690		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6691				"0536 Slow-path ELS WQ not allocated\n");
6692		goto out_destroy_mbx_wq;
6693	}
6694	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6695			    phba->sli4_hba.els_cq, LPFC_ELS);
6696	if (rc) {
6697		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6698				"0537 Failed setup of slow-path ELS WQ: "
6699				"rc = 0x%x\n", rc);
6700		goto out_destroy_mbx_wq;
6701	}
6702	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6703			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6704			phba->sli4_hba.els_wq->queue_id,
6705			phba->sli4_hba.els_cq->queue_id);
6706
6707	/* Set up fast-path FCP Work Queue */
6708	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6709		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6710			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6711					"0534 Fast-path FCP WQ (%d) not "
6712					"allocated\n", fcp_wqidx);
6713			goto out_destroy_fcp_wq;
6714		}
6715		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6716				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6717				    LPFC_FCP);
6718		if (rc) {
6719			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6720					"0535 Failed setup of fast-path FCP "
6721					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6722			goto out_destroy_fcp_wq;
6723		}
6724		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6725				"2591 FCP WQ setup: wq[%d]-id=%d, "
6726				"parent cq[%d]-id=%d\n",
6727				fcp_wqidx,
6728				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6729				fcp_cq_index,
6730				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6731		/* Round robin FCP Work Queue's Completion Queue assignment */
6732		if (phba->cfg_fcp_eq_count)
6733			fcp_cq_index = ((fcp_cq_index + 1) %
6734					phba->cfg_fcp_eq_count);
6735	}
6736
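	/*
	 * Illustrative example (assuming fcp_cq_index starts at 0, which is
	 * set up outside this excerpt): with cfg_fcp_wq_count = 4 and
	 * cfg_fcp_eq_count = 2, the round-robin assignment above binds
	 * wq[0]->cq[0], wq[1]->cq[1], wq[2]->cq[0], wq[3]->cq[1].
	 */
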
6737	/*
6738	 * Create Receive Queue (RQ)
6739	 */
6740	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6741		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6742				"0540 Receive Queue not allocated\n");
6743		goto out_destroy_fcp_wq;
6744	}
6745
6746	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
6747	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
6748
6749	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6750			    phba->sli4_hba.els_cq, LPFC_USOL);
6751	if (rc) {
6752		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6753				"0541 Failed setup of Receive Queue: "
6754				"rc = 0x%x\n", rc);
6755		goto out_destroy_fcp_wq;
6756	}
6757
6758	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6759			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6760			"parent cq-id=%d\n",
6761			phba->sli4_hba.hdr_rq->queue_id,
6762			phba->sli4_hba.dat_rq->queue_id,
6763			phba->sli4_hba.els_cq->queue_id);
6764	return 0;
6765
6766out_destroy_fcp_wq:
6767	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6768		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6769	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6770out_destroy_mbx_wq:
6771	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6772out_destroy_fcp_cq:
6773	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6774		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6775	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6776out_destroy_mbx_cq:
6777	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6778out_destroy_fp_eq:
6779	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6780		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6781	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6782out_error:
6783	return rc;
6784}
6785
6786/**
6787 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6788 * @phba: pointer to lpfc hba data structure.
6789 *
6790 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6791 * operation.
6797 **/
6798void
6799lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6800{
6801	int fcp_qidx;
6802
6803	/* Unset mailbox command work queue */
6804	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6805	/* Unset ELS work queue */
6806	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6807	/* Unset unsolicited receive queue */
6808	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6809	/* Unset FCP work queue */
6810	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6811		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6812	/* Unset mailbox command complete queue */
6813	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6814	/* Unset ELS complete queue */
6815	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6816	/* Unset FCP response complete queue */
6817	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6818		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6819	/* Unset fast-path event queue */
6820	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6821		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6822	/* Unset slow-path event queue */
6823	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6824}
6825
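/*
 * Note: the unset order above is child-first, reversing the queue setup
 * dependencies: work/receive queues (MQ, WQ, RQ) are destroyed before
 * their parent CQs, and CQs before their parent EQs.
 */
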
6826/**
6827 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6828 * @phba: pointer to lpfc hba data structure.
6829 *
6830 * This routine is invoked to allocate and set up a pool of completion queue
6831 * events. The body of the completion queue event is a completion queue entry
6832 * CQE. For now, this pool is used for the interrupt service routine to queue
6833 * the following HBA completion queue events for the worker thread to process:
6834 *   - Mailbox asynchronous events
6835 *   - Receive queue completion unsolicited events
6836 * Later, this can be used for all the slow-path events.
6837 *
6838 * Return codes
6839 *      0 - successful
6840 *      -ENOMEM - No available memory
6841 **/
6842static int
6843lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6844{
6845	struct lpfc_cq_event *cq_event;
6846	int i;
6847
6848	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6849		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6850		if (!cq_event)
6851			goto out_pool_create_fail;
6852		list_add_tail(&cq_event->list,
6853			      &phba->sli4_hba.sp_cqe_event_pool);
6854	}
6855	return 0;
6856
6857out_pool_create_fail:
6858	lpfc_sli4_cq_event_pool_destroy(phba);
6859	return -ENOMEM;
6860}
6861
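/*
 * Note: the pool above is sized at 4 * cq_ecount entries, i.e. four
 * preallocated events per slow-path CQ entry, presumably to give the
 * interrupt path headroom to keep queueing events while the worker
 * thread drains them.
 */
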
6862/**
6863 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6864 * @phba: pointer to lpfc hba data structure.
6865 *
6866 * This routine is invoked to free the pool of completion queue events at
6867 * driver unload time. Note that, it is the responsibility of the driver
6868 * cleanup routine to free all the outstanding completion-queue events
6869 * allocated from this pool back into the pool before invoking this routine
6870 * to destroy the pool.
6871 **/
6872static void
6873lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6874{
6875	struct lpfc_cq_event *cq_event, *next_cq_event;
6876
6877	list_for_each_entry_safe(cq_event, next_cq_event,
6878				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6879		list_del(&cq_event->list);
6880		kfree(cq_event);
6881	}
6882}
6883
6884/**
6885 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6886 * @phba: pointer to lpfc hba data structure.
6887 *
6888 * This routine is the lock free version of the API invoked to allocate a
6889 * completion-queue event from the free pool.
6890 *
6891 * Return: Pointer to the newly allocated completion-queue event if successful
6892 *         NULL otherwise.
6893 **/
6894struct lpfc_cq_event *
6895__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6896{
6897	struct lpfc_cq_event *cq_event = NULL;
6898
6899	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6900			 struct lpfc_cq_event, list);
6901	return cq_event;
6902}
6903
6904/**
6905 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6906 * @phba: pointer to lpfc hba data structure.
6907 *
6908 * This routine is the lock version of the API invoked to allocate a
6909 * completion-queue event from the free pool.
6910 *
6911 * Return: Pointer to the newly allocated completion-queue event if successful
6912 *         NULL otherwise.
6913 **/
6914struct lpfc_cq_event *
6915lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6916{
6917	struct lpfc_cq_event *cq_event;
6918	unsigned long iflags;
6919
6920	spin_lock_irqsave(&phba->hbalock, iflags);
6921	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6922	spin_unlock_irqrestore(&phba->hbalock, iflags);
6923	return cq_event;
6924}
6925
6926/**
6927 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6928 * @phba: pointer to lpfc hba data structure.
6929 * @cq_event: pointer to the completion queue event to be freed.
6930 *
6931 * This routine is the lock free version of the API invoked to release a
6932 * completion-queue event back into the free pool.
6933 **/
6934void
6935__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6936			     struct lpfc_cq_event *cq_event)
6937{
6938	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6939}
6940
6941/**
6942 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6943 * @phba: pointer to lpfc hba data structure.
6944 * @cq_event: pointer to the completion queue event to be freed.
6945 *
6946 * This routine is the lock version of the API invoked to release a
6947 * completion-queue event back into the free pool.
6948 **/
6949void
6950lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6951			   struct lpfc_cq_event *cq_event)
6952{
6953	unsigned long iflags;
6954	spin_lock_irqsave(&phba->hbalock, iflags);
6955	__lpfc_sli4_cq_event_release(phba, cq_event);
6956	spin_unlock_irqrestore(&phba->hbalock, iflags);
6957}
6958
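/*
 * Illustrative usage sketch (not driver code): producers in interrupt
 * context and the worker-thread consumer pair the two APIs around a
 * copied completion-queue entry, along the lines of:
 *
 *	struct lpfc_cq_event *evt;
 *
 *	evt = lpfc_sli4_cq_event_alloc(phba);
 *	if (evt) {
 *		// fill evt from the CQE, queue it to a work list,
 *		// let the worker thread process it, and finally:
 *		lpfc_sli4_cq_event_release(phba, evt);
 *	}
 */
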
6959/**
6960 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6961 * @phba: pointer to lpfc hba data structure.
6962 *
6963 * This routine is invoked to free all the pending completion-queue events
6964 * back into the free pool for device reset.
6965 **/
6966static void
6967lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6968{
6969	LIST_HEAD(cqelist);
6970	struct lpfc_cq_event *cqe;
6971	unsigned long iflags;
6972
6973	/* Retrieve all the pending WCQEs from pending WCQE lists */
6974	spin_lock_irqsave(&phba->hbalock, iflags);
6975	/* Pending FCP XRI abort events */
6976	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6977			 &cqelist);
6978	/* Pending ELS XRI abort events */
6979	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6980			 &cqelist);
6981	/* Pending async events */
6982	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6983			 &cqelist);
6984	spin_unlock_irqrestore(&phba->hbalock, iflags);
6985
6986	while (!list_empty(&cqelist)) {
6987		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6988		lpfc_sli4_cq_event_release(phba, cqe);
6989	}
6990}
6991
6992/**
6993 * lpfc_pci_function_reset - Reset pci function.
6994 * @phba: pointer to lpfc hba data structure.
6995 *
6996 * This routine is invoked to request a PCI function reset. It destroys
6997 * all resources assigned to the PCI function which originates this request.
6998 *
6999 * Return codes
7000 *      0 - successful
7001 *      -ENOMEM - No available memory
7002 *      -ENXIO or -ENODEV - The mailbox or port reset failed to complete.
7003 **/
7004int
7005lpfc_pci_function_reset(struct lpfc_hba *phba)
7006{
7007	LPFC_MBOXQ_t *mboxq;
7008	uint32_t rc = 0, if_type;
7009	uint32_t shdr_status, shdr_add_status;
7010	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7011	union lpfc_sli4_cfg_shdr *shdr;
7012	struct lpfc_register reg_data;
7013
7014	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7015	switch (if_type) {
7016	case LPFC_SLI_INTF_IF_TYPE_0:
7017		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7018						       GFP_KERNEL);
7019		if (!mboxq) {
7020			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7021					"0494 Unable to allocate memory for "
7022					"issuing SLI_FUNCTION_RESET mailbox "
7023					"command\n");
7024			return -ENOMEM;
7025		}
7026
7027		/* Setup PCI function reset mailbox-ioctl command */
7028		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7029				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7030				 LPFC_SLI4_MBX_EMBED);
7031		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7032		shdr = (union lpfc_sli4_cfg_shdr *)
7033			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7034		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7035		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7036					 &shdr->response);
7037		if (rc != MBX_TIMEOUT)
7038			mempool_free(mboxq, phba->mbox_mem_pool);
7039		if (shdr_status || shdr_add_status || rc) {
7040			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7041					"0495 SLI_FUNCTION_RESET mailbox "
7042					"failed with status x%x add_status x%x,"
7043					" mbx status x%x\n",
7044					shdr_status, shdr_add_status, rc);
7045			rc = -ENXIO;
7046		}
7047		break;
7048	case LPFC_SLI_INTF_IF_TYPE_2:
7049		for (num_resets = 0;
7050		     num_resets < MAX_IF_TYPE_2_RESETS;
7051		     num_resets++) {
7052			reg_data.word0 = 0;
7053			bf_set(lpfc_sliport_ctrl_end, &reg_data,
7054			       LPFC_SLIPORT_LITTLE_ENDIAN);
7055			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7056			       LPFC_SLIPORT_INIT_PORT);
7057			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7058			       CTRLregaddr);
7059
7060			/*
7061			 * Poll the Port Status Register and wait for RDY for
7062			 * up to 10 seconds.  If the port doesn't respond, treat
7063			 * it as an error.  If the port responds with RN, start
7064			 * the loop again.
7065			 */
7066			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7067				msleep(10);
7068				if (lpfc_readl(phba->sli4_hba.u.if_type2.
7069					      STATUSregaddr, &reg_data.word0)) {
7070					rc = -ENODEV;
7071					goto out;
7072				}
7073				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7074					break;
7075				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
7076					reset_again++;
7077					break;
7078				}
7079			}
7080
7081			/*
7082			 * If the port responds to the init request with
7083			 * reset needed, delay for a bit and restart the loop.
7084			 */
7085			if (reset_again) {
7086				msleep(10);
7087				reset_again = 0;
7088				continue;
7089			}
7090
7091			/* Detect any port errors. */
7092			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7093			    (rdy_chk >= 1000)) {
7094				phba->work_status[0] = readl(
7095					phba->sli4_hba.u.if_type2.ERR1regaddr);
7096				phba->work_status[1] = readl(
7097					phba->sli4_hba.u.if_type2.ERR2regaddr);
7098				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7099					"2890 Port Error Detected "
7100					"during Port Reset: "
7101					"port status reg 0x%x, "
7102					"error 1=0x%x, error 2=0x%x\n",
7103					reg_data.word0,
7104					phba->work_status[0],
7105					phba->work_status[1]);
7106				rc = -ENODEV;
7107			}
7108
7109			/*
7110			 * Terminate the outer loop provided the Port indicated
7111			 * ready within 10 seconds.
7112			 */
7113			if (rdy_chk < 1000)
7114				break;
7115		}
7116		/* delay driver action following IF_TYPE_2 function reset */
7117		msleep(100);
7118		break;
7119	case LPFC_SLI_INTF_IF_TYPE_1:
7120	default:
7121		break;
7122	}
7123
7124out:
7125	/* Catch the not-ready port failure after a port reset. */
7126	if (num_resets >= MAX_IF_TYPE_2_RESETS)
7127		rc = -ENODEV;
7128
7129	return rc;
7130}
7131
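/*
 * Note: in the IF_TYPE_2 path above, the RDY poll runs up to 1000
 * iterations with a 10ms sleep each, i.e. the 10 second window described
 * in the inline comment; rdy_chk reaching 1000 therefore means the port
 * never signalled ready and is treated as a port error.
 */
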
7132/**
7133 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7134 * @phba: pointer to lpfc hba data structure.
7135 * @cnt: number of nop mailbox commands to send.
7136 *
7137 * This routine is invoked to send a number @cnt of NOP mailbox commands and
7138 * wait for each command to complete.
7139 *
7140 * Return: the number of NOP mailbox commands completed.
7141 **/
7142static int
7143lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7144{
7145	LPFC_MBOXQ_t *mboxq;
7146	int length, cmdsent;
7147	uint32_t mbox_tmo;
7148	uint32_t rc = 0;
7149	uint32_t shdr_status, shdr_add_status;
7150	union lpfc_sli4_cfg_shdr *shdr;
7151
7152	if (cnt == 0) {
7153		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7154				"2518 Requested to send 0 NOP mailbox cmd\n");
7155		return cnt;
7156	}
7157
7158	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7159	if (!mboxq) {
7160		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7161				"2519 Unable to allocate memory for issuing "
7162				"NOP mailbox command\n");
7163		return 0;
7164	}
7165
7166	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7167	length = (sizeof(struct lpfc_mbx_nop) -
7168		  sizeof(struct lpfc_sli4_cfg_mhdr));
7169	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7170			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7171
7172	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7173		if (!phba->sli4_hba.intr_enable)
7174			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7175		else {
7176			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7177			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7178		}
7179		if (rc == MBX_TIMEOUT)
7180			break;
7181		/* Check return status */
7182		shdr = (union lpfc_sli4_cfg_shdr *)
7183			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7184		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7185		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7186					 &shdr->response);
7187		if (shdr_status || shdr_add_status || rc) {
7188			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7189					"2520 NOP mailbox command failed "
7190					"status x%x add_status x%x mbx "
7191					"status x%x\n", shdr_status,
7192					shdr_add_status, rc);
7193			break;
7194		}
7195	}
7196
7197	if (rc != MBX_TIMEOUT)
7198		mempool_free(mboxq, phba->mbox_mem_pool);
7199
7200	return cmdsent;
7201}
7202
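/*
 * Illustrative usage sketch (hypothetical count): a caller can treat an
 * incomplete batch as a sign of an unresponsive port, e.g.:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, 10) != 10)
 *		// fewer NOPs completed than requested; handle as error
 */
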
7203/**
7204 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7205 * @phba: pointer to lpfc hba data structure.
7206 *
7207 * This routine is invoked to set up the PCI device memory space for device
7208 * with SLI-4 interface spec.
7209 *
7210 * Return codes
7211 * 	0 - successful
7212 * 	other values - error
7213 **/
7214static int
7215lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7216{
7217	struct pci_dev *pdev;
7218	unsigned long bar0map_len, bar1map_len, bar2map_len;
7219	int error = -ENODEV;
7220	uint32_t if_type;
7221
7222	/* Obtain PCI device reference */
7223	if (!phba->pcidev)
7224		return error;
7225	else
7226		pdev = phba->pcidev;
7227
7228	/* Set the device DMA mask size */
7229	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7230	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
7231		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7232		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
7233			return error;
7234		}
7235	}
7236
7237	/*
7238	 * The BARs and register set definitions and offset locations are
7239	 * dependent on the if_type.
7240	 */
7241	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7242				  &phba->sli4_hba.sli_intf.word0)) {
7243		return error;
7244	}
7245
7246	/* There is no SLI3 fallback for SLI4 devices. */
7247	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7248	    LPFC_SLI_INTF_VALID) {
7249		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7250				"2894 SLI_INTF reg contents invalid "
7251				"sli_intf reg 0x%x\n",
7252				phba->sli4_hba.sli_intf.word0);
7253		return error;
7254	}
7255
7256	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7257	/*
7258	 * Get the bus address of SLI4 device Bar regions and the
7259	 * number of bytes required by each mapping. The mapping of the
7260	 * particular PCI BARs regions is dependent on the type of
7261	 * SLI4 device.
7262	 */
7263	if (pci_resource_start(pdev, 0)) {
7264		phba->pci_bar0_map = pci_resource_start(pdev, 0);
7265		bar0map_len = pci_resource_len(pdev, 0);
7266
7267		/*
7268		 * Map SLI4 PCI Config Space Register base to a kernel virtual
7269		 * addr
7270		 */
7271		phba->sli4_hba.conf_regs_memmap_p =
7272			ioremap(phba->pci_bar0_map, bar0map_len);
7273		if (!phba->sli4_hba.conf_regs_memmap_p) {
7274			dev_printk(KERN_ERR, &pdev->dev,
7275				   "ioremap failed for SLI4 PCI config "
7276				   "registers.\n");
7277			goto out;
7278		}
7279		/* Set up BAR0 PCI config space register memory map */
7280		lpfc_sli4_bar0_register_memmap(phba, if_type);
7281	} else {
7282		phba->pci_bar0_map = pci_resource_start(pdev, 1);
7283		bar0map_len = pci_resource_len(pdev, 1);
7284		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7285			dev_printk(KERN_ERR, &pdev->dev,
7286			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7287			goto out;
7288		}
7289		phba->sli4_hba.conf_regs_memmap_p =
7290				ioremap(phba->pci_bar0_map, bar0map_len);
7291		if (!phba->sli4_hba.conf_regs_memmap_p) {
7292			dev_printk(KERN_ERR, &pdev->dev,
7293				"ioremap failed for SLI4 PCI config "
7294				"registers.\n");
7295			goto out;
7296		}
7297		lpfc_sli4_bar0_register_memmap(phba, if_type);
7298	}
7299
7300	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7301	    (pci_resource_start(pdev, 2))) {
7302		/*
7303		 * Map SLI4 if type 0 HBA Control Register base to a kernel
7304		 * virtual address and setup the registers.
7305		 */
7306		phba->pci_bar1_map = pci_resource_start(pdev, 2);
7307		bar1map_len = pci_resource_len(pdev, 2);
7308		phba->sli4_hba.ctrl_regs_memmap_p =
7309				ioremap(phba->pci_bar1_map, bar1map_len);
7310		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7311			dev_printk(KERN_ERR, &pdev->dev,
7312			   "ioremap failed for SLI4 HBA control registers.\n");
7313			goto out_iounmap_conf;
7314		}
7315		lpfc_sli4_bar1_register_memmap(phba);
7316	}
7317
7318	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7319	    (pci_resource_start(pdev, 4))) {
7320		/*
7321		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7322		 * virtual address and setup the registers.
7323		 */
7324		phba->pci_bar2_map = pci_resource_start(pdev, 4);
7325		bar2map_len = pci_resource_len(pdev, 4);
7326		phba->sli4_hba.drbl_regs_memmap_p =
7327				ioremap(phba->pci_bar2_map, bar2map_len);
7328		if (!phba->sli4_hba.drbl_regs_memmap_p) {
7329			dev_printk(KERN_ERR, &pdev->dev,
7330			   "ioremap failed for SLI4 HBA doorbell registers.\n");
7331			goto out_iounmap_ctrl;
7332		}
7333		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7334		if (error)
7335			goto out_iounmap_all;
7336	}
7337
7338	return 0;
7339
7340out_iounmap_all:
7341	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7342out_iounmap_ctrl:
7343	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7344out_iounmap_conf:
7345	iounmap(phba->sli4_hba.conf_regs_memmap_p);
7346out:
7347	return error;
7348}
7349
7350/**
7351 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7352 * @phba: pointer to lpfc hba data structure.
7353 *
7354 * This routine is invoked to unset the PCI device memory space for device
7355 * with SLI-4 interface spec.
7356 **/
7357static void
7358lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7359{
7360	struct pci_dev *pdev;
7361
7362	/* Obtain PCI device reference */
7363	if (!phba->pcidev)
7364		return;
7365	else
7366		pdev = phba->pcidev;
7367
7368	/* Free coherent DMA memory allocated */
7369
7370	/* Unmap I/O memory space */
7371	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7372	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7373	iounmap(phba->sli4_hba.conf_regs_memmap_p);
7374
7375	return;
7376}
7377
7378/**
7379 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7380 * @phba: pointer to lpfc hba data structure.
7381 *
7382 * This routine is invoked to enable the MSI-X interrupt vectors to device
7383 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7384 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7385 * invoked, enables either all or nothing, depending on the current
7386 * availability of PCI vector resources. The device driver is responsible
7387 * for calling the individual request_irq() to register each MSI-X vector
7388 * with an interrupt handler, which is done in this function. Note that
7389 * later, when the device is unloading, the driver should always call
7390 * free_irq() on all MSI-X vectors it has done request_irq() on before
7391 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
7392 * the device will be left with MSI-X enabled, leaking its vectors.
7393 *
7394 * Return codes
7395 *   0 - successful
7396 *   other values - error
7397 **/
7398static int
7399lpfc_sli_enable_msix(struct lpfc_hba *phba)
7400{
7401	int rc, i;
7402	LPFC_MBOXQ_t *pmb;
7403
7404	/* Set up MSI-X multi-message vectors */
7405	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7406		phba->msix_entries[i].entry = i;
7407
7408	/* Configure MSI-X capability structure */
7409	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7410				ARRAY_SIZE(phba->msix_entries));
7411	if (rc) {
7412		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7413				"0420 PCI enable MSI-X failed (%d)\n", rc);
7414		goto msi_fail_out;
7415	}
7416	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7417		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7418				"0477 MSI-X entry[%d]: vector=x%x "
7419				"message=%d\n", i,
7420				phba->msix_entries[i].vector,
7421				phba->msix_entries[i].entry);
7422	/*
7423	 * Assign MSI-X vectors to interrupt handlers
7424	 */
7425
7426	/* vector-0 is associated to slow-path handler */
7427	rc = request_irq(phba->msix_entries[0].vector,
7428			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7429			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7430	if (rc) {
7431		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7432				"0421 MSI-X slow-path request_irq failed "
7433				"(%d)\n", rc);
7434		goto msi_fail_out;
7435	}
7436
7437	/* vector-1 is associated to fast-path handler */
7438	rc = request_irq(phba->msix_entries[1].vector,
7439			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7440			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7441
7442	if (rc) {
7443		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7444				"0429 MSI-X fast-path request_irq failed "
7445				"(%d)\n", rc);
7446		goto irq_fail_out;
7447	}
7448
7449	/*
7450	 * Configure HBA MSI-X attention conditions to messages
7451	 */
7452	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7453
7454	if (!pmb) {
7455		rc = -ENOMEM;
7456		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7457				"0474 Unable to allocate memory for issuing "
7458				"MBOX_CONFIG_MSI command\n");
7459		goto mem_fail_out;
7460	}
7461	rc = lpfc_config_msi(phba, pmb);
7462	if (rc)
7463		goto mbx_fail_out;
7464	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7465	if (rc != MBX_SUCCESS) {
7466		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7467				"0351 Config MSI mailbox command failed, "
7468				"mbxCmd x%x, mbxStatus x%x\n",
7469				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7470		goto mbx_fail_out;
7471	}
7472
7473	/* Free memory allocated for mailbox command */
7474	mempool_free(pmb, phba->mbox_mem_pool);
7475	return rc;
7476
7477mbx_fail_out:
7478	/* Free memory allocated for mailbox command */
7479	mempool_free(pmb, phba->mbox_mem_pool);
7480
7481mem_fail_out:
7482	/* free the irq already requested */
7483	free_irq(phba->msix_entries[1].vector, phba);
7484
7485irq_fail_out:
7486	/* free the irq already requested */
7487	free_irq(phba->msix_entries[0].vector, phba);
7488
7489msi_fail_out:
7490	/* Unconfigure MSI-X capability structure */
7491	pci_disable_msix(phba->pcidev);
7492	return rc;
7493}
7494
7495/**
7496 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7497 * @phba: pointer to lpfc hba data structure.
7498 *
7499 * This routine is invoked to release the MSI-X vectors and then disable the
7500 * MSI-X interrupt mode to device with SLI-3 interface spec.
7501 **/
7502static void
7503lpfc_sli_disable_msix(struct lpfc_hba *phba)
7504{
7505	int i;
7506
7507	/* Free up MSI-X multi-message vectors */
7508	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7509		free_irq(phba->msix_entries[i].vector, phba);
7510	/* Disable MSI-X */
7511	pci_disable_msix(phba->pcidev);
7512
7513	return;
7514}
7515
7516/**
7517 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7518 * @phba: pointer to lpfc hba data structure.
7519 *
7520 * This routine is invoked to enable the MSI interrupt mode to device with
7521 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7522 * enable the MSI vector. The device driver is responsible for calling the
7523 * enable the MSI vector. The device driver is responsible for calling
7524 * request_irq() to register the MSI vector with an interrupt handler,
7525 * which is done in this function.
7526 * Return codes
7527 * 	0 - successful
7528 * 	other values - error
7529 **/
7530static int
7531lpfc_sli_enable_msi(struct lpfc_hba *phba)
7532{
7533	int rc;
7534
7535	rc = pci_enable_msi(phba->pcidev);
7536	if (!rc)
7537		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7538				"0462 PCI enable MSI mode success.\n");
7539	else {
7540		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7541				"0471 PCI enable MSI mode failed (%d)\n", rc);
7542		return rc;
7543	}
7544
7545	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7546			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7547	if (rc) {
7548		pci_disable_msi(phba->pcidev);
7549		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7550				"0478 MSI request_irq failed (%d)\n", rc);
7551	}
7552	return rc;
7553}
7554
7555/**
7556 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7557 * @phba: pointer to lpfc hba data structure.
7558 *
7559 * This routine is invoked to disable the MSI interrupt mode to device with
7560 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
7561 * has done request_irq() on before calling pci_disable_msi(). Failure to
7562 * do so results in a BUG_ON() and the device will be left with MSI
7563 * enabled, leaking its vector.
7564 **/
7565static void
7566lpfc_sli_disable_msi(struct lpfc_hba *phba)
7567{
7568	free_irq(phba->pcidev->irq, phba);
7569	pci_disable_msi(phba->pcidev);
7570	return;
7571}
7572
7573/**
7574 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7575 * @phba: pointer to lpfc hba data structure.
7576 * @cfg_mode: interrupt mode requested (2 = MSI-X, 1 = MSI, 0 = INTx).
7577 *
7578 * This routine is invoked to enable the device interrupt and associate the
7579 * driver's interrupt handler(s) with interrupt vector(s) of a device with
7580 * SLI-3 interface spec. Depending on the interrupt mode configured for the
7581 * driver, it will try to fall back from the configured mode to a mode
7582 * supported by the platform, kernel, and device, in the order:
7583 * MSI-X -> MSI -> IRQ.
7584 *
7585 * Return codes
7586 *   the active interrupt mode (2, 1, or 0) - successful
7587 *   LPFC_INTR_ERROR - failed to enable any interrupt mode
7588 **/
7589static uint32_t
7590lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7591{
7592	uint32_t intr_mode = LPFC_INTR_ERROR;
7593	int retval;
7594
7595	if (cfg_mode == 2) {
7596		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7597		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7598		if (!retval) {
7599			/* Now, try to enable MSI-X interrupt mode */
7600			retval = lpfc_sli_enable_msix(phba);
7601			if (!retval) {
7602				/* Indicate initialization to MSI-X mode */
7603				phba->intr_type = MSIX;
7604				intr_mode = 2;
7605			}
7606		}
7607	}
7608
7609	/* Fallback to MSI if MSI-X initialization failed */
7610	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7611		retval = lpfc_sli_enable_msi(phba);
7612		if (!retval) {
7613			/* Indicate initialization to MSI mode */
7614			phba->intr_type = MSI;
7615			intr_mode = 1;
7616		}
7617	}
7618
7619	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7620	if (phba->intr_type == NONE) {
7621		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7622				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7623		if (!retval) {
7624			/* Indicate initialization to INTx mode */
7625			phba->intr_type = INTx;
7626			intr_mode = 0;
7627		}
7628	}
7629	return intr_mode;
7630}
7631
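/*
 * Note: the value returned above encodes the mode actually enabled
 * (2 = MSI-X, 1 = MSI, 0 = INTx) or LPFC_INTR_ERROR on failure; the
 * SLI-3 probe path below retries at the next lower mode by feeding
 * --intr_mode back in as cfg_mode.
 */
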
7632/**
7633 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7634 * @phba: pointer to lpfc hba data structure.
7635 *
7636 * This routine is invoked to disable device interrupt and disassociate the
7637 * driver's interrupt handler(s) from interrupt vector(s) to device with
7638 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7639 * release the interrupt vector(s) for the message signaled interrupt.
7640 **/
7641static void
7642lpfc_sli_disable_intr(struct lpfc_hba *phba)
7643{
7644	/* Disable the currently initialized interrupt mode */
7645	if (phba->intr_type == MSIX)
7646		lpfc_sli_disable_msix(phba);
7647	else if (phba->intr_type == MSI)
7648		lpfc_sli_disable_msi(phba);
7649	else if (phba->intr_type == INTx)
7650		free_irq(phba->pcidev->irq, phba);
7651
7652	/* Reset interrupt management states */
7653	phba->intr_type = NONE;
7654	phba->sli.slistat.sli_intr = 0;
7655
7656	return;
7657}
7658
7659/**
7660 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7661 * @phba: pointer to lpfc hba data structure.
7662 *
7663 * This routine is invoked to enable the MSI-X interrupt vectors to device
7664 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7665 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7666 * enables either all or nothing, depending on the current availability of
7667 * PCI vector resources. The device driver is responsible for calling the
7668 * individual request_irq() to register each MSI-X vector with an interrupt
7669 * handler, which is done in this function. Note that later, when the device
7670 * is unloading, the driver should always call free_irq() on all MSI-X
7671 * vectors it has done request_irq() on before calling pci_disable_msix().
7672 * Failure to do so results in a BUG_ON() and the device will be left with
7673 * MSI-X enabled, leaking its vectors.
7674 *
7675 * Return codes
7676 * 0 - successful
7677 * other values - error
7678 **/
7679static int
7680lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7681{
7682	int vectors, rc, index;
7683
7684	/* Set up MSI-X multi-message vectors */
7685	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7686		phba->sli4_hba.msix_entries[index].entry = index;
7687
7688	/* Configure MSI-X capability structure */
7689	vectors = phba->sli4_hba.cfg_eqn;
7690enable_msix_vectors:
7691	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7692			     vectors);
7693	if (rc > 1) {
7694		vectors = rc;
7695		goto enable_msix_vectors;
7696	} else if (rc) {
7697		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7698				"0484 PCI enable MSI-X failed (%d)\n", rc);
7699		goto msi_fail_out;
7700	}
7701
7702	/* Log MSI-X vector assignment */
7703	for (index = 0; index < vectors; index++)
7704		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7705				"0489 MSI-X entry[%d]: vector=x%x "
7706				"message=%d\n", index,
7707				phba->sli4_hba.msix_entries[index].vector,
7708				phba->sli4_hba.msix_entries[index].entry);
7709	/*
7710	 * Assign MSI-X vectors to interrupt handlers
7711	 */
7712	if (vectors > 1)
7713		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7714				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7715				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7716	else
7717		/* All Interrupts need to be handled by one EQ */
7718		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7719				 &lpfc_sli4_intr_handler, IRQF_SHARED,
7720				 LPFC_DRIVER_NAME, phba);
7721	if (rc) {
7722		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7723				"0485 MSI-X slow-path request_irq failed "
7724				"(%d)\n", rc);
7725		goto msi_fail_out;
7726	}
7727
7728	/* The rest of the vector(s) are associated to fast-path handler(s) */
7729	for (index = 1; index < vectors; index++) {
7730		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7731		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7732		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7733				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7734				 LPFC_FP_DRIVER_HANDLER_NAME,
7735				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7736		if (rc) {
7737			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7738					"0486 MSI-X fast-path (%d) "
7739					"request_irq failed (%d)\n", index, rc);
7740			goto cfg_fail_out;
7741		}
7742	}
7743	phba->sli4_hba.msix_vec_nr = vectors;
7744
7745	return rc;
7746
7747cfg_fail_out:
7748	/* free the irq already requested */
7749	for (--index; index >= 1; index--)
7750		free_irq(phba->sli4_hba.msix_entries[index].vector,
7751			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7752
7753	/* free the irq already requested */
7754	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7755
7756msi_fail_out:
7757	/* Unconfigure MSI-X capability structure */
7758	pci_disable_msix(phba->pcidev);
7759	return rc;
7760}
7761
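/*
 * Note: pci_enable_msix() returns a positive value when fewer vectors
 * are available than requested; the enable_msix_vectors loop above
 * retries with that smaller count (when more than one vector remains)
 * until the call either succeeds (0) or fails with a negative error.
 */
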
7762/**
7763 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7764 * @phba: pointer to lpfc hba data structure.
7765 *
7766 * This routine is invoked to release the MSI-X vectors and then disable the
7767 * MSI-X interrupt mode to device with SLI-4 interface spec.
7768 **/
7769static void
7770lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7771{
7772	int index;
7773
7774	/* Free up MSI-X multi-message vectors */
7775	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7776
7777	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7778		free_irq(phba->sli4_hba.msix_entries[index].vector,
7779			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7780
7781	/* Disable MSI-X */
7782	pci_disable_msix(phba->pcidev);
7783
7784	return;
7785}
7786
7787/**
7788 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7789 * @phba: pointer to lpfc hba data structure.
7790 *
7791 * This routine is invoked to enable the MSI interrupt mode to device with
7792 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7793 * to enable the MSI vector. The device driver is responsible for calling
7794 * request_irq() to register the MSI vector with an interrupt handler,
7795 * which is done in this function.
7796 *
7797 * Return codes
7798 * 	0 - successful
7799 * 	other values - error
7800 **/
7801static int
7802lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7803{
7804	int rc, index;
7805
7806	rc = pci_enable_msi(phba->pcidev);
7807	if (!rc)
7808		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7809				"0487 PCI enable MSI mode success.\n");
7810	else {
7811		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7812				"0488 PCI enable MSI mode failed (%d)\n", rc);
7813		return rc;
7814	}
7815
7816	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7817			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7818	if (rc) {
7819		pci_disable_msi(phba->pcidev);
7820		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7821				"0490 MSI request_irq failed (%d)\n", rc);
7822		return rc;
7823	}
7824
7825	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7826		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7827		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7828	}
7829
7830	return 0;
7831}
7832
7833/**
7834 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7835 * @phba: pointer to lpfc hba data structure.
7836 *
7837 * This routine is invoked to disable the MSI interrupt mode to device with
7838 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
7839 * has done request_irq() on before calling pci_disable_msi(). Failure to
7840 * do so results in a BUG_ON() and the device will be left with MSI
7841 * enabled, leaking its vector.
7842 **/
7843static void
7844lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7845{
7846	free_irq(phba->pcidev->irq, phba);
7847	pci_disable_msi(phba->pcidev);
7848	return;
7849}
7850
7851/**
7852 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7853 * @phba: pointer to lpfc hba data structure.
7854 * @cfg_mode: interrupt mode requested (2 = MSI-X, 1 = MSI, 0 = INTx).
7855 *
7856 * This routine is invoked to enable the device interrupt and associate the
7857 * driver's interrupt handler(s) with interrupt vector(s) of a device with
7858 * SLI-4 interface spec. Depending on the interrupt mode configured for the
7859 * driver, it will try to fall back from the configured mode to a mode
7860 * supported by the platform, kernel, and device, in the order:
7861 * MSI-X -> MSI -> IRQ.
7862 *
7863 * Return codes
7864 * 	the active interrupt mode (2, 1, or 0) - successful
7865 * 	LPFC_INTR_ERROR - failed to enable any interrupt mode
7866 **/
7867static uint32_t
7868lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7869{
7870	uint32_t intr_mode = LPFC_INTR_ERROR;
7871	int retval, index;
7872
7873	if (cfg_mode == 2) {
7874		/* Preparation before conf_msi mbox cmd */
7875		retval = 0;
7876		if (!retval) {
7877			/* Now, try to enable MSI-X interrupt mode */
7878			retval = lpfc_sli4_enable_msix(phba);
7879			if (!retval) {
7880				/* Indicate initialization to MSI-X mode */
7881				phba->intr_type = MSIX;
7882				intr_mode = 2;
7883			}
7884		}
7885	}
7886
7887	/* Fallback to MSI if MSI-X initialization failed */
7888	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7889		retval = lpfc_sli4_enable_msi(phba);
7890		if (!retval) {
7891			/* Indicate initialization to MSI mode */
7892			phba->intr_type = MSI;
7893			intr_mode = 1;
7894		}
7895	}
7896
7897	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7898	if (phba->intr_type == NONE) {
7899		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7900				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7901		if (!retval) {
7902			/* Indicate initialization to INTx mode */
7903			phba->intr_type = INTx;
7904			intr_mode = 0;
7905			for (index = 0; index < phba->cfg_fcp_eq_count;
7906			     index++) {
7907				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7908				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7909			}
7910		}
7911	}
7912	return intr_mode;
7913}
7914
7915/**
7916 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7917 * @phba: pointer to lpfc hba data structure.
7918 *
7919 * This routine is invoked to disable device interrupt and disassociate
7920 * the driver's interrupt handler(s) from interrupt vector(s) to device
7921 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7922 * will release the interrupt vector(s) for the message signaled interrupt.
7923 **/
7924static void
7925lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7926{
7927	/* Disable the currently initialized interrupt mode */
7928	if (phba->intr_type == MSIX)
7929		lpfc_sli4_disable_msix(phba);
7930	else if (phba->intr_type == MSI)
7931		lpfc_sli4_disable_msi(phba);
7932	else if (phba->intr_type == INTx)
7933		free_irq(phba->pcidev->irq, phba);
7934
7935	/* Reset interrupt management states */
7936	phba->intr_type = NONE;
7937	phba->sli.slistat.sli_intr = 0;
7938
7939	return;
7940}
7941
7942/**
7943 * lpfc_unset_hba - Unset SLI3 hba device initialization
7944 * @phba: pointer to lpfc hba data structure.
7945 *
7946 * This routine is invoked to unset the HBA device initialization steps to
7947 * a device with SLI-3 interface spec.
7948 **/
7949static void
7950lpfc_unset_hba(struct lpfc_hba *phba)
7951{
7952	struct lpfc_vport *vport = phba->pport;
7953	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7954
7955	spin_lock_irq(shost->host_lock);
7956	vport->load_flag |= FC_UNLOADING;
7957	spin_unlock_irq(shost->host_lock);
7958
7959	lpfc_stop_hba_timers(phba);
7960
7961	phba->pport->work_port_events = 0;
7962
7963	lpfc_sli_hba_down(phba);
7964
7965	lpfc_sli_brdrestart(phba);
7966
7967	lpfc_sli_disable_intr(phba);
7968
7969	return;
7970}
7971
7972/**
7973 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7974 * @phba: pointer to lpfc hba data structure.
7975 *
7976 * This routine is invoked to unset the HBA device initialization steps to
7977 * a device with SLI-4 interface spec.
7978 **/
7979static void
7980lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7981{
7982	struct lpfc_vport *vport = phba->pport;
7983	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7984
7985	spin_lock_irq(shost->host_lock);
7986	vport->load_flag |= FC_UNLOADING;
7987	spin_unlock_irq(shost->host_lock);
7988
7989	phba->pport->work_port_events = 0;
7990
7991	/* Stop the SLI4 device port */
7992	lpfc_stop_port(phba);
7993
7994	lpfc_sli4_disable_intr(phba);
7995
7996	/* Reset SLI4 HBA FCoE function */
7997	lpfc_pci_function_reset(phba);
7998
7999	return;
8000}
8001
8002/**
8003 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8004 * @phba: Pointer to HBA context object.
8005 *
8006 * This function is called in the SLI4 code path to wait for completion
8007 * of the device's XRI exchange busy I/Os. It checks the XRI exchange
8008 * busy state of outstanding FCP and ELS I/Os every 10ms for up to 10
8009 * seconds; after that, it checks every 30 seconds, logs an error
8010 * message, and waits forever. Only when all XRI exchange busy I/Os
8011 * have completed does the driver unload proceed with invoking the
8012 * function reset ioctl mailbox command to the CNA and the rest of the
8013 * driver unload resource release.
8014 **/
8015static void
8016lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8017{
8018	int wait_time = 0;
8019	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8020	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8021
8022	while (!fcp_xri_cmpl || !els_xri_cmpl) {
8023		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8024			if (!fcp_xri_cmpl)
8025				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8026						"2877 FCP XRI exchange busy "
8027						"wait time: %d seconds.\n",
8028						wait_time/1000);
8029			if (!els_xri_cmpl)
8030				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8031						"2878 ELS XRI exchange busy "
8032						"wait time: %d seconds.\n",
8033						wait_time/1000);
8034			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8035			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8036		} else {
8037			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8038			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8039		}
8040		fcp_xri_cmpl =
8041			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8042		els_xri_cmpl =
8043			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8044	}
8045}
8046
8047/**
8048 * lpfc_sli4_hba_unset - Unset the fcoe hba
8049 * @phba: Pointer to HBA context object.
8050 *
8051 * This function is called in the SLI4 code path to reset the HBA's FCoE
8052 * function. The caller is not required to hold any lock. This routine
8053 * issues PCI function reset mailbox command to reset the FCoE function.
8054 * At the end of the function, it calls lpfc_hba_down_post function to
8055 * free any pending commands.
8056 **/
8057static void
8058lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8059{
8060	int wait_cnt = 0;
8061	LPFC_MBOXQ_t *mboxq;
8062	struct pci_dev *pdev = phba->pcidev;
8063
8064	lpfc_stop_hba_timers(phba);
8065	phba->sli4_hba.intr_enable = 0;
8066
8067	/*
8068	 * Gracefully wait out the potential current outstanding asynchronous
8069	 * mailbox command.
8070	 */
8071
8072	/* First, block any pending async mailbox command from posted */
8073	spin_lock_irq(&phba->hbalock);
8074	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8075	spin_unlock_irq(&phba->hbalock);
8076	/* Now, trying to wait it out if we can */
8077	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8078		msleep(10);
8079		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8080			break;
8081	}
8082	/* Forcefully release the outstanding mailbox command if timed out */
8083	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8084		spin_lock_irq(&phba->hbalock);
8085		mboxq = phba->sli.mbox_active;
8086		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8087		__lpfc_mbox_cmpl_put(phba, mboxq);
8088		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8089		phba->sli.mbox_active = NULL;
8090		spin_unlock_irq(&phba->hbalock);
8091	}
8092
8093	/* Abort all iocbs associated with the hba */
8094	lpfc_sli_hba_iocb_abort(phba);
8095
8096	/* Wait for completion of device XRI exchange busy */
8097	lpfc_sli4_xri_exchange_busy_wait(phba);
8098
8099	/* Disable PCI subsystem interrupt */
8100	lpfc_sli4_disable_intr(phba);
8101
8102	/* Disable SR-IOV if enabled */
8103	if (phba->cfg_sriov_nr_virtfn)
8104		pci_disable_sriov(pdev);
8105
8106	/* The kthread stop signal shall trigger work_done one more time */
8107	kthread_stop(phba->worker_thread);
8108
8109	/* Reset SLI4 HBA FCoE function */
8110	lpfc_pci_function_reset(phba);
8111
8112	/* Stop the SLI4 device port */
8113	phba->pport->work_port_events = 0;
8114}
8115
8116/**
8117 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8118 * @phba: Pointer to HBA context object.
8119 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8120 *
8121 * This function is called in the SLI4 code path to read the port's
8122 * sli4 capabilities.
8123 *
8124 * This function may be called from any context that can block-wait
8125 * for the completion.  The expectation is that this routine is called
8126 * typically from probe_one or from the online routine.
8127 **/
8128int
8129lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8130{
8131	int rc;
8132	struct lpfc_mqe *mqe;
8133	struct lpfc_pc_sli4_params *sli4_params;
8134	uint32_t mbox_tmo;
8135
8136	rc = 0;
8137	mqe = &mboxq->u.mqe;
8138
8139	/* Read the port's SLI4 Parameters port capabilities */
8140	lpfc_pc_sli4_params(mboxq);
8141	if (!phba->sli4_hba.intr_enable)
8142		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8143	else {
8144		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8145		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8146	}
8147
8148	if (unlikely(rc))
8149		return 1;
8150
8151	sli4_params = &phba->sli4_hba.pc_sli4_params;
8152	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8153	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8154	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8155	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8156					     &mqe->un.sli4_params);
8157	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8158					     &mqe->un.sli4_params);
8159	sli4_params->proto_types = mqe->un.sli4_params.word3;
8160	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8161	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8162	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8163	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8164	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8165	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8166	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8167	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8168	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8169	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8170	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8171	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8172	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8173	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8174	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8175	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8176	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8177	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8178	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8179	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8180
8181	/* Make sure that sge_supp_len can be handled by the driver */
8182	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8183		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8184
8185	return rc;
8186}
8187
8188/**
8189 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8190 * @phba: Pointer to HBA context object.
8191 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8192 *
8193 * This function is called in the SLI4 code path to read the port's
8194 * sli4 capabilities.
8195 *
8196 * This function may be called from any context that can block-wait
8197 * for the completion.  The expectation is that this routine is called
8198 * typically from probe_one or from the online routine.
8199 **/
8200int
8201lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8202{
8203	int rc;
8204	struct lpfc_mqe *mqe = &mboxq->u.mqe;
8205	struct lpfc_pc_sli4_params *sli4_params;
8206	uint32_t mbox_tmo;
8207	int length;
8208	struct lpfc_sli4_parameters *mbx_sli4_parameters;
8209
8210	/*
8211	 * By default, the driver assumes the SLI4 port requires RPI
8212	 * header postings.  The SLI4_PARAM response will correct this
8213	 * assumption.
8214	 */
8215	phba->sli4_hba.rpi_hdrs_in_use = 1;
8216
8217	/* Read the port's SLI4 Config Parameters */
8218	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8219		  sizeof(struct lpfc_sli4_cfg_mhdr));
8220	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8221			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8222			 length, LPFC_SLI4_MBX_EMBED);
8223	if (!phba->sli4_hba.intr_enable)
8224		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8225	else {
8226		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8227		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8228	}
8229	if (unlikely(rc))
8230		return rc;
8231	sli4_params = &phba->sli4_hba.pc_sli4_params;
8232	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8233	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8234	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8235	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8236	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8237					     mbx_sli4_parameters);
8238	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8239					     mbx_sli4_parameters);
8240	if (bf_get(cfg_phwq, mbx_sli4_parameters))
8241		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8242	else
8243		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8244	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8245	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8246	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8247	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8248	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8249	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8250	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8251					    mbx_sli4_parameters);
8252	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8253					   mbx_sli4_parameters);
8254	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8255	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8256
8257	/* Make sure that sge_supp_len can be handled by the driver */
8258	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8259		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8260
8261	return 0;
8262}
8263
8264/**
8265 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8266 * @pdev: pointer to PCI device
8267 * @pid: pointer to PCI device identifier
8268 *
8269 * This routine is to be called to attach a device with SLI-3 interface spec
8270 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8271 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
8272 * information of the device and driver to see if the driver states that it can
8273 * support this kind of device. If the match is successful, the driver core
8274 * invokes this routine. If this routine determines it can claim the HBA, it
8275 * does all the initialization that it needs to do to handle the HBA properly.
8276 *
8277 * Return code
8278 * 	0 - driver can claim the device
8279 * 	negative value - driver can not claim the device
8280 **/
8281static int __devinit
8282lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8283{
8284	struct lpfc_hba   *phba;
8285	struct lpfc_vport *vport = NULL;
8286	struct Scsi_Host  *shost = NULL;
8287	int error;
8288	uint32_t cfg_mode, intr_mode;
8289
8290	/* Allocate memory for HBA structure */
8291	phba = lpfc_hba_alloc(pdev);
8292	if (!phba)
8293		return -ENOMEM;
8294
8295	/* Perform generic PCI device enabling operation */
8296	error = lpfc_enable_pci_dev(phba);
8297	if (error)
8298		goto out_free_phba;
8299
8300	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
8301	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8302	if (error)
8303		goto out_disable_pci_dev;
8304
8305	/* Set up SLI-3 specific device PCI memory space */
8306	error = lpfc_sli_pci_mem_setup(phba);
8307	if (error) {
8308		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8309				"1402 Failed to set up pci memory space.\n");
8310		goto out_disable_pci_dev;
8311	}
8312
8313	/* Set up phase-1 common device driver resources */
8314	error = lpfc_setup_driver_resource_phase1(phba);
8315	if (error) {
8316		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8317				"1403 Failed to set up driver resource.\n");
8318		goto out_unset_pci_mem_s3;
8319	}
8320
8321	/* Set up SLI-3 specific device driver resources */
8322	error = lpfc_sli_driver_resource_setup(phba);
8323	if (error) {
8324		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8325				"1404 Failed to set up driver resource.\n");
8326		goto out_unset_pci_mem_s3;
8327	}
8328
8329	/* Initialize and populate the iocb list per host */
8330	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8331	if (error) {
8332		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8333				"1405 Failed to initialize iocb list.\n");
8334		goto out_unset_driver_resource_s3;
8335	}
8336
8337	/* Set up common device driver resources */
8338	error = lpfc_setup_driver_resource_phase2(phba);
8339	if (error) {
8340		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8341				"1406 Failed to set up driver resource.\n");
8342		goto out_free_iocb_list;
8343	}
8344
8345	/* Get the default values for Model Name and Description */
8346	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8347
8348	/* Create SCSI host to the physical port */
8349	error = lpfc_create_shost(phba);
8350	if (error) {
8351		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8352				"1407 Failed to create scsi host.\n");
8353		goto out_unset_driver_resource;
8354	}
8355
8356	/* Configure sysfs attributes */
8357	vport = phba->pport;
8358	error = lpfc_alloc_sysfs_attr(vport);
8359	if (error) {
8360		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8361				"1476 Failed to allocate sysfs attr\n");
8362		goto out_destroy_shost;
8363	}
8364
8365	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8366	/* Now, trying to enable interrupt and bring up the device */
8367	cfg_mode = phba->cfg_use_msi;
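	/*
	 * Interrupt-mode fallback loop: start with the user-configured
	 * mode and, whenever the active-interrupt test below fails, step
	 * down one level (MSI-X -> MSI -> INTx) until one passes.
	 */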
8368	while (true) {
8369		/* Put device to a known state before enabling interrupt */
8370		lpfc_stop_port(phba);
8371		/* Configure and enable interrupt */
8372		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8373		if (intr_mode == LPFC_INTR_ERROR) {
8374			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8375					"0431 Failed to enable interrupt.\n");
8376			error = -ENODEV;
8377			goto out_free_sysfs_attr;
8378		}
8379		/* SLI-3 HBA setup */
8380		if (lpfc_sli_hba_setup(phba)) {
8381			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8382					"1477 Failed to set up hba\n");
8383			error = -ENODEV;
8384			goto out_remove_device;
8385		}
8386
8387		/* Wait 50ms for the interrupts of previous mailbox commands */
8388		msleep(50);
8389		/* Check active interrupts on message signaled interrupts */
8390		if (intr_mode == 0 ||
8391		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8392			/* Log the current active interrupt mode */
8393			phba->intr_mode = intr_mode;
8394			lpfc_log_intr_mode(phba, intr_mode);
8395			break;
8396		} else {
8397			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8398					"0447 Configure interrupt mode (%d) "
8399					"failed active interrupt test.\n",
8400					intr_mode);
8401			/* Disable the current interrupt mode */
8402			lpfc_sli_disable_intr(phba);
8403			/* Try next level of interrupt mode */
8404			cfg_mode = --intr_mode;
8405		}
8406	}
8407
8408	/* Perform post initialization setup */
8409	lpfc_post_init_setup(phba);
8410
8411	/* Check if there are static vports to be created. */
8412	lpfc_create_static_vport(phba);
8413
8414	return 0;
8415
8416out_remove_device:
8417	lpfc_unset_hba(phba);
8418out_free_sysfs_attr:
8419	lpfc_free_sysfs_attr(vport);
8420out_destroy_shost:
8421	lpfc_destroy_shost(phba);
8422out_unset_driver_resource:
8423	lpfc_unset_driver_resource_phase2(phba);
8424out_free_iocb_list:
8425	lpfc_free_iocb_list(phba);
8426out_unset_driver_resource_s3:
8427	lpfc_sli_driver_resource_unset(phba);
8428out_unset_pci_mem_s3:
8429	lpfc_sli_pci_mem_unset(phba);
8430out_disable_pci_dev:
8431	lpfc_disable_pci_dev(phba);
8432	if (shost)
8433		scsi_host_put(shost);
8434out_free_phba:
8435	lpfc_hba_free(phba);
8436	return error;
8437}
8438
8439/**
8440 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8441 * @pdev: pointer to PCI device
8442 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, this routine performs all the necessary
 * cleanup for the HBA device to be removed from the PCI subsystem properly.
8447 **/
8448static void __devexit
8449lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8450{
8451	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
8452	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8453	struct lpfc_vport **vports;
8454	struct lpfc_hba   *phba = vport->phba;
8455	int i;
8456	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
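	/*
	 * Note: the memory BARs are selected up front because they are
	 * released at the tail of this routine, after the HBA structure
	 * itself has been freed.
	 */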
8457
8458	spin_lock_irq(&phba->hbalock);
8459	vport->load_flag |= FC_UNLOADING;
8460	spin_unlock_irq(&phba->hbalock);
8461
8462	lpfc_free_sysfs_attr(vport);
8463
8464	/* Release all the vports against this physical port */
8465	vports = lpfc_create_vport_work_array(phba);
8466	if (vports != NULL)
8467		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8468			fc_vport_terminate(vports[i]->fc_vport);
8469	lpfc_destroy_vport_work_array(phba, vports);
8470
8471	/* Remove FC host and then SCSI host with the physical port */
8472	fc_remove_host(shost);
8473	scsi_remove_host(shost);
8474	lpfc_cleanup(vport);
8475
8476	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
8478	 * clears the rings, discards all mailbox commands, and resets
8479	 * the HBA.
8480	 */
8481
8482	/* HBA interrupt will be disabled after this call */
8483	lpfc_sli_hba_down(phba);
8484	/* Stop kthread signal shall trigger work_done one more time */
8485	kthread_stop(phba->worker_thread);
8486	/* Final cleanup of txcmplq and reset the HBA */
8487	lpfc_sli_brdrestart(phba);
8488
8489	lpfc_stop_hba_timers(phba);
8490	spin_lock_irq(&phba->hbalock);
8491	list_del_init(&vport->listentry);
8492	spin_unlock_irq(&phba->hbalock);
8493
8494	lpfc_debugfs_terminate(vport);
8495
8496	/* Disable SR-IOV if enabled */
8497	if (phba->cfg_sriov_nr_virtfn)
8498		pci_disable_sriov(pdev);
8499
8500	/* Disable interrupt */
8501	lpfc_sli_disable_intr(phba);
8502
8503	pci_set_drvdata(pdev, NULL);
8504	scsi_host_put(shost);
8505
8506	/*
8507	 * Call scsi_free before mem_free since scsi bufs are released to their
8508	 * corresponding pools here.
8509	 */
8510	lpfc_scsi_free(phba);
8511	lpfc_mem_free_all(phba);
8512
8513	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8514			  phba->hbqslimp.virt, phba->hbqslimp.phys);
8515
8516	/* Free resources associated with SLI2 interface */
8517	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8518			  phba->slim2p.virt, phba->slim2p.phys);
8519
8520	/* unmap adapter SLIM and Control Registers */
8521	iounmap(phba->ctrl_regs_memmap_p);
8522	iounmap(phba->slim_memmap_p);
8523
8524	lpfc_hba_free(phba);
8525
8526	pci_release_selected_regions(pdev, bars);
8527	pci_disable_device(pdev);
8528}
8529
8530/**
8531 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8532 * @pdev: pointer to PCI device
8533 * @msg: power management message
8534 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that because the driver implements
 * only the minimum PM requirements for a power-aware driver's suspend/resume
 * support -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND and the driver fully
 * reinitializes its device during the resume() method call -- the driver
 * sets the device to PCI_D3hot state in PCI config space instead of setting
 * it according to the @msg provided by the PM.
8546 *
8547 * Return code
8548 * 	0 - driver suspended the device
8549 * 	Error otherwise
8550 **/
8551static int
8552lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8553{
8554	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8555	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8556
8557	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8558			"0473 PCI device Power Management suspend.\n");
8559
8560	/* Bring down the device */
8561	lpfc_offline_prep(phba);
8562	lpfc_offline(phba);
8563	kthread_stop(phba->worker_thread);
8564
8565	/* Disable interrupt from device */
8566	lpfc_sli_disable_intr(phba);
8567
8568	/* Save device state to PCI config space */
8569	pci_save_state(pdev);
8570	pci_set_power_state(pdev, PCI_D3hot);
8571
8572	return 0;
8573}
8574
8575/**
8576 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8577 * @pdev: pointer to PCI device
8578 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it restores the device's PCI config space state
 * and fully reinitializes the device and brings it online. Note that because
 * the driver implements only the minimum PM requirements for a power-aware
 * driver's suspend/resume support -- all possible PM messages (SUSPEND,
 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND
 * and the driver fully reinitializes its device during the resume() method
 * call -- the device is set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
8592 * 	Error otherwise
8593 **/
8594static int
8595lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8596{
8597	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8598	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8599	uint32_t intr_mode;
8600	int error;
8601
8602	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8603			"0452 PCI device Power Management resume.\n");
8604
8605	/* Restore device state from PCI config space */
8606	pci_set_power_state(pdev, PCI_D0);
8607	pci_restore_state(pdev);
8608
8609	/*
8610	 * As the new kernel behavior of pci_restore_state() API call clears
8611	 * device saved_state flag, need to save the restored state again.
8612	 */
8613	pci_save_state(pdev);
8614
8615	if (pdev->is_busmaster)
8616		pci_set_master(pdev);
8617
8618	/* Startup the kernel thread for this host adapter. */
8619	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8620					"lpfc_worker_%d", phba->brd_no);
8621	if (IS_ERR(phba->worker_thread)) {
8622		error = PTR_ERR(phba->worker_thread);
8623		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8624				"0434 PM resume failed to start worker "
8625				"thread: error=x%x.\n", error);
8626		return error;
8627	}
8628
8629	/* Configure and enable interrupt */
8630	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8631	if (intr_mode == LPFC_INTR_ERROR) {
8632		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8633				"0430 PM resume Failed to enable interrupt\n");
8634		return -EIO;
8635	} else
8636		phba->intr_mode = intr_mode;
8637
8638	/* Restart HBA and bring it online */
8639	lpfc_sli_brdrestart(phba);
8640	lpfc_online(phba);
8641
8642	/* Log the current active interrupt mode */
8643	lpfc_log_intr_mode(phba, phba->intr_mode);
8644
8645	return 0;
8646}
8647
8648/**
8649 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8650 * @phba: pointer to lpfc hba data structure.
8651 *
8652 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8653 * aborts all the outstanding SCSI I/Os to the pci device.
8654 **/
8655static void
8656lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8657{
8658	struct lpfc_sli *psli = &phba->sli;
8659	struct lpfc_sli_ring  *pring;
8660
8661	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8662			"2723 PCI channel I/O abort preparing for recovery\n");
8663
8664	/*
8665	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
8667	 */
8668	pring = &psli->ring[psli->fcp_ring];
8669	lpfc_sli_abort_iocb_ring(phba, pring);
8670}
8671
8672/**
8673 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8674 * @phba: pointer to lpfc hba data structure.
8675 *
8676 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8677 * disables the device interrupt and pci device, and aborts the internal FCP
8678 * pending I/Os.
8679 **/
8680static void
8681lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8682{
8683	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8684			"2710 PCI channel disable preparing for reset\n");
8685
8686	/* Block any management I/Os to the device */
8687	lpfc_block_mgmt_io(phba);
8688
8689	/* Block all SCSI devices' I/Os on the host */
8690	lpfc_scsi_dev_block(phba);
8691
8692	/* stop all timers */
8693	lpfc_stop_hba_timers(phba);
8694
8695	/* Disable interrupt and pci device */
8696	lpfc_sli_disable_intr(phba);
8697	pci_disable_device(phba->pcidev);
8698
8699	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
8700	lpfc_sli_flush_fcp_rings(phba);
8701}
8702
8703/**
8704 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8705 * @phba: pointer to lpfc hba data structure.
8706 *
8707 * This routine is called to prepare the SLI3 device for PCI slot permanently
8708 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8709 * pending I/Os.
8710 **/
8711static void
8712lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8713{
8714	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8715			"2711 PCI channel permanent disable for failure\n");
8716	/* Block all SCSI devices' I/Os on the host */
8717	lpfc_scsi_dev_block(phba);
8718
8719	/* stop all timers */
8720	lpfc_stop_hba_timers(phba);
8721
8722	/* Clean up all driver's outstanding SCSI I/Os */
8723	lpfc_sli_flush_fcp_rings(phba);
8724}
8725
8726/**
8727 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8728 * @pdev: pointer to PCI device.
8729 * @state: the current PCI connection state.
8730 *
 * This routine is called from the PCI subsystem for I/O error handling for a
8732 * device with SLI-3 interface spec. This function is called by the PCI
8733 * subsystem after a PCI bus error affecting this device has been detected.
8734 * When this function is invoked, it will need to stop all the I/Os and
8735 * interrupt(s) to the device. Once that is done, it will return
8736 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8737 * as desired.
8738 *
8739 * Return codes
8740 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8741 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8742 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8743 **/
8744static pci_ers_result_t
8745lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8746{
8747	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8748	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8749
8750	switch (state) {
8751	case pci_channel_io_normal:
8752		/* Non-fatal error, prepare for recovery */
8753		lpfc_sli_prep_dev_for_recover(phba);
8754		return PCI_ERS_RESULT_CAN_RECOVER;
8755	case pci_channel_io_frozen:
8756		/* Fatal error, prepare for slot reset */
8757		lpfc_sli_prep_dev_for_reset(phba);
8758		return PCI_ERS_RESULT_NEED_RESET;
8759	case pci_channel_io_perm_failure:
8760		/* Permanent failure, prepare for device down */
8761		lpfc_sli_prep_dev_for_perm_failure(phba);
8762		return PCI_ERS_RESULT_DISCONNECT;
8763	default:
8764		/* Unknown state, prepare and request slot reset */
8765		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8766				"0472 Unknown PCI error state: x%x\n", state);
8767		lpfc_sli_prep_dev_for_reset(phba);
8768		return PCI_ERS_RESULT_NEED_RESET;
8769	}
8770}
8771
8772/**
8773 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
8774 * @pdev: pointer to PCI device.
8775 *
 * This routine is called from the PCI subsystem for error handling for a
 * device with SLI-3 interface spec. This is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
8780 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8781 * recovery and then call this routine before calling the .resume method
8782 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will leave the HBA in an offline state
 * without passing any I/O traffic.
8785 *
8786 * Return codes
8787 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8788 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8789 */
8790static pci_ers_result_t
8791lpfc_io_slot_reset_s3(struct pci_dev *pdev)
8792{
8793	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8794	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8795	struct lpfc_sli *psli = &phba->sli;
8796	uint32_t intr_mode;
8797
8798	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8799	if (pci_enable_device_mem(pdev)) {
8800		printk(KERN_ERR "lpfc: Cannot re-enable "
8801			"PCI device after reset.\n");
8802		return PCI_ERS_RESULT_DISCONNECT;
8803	}
8804
8805	pci_restore_state(pdev);
8806
8807	/*
8808	 * As the new kernel behavior of pci_restore_state() API call clears
8809	 * device saved_state flag, need to save the restored state again.
8810	 */
8811	pci_save_state(pdev);
8812
8813	if (pdev->is_busmaster)
8814		pci_set_master(pdev);
8815
8816	spin_lock_irq(&phba->hbalock);
8817	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8818	spin_unlock_irq(&phba->hbalock);
8819
8820	/* Configure and enable interrupt */
8821	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8822	if (intr_mode == LPFC_INTR_ERROR) {
8823		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8824				"0427 Cannot re-enable interrupt after "
8825				"slot reset.\n");
8826		return PCI_ERS_RESULT_DISCONNECT;
8827	} else
8828		phba->intr_mode = intr_mode;
8829
8830	/* Take device offline, it will perform cleanup */
8831	lpfc_offline_prep(phba);
8832	lpfc_offline(phba);
8833	lpfc_sli_brdrestart(phba);
8834
8835	/* Log the current active interrupt mode */
8836	lpfc_log_intr_mode(phba, phba->intr_mode);
8837
8838	return PCI_ERS_RESULT_RECOVERED;
8839}
8840
8841/**
8842 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8843 * @pdev: pointer to PCI device
8844 *
 * This routine is called from the PCI subsystem for error handling for a
 * device with SLI-3 interface spec. It is called when kernel error recovery
 * tells
8847 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8848 * error recovery. After this call, traffic can start to flow from this device
8849 * again.
8850 */
8851static void
8852lpfc_io_resume_s3(struct pci_dev *pdev)
8853{
8854	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8855	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8856
8857	/* Bring device online, it will be no-op for non-fatal error resume */
8858	lpfc_online(phba);
8859
8860	/* Clean up Advanced Error Reporting (AER) if needed */
8861	if (phba->hba_flag & HBA_AER_ENABLED)
8862		pci_cleanup_aer_uncorrect_error_status(pdev);
8863}
8864
8865/**
8866 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8867 * @phba: pointer to lpfc hba data structure.
8868 *
8869 * returns the number of ELS/CT IOCBs to reserve
8870 **/
8871int
8872lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8873{
8874	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8875
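	/* The ELS/CT IOCB reservation scales with the configured XRI count */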
8876	if (phba->sli_rev == LPFC_SLI_REV4) {
8877		if (max_xri <= 100)
8878			return 10;
8879		else if (max_xri <= 256)
8880			return 25;
8881		else if (max_xri <= 512)
8882			return 50;
8883		else if (max_xri <= 1024)
8884			return 100;
8885		else
8886			return 150;
8887	} else
8888		return 0;
8889}
8890
8891/**
8892 * lpfc_write_firmware - attempt to write a firmware image to the port
8893 * @phba: pointer to lpfc hba data structure.
8894 * @fw: pointer to firmware image returned from request_firmware.
8895 *
8896 * returns the number of bytes written if write is successful.
8897 * returns a negative error value if there were errors.
8898 * returns 0 if firmware matches currently active firmware on port.
8899 **/
8900int
8901lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8902{
8903	char fwrev[32];
8904	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
8905	struct list_head dma_buffer_list;
8906	int i, rc = 0;
8907	struct lpfc_dmabuf *dmabuf, *next;
8908	uint32_t offset = 0, temp_offset = 0;
8909
8910	INIT_LIST_HEAD(&dma_buffer_list);
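	/*
	 * Validate the image header (magic number, file type/ID and the
	 * declared size) before any of it is written to the port.
	 */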
8911	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8912	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
8913	     LPFC_FILE_TYPE_GROUP) ||
8914	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8915	    (be32_to_cpu(image->size) != fw->size)) {
8916		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8917				"3022 Invalid FW image found. "
8918				"Magic:%x Type:%x ID:%x\n",
8919				be32_to_cpu(image->magic_number),
8920				bf_get_be32(lpfc_grp_hdr_file_type, image),
8921				bf_get_be32(lpfc_grp_hdr_id, image));
8922		return -EINVAL;
8923	}
8924	lpfc_decode_firmware_rev(phba, fwrev, 1);
8925	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
8926		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8927				"3023 Updating Firmware. Current Version:%s "
8928				"New Version:%s\n",
8929				fwrev, image->revision);
8930		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8931			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8932					 GFP_KERNEL);
8933			if (!dmabuf) {
8934				rc = -ENOMEM;
8935				goto out;
8936			}
8937			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8938							  SLI4_PAGE_SIZE,
8939							  &dmabuf->phys,
8940							  GFP_KERNEL);
8941			if (!dmabuf->virt) {
8942				kfree(dmabuf);
8943				rc = -ENOMEM;
8944				goto out;
8945			}
8946			list_add_tail(&dmabuf->list, &dma_buffer_list);
8947		}
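		/*
		 * Stream the image out: refill the DMA buffer list with the
		 * next chunk of the file on each pass and let lpfc_wr_object()
		 * advance @offset until the whole image has been written.
		 */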
8948		while (offset < fw->size) {
8949			temp_offset = offset;
8950			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
8951				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
8952					memcpy(dmabuf->virt,
8953					       fw->data + temp_offset,
8954					       fw->size - temp_offset);
8955					temp_offset = fw->size;
8956					break;
8957				}
8958				memcpy(dmabuf->virt, fw->data + temp_offset,
8959				       SLI4_PAGE_SIZE);
8960				temp_offset += SLI4_PAGE_SIZE;
8961			}
8962			rc = lpfc_wr_object(phba, &dma_buffer_list,
8963				    (fw->size - offset), &offset);
8964			if (rc) {
8965				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8966						"3024 Firmware update failed. "
8967						"%d\n", rc);
8968				goto out;
8969			}
8970		}
8971		rc = offset;
8972	}
8973out:
8974	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
8975		list_del(&dmabuf->list);
8976		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
8977				  dmabuf->virt, dmabuf->phys);
8978		kfree(dmabuf);
8979	}
8980	return rc;
8981}
8982
8983/**
8984 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8985 * @pdev: pointer to PCI device
8986 * @pid: pointer to PCI device identifier
8987 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and driver to see whether the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. If this routine determines it can claim
 * the HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
8996 *
8997 * Return code
8998 * 	0 - driver can claim the device
8999 * 	negative value - driver can not claim the device
9000 **/
9001static int __devinit
9002lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9003{
9004	struct lpfc_hba   *phba;
9005	struct lpfc_vport *vport = NULL;
9006	struct Scsi_Host  *shost = NULL;
9007	int error;
9008	uint32_t cfg_mode, intr_mode;
9009	int mcnt;
9010	int adjusted_fcp_eq_count;
9011	int fcp_qidx;
9012	const struct firmware *fw;
	char file_name[16];
9014
9015	/* Allocate memory for HBA structure */
9016	phba = lpfc_hba_alloc(pdev);
9017	if (!phba)
9018		return -ENOMEM;
9019
9020	/* Perform generic PCI device enabling operation */
9021	error = lpfc_enable_pci_dev(phba);
9022	if (error)
9023		goto out_free_phba;
9024
9025	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
9026	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
9027	if (error)
9028		goto out_disable_pci_dev;
9029
9030	/* Set up SLI-4 specific device PCI memory space */
9031	error = lpfc_sli4_pci_mem_setup(phba);
9032	if (error) {
9033		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9034				"1410 Failed to set up pci memory space.\n");
9035		goto out_disable_pci_dev;
9036	}
9037
9038	/* Set up phase-1 common device driver resources */
9039	error = lpfc_setup_driver_resource_phase1(phba);
9040	if (error) {
9041		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9042				"1411 Failed to set up driver resource.\n");
9043		goto out_unset_pci_mem_s4;
9044	}
9045
9046	/* Set up SLI-4 Specific device driver resources */
9047	error = lpfc_sli4_driver_resource_setup(phba);
9048	if (error) {
9049		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9050				"1412 Failed to set up driver resource.\n");
9051		goto out_unset_pci_mem_s4;
9052	}
9053
9054	/* Initialize and populate the iocb list per host */
9055
9056	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9057			"2821 initialize iocb list %d.\n",
9058			phba->cfg_iocb_cnt*1024);
9059	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9060
9061	if (error) {
9062		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9063				"1413 Failed to initialize iocb list.\n");
9064		goto out_unset_driver_resource_s4;
9065	}
9066
9067	INIT_LIST_HEAD(&phba->active_rrq_list);
9068	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9069
9070	/* Set up common device driver resources */
9071	error = lpfc_setup_driver_resource_phase2(phba);
9072	if (error) {
9073		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9074				"1414 Failed to set up driver resource.\n");
9075		goto out_free_iocb_list;
9076	}
9077
9078	/* Get the default values for Model Name and Description */
9079	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9080
9081	/* Create SCSI host to the physical port */
9082	error = lpfc_create_shost(phba);
9083	if (error) {
9084		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9085				"1415 Failed to create scsi host.\n");
9086		goto out_unset_driver_resource;
9087	}
9088
9089	/* Configure sysfs attributes */
9090	vport = phba->pport;
9091	error = lpfc_alloc_sysfs_attr(vport);
9092	if (error) {
9093		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9094				"1416 Failed to allocate sysfs attr\n");
9095		goto out_destroy_shost;
9096	}
9097
9098	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9099	/* Now, trying to enable interrupt and bring up the device */
9100	cfg_mode = phba->cfg_use_msi;
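	/*
	 * Interrupt-mode fallback loop: start with the user-configured
	 * mode and, whenever the active-interrupt test below fails, step
	 * down one level (MSI-X -> MSI -> INTx) until one passes.
	 */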
9101	while (true) {
9102		/* Put device to a known state before enabling interrupt */
9103		lpfc_stop_port(phba);
9104		/* Configure and enable interrupt */
9105		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9106		if (intr_mode == LPFC_INTR_ERROR) {
9107			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9108					"0426 Failed to enable interrupt.\n");
9109			error = -ENODEV;
9110			goto out_free_sysfs_attr;
9111		}
		/*
		 * Clamp the fast-path FCP EQ count to what the granted
		 * interrupt mode can service: none for MSI/INTx (the
		 * slow-path EQ handles everything), and at most one per
		 * MSI-X vector otherwise, with one vector reserved for
		 * the slow path.
		 */
9113		if (phba->intr_type != MSIX)
9114			adjusted_fcp_eq_count = 0;
9115		else if (phba->sli4_hba.msix_vec_nr <
9116					phba->cfg_fcp_eq_count + 1)
9117			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9118		else
9119			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9120		/* Free unused EQs */
9121		for (fcp_qidx = adjusted_fcp_eq_count;
9122		     fcp_qidx < phba->cfg_fcp_eq_count;
9123		     fcp_qidx++) {
9124			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
9125			/* do not delete the first fcp_cq */
9126			if (fcp_qidx)
9127				lpfc_sli4_queue_free(
9128					phba->sli4_hba.fcp_cq[fcp_qidx]);
9129		}
9130		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
9131		/* Set up SLI-4 HBA */
9132		if (lpfc_sli4_hba_setup(phba)) {
9133			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9134					"1421 Failed to set up hba\n");
9135			error = -ENODEV;
9136			goto out_disable_intr;
9137		}
9138
9139		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
9140		if (intr_mode != 0)
9141			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9142							    LPFC_ACT_INTR_CNT);
9143
9144		/* Check active interrupts received only for MSI/MSI-X */
9145		if (intr_mode == 0 ||
9146		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9147			/* Log the current active interrupt mode */
9148			phba->intr_mode = intr_mode;
9149			lpfc_log_intr_mode(phba, intr_mode);
9150			break;
9151		}
9152		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9153				"0451 Configure interrupt mode (%d) "
9154				"failed active interrupt test.\n",
9155				intr_mode);
9156		/* Unset the previous SLI-4 HBA setup. */
9157		/*
9158		 * TODO:  Is this operation compatible with IF TYPE 2
9159		 * devices?  All port state is deleted and cleared.
9160		 */
9161		lpfc_sli4_unset_hba(phba);
9162		/* Try next level of interrupt mode */
9163		cfg_mode = --intr_mode;
9164	}
9165
9166	/* Perform post initialization setup */
9167	lpfc_post_init_setup(phba);
9168
9169	/* check for firmware upgrade or downgrade */
	snprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
9171	error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9172	if (!error) {
9173		lpfc_write_firmware(phba, fw);
9174		release_firmware(fw);
9175	}
9176
9177	/* Check if there are static vports to be created. */
9178	lpfc_create_static_vport(phba);
9179	return 0;
9180
9181out_disable_intr:
9182	lpfc_sli4_disable_intr(phba);
9183out_free_sysfs_attr:
9184	lpfc_free_sysfs_attr(vport);
9185out_destroy_shost:
9186	lpfc_destroy_shost(phba);
9187out_unset_driver_resource:
9188	lpfc_unset_driver_resource_phase2(phba);
9189out_free_iocb_list:
9190	lpfc_free_iocb_list(phba);
9191out_unset_driver_resource_s4:
9192	lpfc_sli4_driver_resource_unset(phba);
9193out_unset_pci_mem_s4:
9194	lpfc_sli4_pci_mem_unset(phba);
9195out_disable_pci_dev:
9196	lpfc_disable_pci_dev(phba);
9197	if (shost)
9198		scsi_host_put(shost);
9199out_free_phba:
9200	lpfc_hba_free(phba);
9201	return error;
9202}
9203
9204/**
9205 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9206 * @pdev: pointer to PCI device
9207 *
 * This routine is called from the kernel's PCI subsystem to detach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, this routine performs all the necessary cleanup
 * for the HBA device to be removed from the PCI subsystem properly.
9212 **/
9213static void __devexit
9214lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9215{
9216	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9217	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9218	struct lpfc_vport **vports;
9219	struct lpfc_hba *phba = vport->phba;
9220	int i;
9221
9222	/* Mark the device unloading flag */
9223	spin_lock_irq(&phba->hbalock);
9224	vport->load_flag |= FC_UNLOADING;
9225	spin_unlock_irq(&phba->hbalock);
9226
9227	/* Free the HBA sysfs attributes */
9228	lpfc_free_sysfs_attr(vport);
9229
9230	/* Release all the vports against this physical port */
9231	vports = lpfc_create_vport_work_array(phba);
9232	if (vports != NULL)
9233		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
9234			fc_vport_terminate(vports[i]->fc_vport);
9235	lpfc_destroy_vport_work_array(phba, vports);
9236
9237	/* Remove FC host and then SCSI host with the physical port */
9238	fc_remove_host(shost);
9239	scsi_remove_host(shost);
9240
9241	/* Perform cleanup on the physical port */
9242	lpfc_cleanup(vport);
9243
9244	/*
9245	 * Bring down the SLI Layer. This step disables all interrupts,
9246	 * clears the rings, discards all mailbox commands, and resets
9247	 * the HBA FCoE function.
9248	 */
9249	lpfc_debugfs_terminate(vport);
9250	lpfc_sli4_hba_unset(phba);
9251
9252	spin_lock_irq(&phba->hbalock);
9253	list_del_init(&vport->listentry);
9254	spin_unlock_irq(&phba->hbalock);
9255
9256	/* Perform scsi free before driver resource_unset since scsi
9257	 * buffers are released to their corresponding pools here.
9258	 */
9259	lpfc_scsi_free(phba);
9260	lpfc_sli4_driver_resource_unset(phba);
9261
9262	/* Unmap adapter Control and Doorbell registers */
9263	lpfc_sli4_pci_mem_unset(phba);
9264
9265	/* Release PCI resources and disable device's PCI function */
9266	scsi_host_put(shost);
9267	lpfc_disable_pci_dev(phba);
9268
9269	/* Finally, free the driver's device data structure */
9270	lpfc_hba_free(phba);
9271
9272	return;
9273}
9274
9275/**
9276 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9277 * @pdev: pointer to PCI device
9278 * @msg: power management message
9279 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA, and
 * bringing the device offline. Note that because the driver implements only
 * the minimum PM requirements for a power-aware driver's suspend/resume
 * support -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND and the driver fully
 * reinitializes its device during the resume() method call -- the driver
 * sets the device to PCI_D3hot state in PCI config space instead of setting
 * it according to the @msg provided by the PM.
9291 *
9292 * Return code
9293 * 	0 - driver suspended the device
9294 * 	Error otherwise
9295 **/
9296static int
9297lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9298{
9299	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9300	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9301
9302	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9303			"2843 PCI device Power Management suspend.\n");
9304
9305	/* Bring down the device */
9306	lpfc_offline_prep(phba);
9307	lpfc_offline(phba);
9308	kthread_stop(phba->worker_thread);
9309
9310	/* Disable interrupt from device */
9311	lpfc_sli4_disable_intr(phba);
9312
9313	/* Save device state to PCI config space */
9314	pci_save_state(pdev);
9315	pci_set_power_state(pdev, PCI_D3hot);
9316
9317	return 0;
9318}
9319
9320/**
9321 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9322 * @pdev: pointer to PCI device
9323 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that because the
 * driver implements only the minimum PM requirements for a power-aware
 * driver's suspend/resume support -- all possible PM messages (SUSPEND,
 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND
 * and the driver fully reinitializes its device during the resume() method
 * call -- the device is set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
9337 * 	Error otherwise
9338 **/
9339static int
9340lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9341{
9342	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9343	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9344	uint32_t intr_mode;
9345	int error;
9346
9347	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9348			"0292 PCI device Power Management resume.\n");
9349
9350	/* Restore device state from PCI config space */
9351	pci_set_power_state(pdev, PCI_D0);
9352	pci_restore_state(pdev);
9353
9354	/*
9355	 * As the new kernel behavior of pci_restore_state() API call clears
9356	 * device saved_state flag, need to save the restored state again.
9357	 */
9358	pci_save_state(pdev);
9359
9360	if (pdev->is_busmaster)
9361		pci_set_master(pdev);
9362
	/* Startup the kernel thread for this host adapter. */
9364	phba->worker_thread = kthread_run(lpfc_do_work, phba,
9365					"lpfc_worker_%d", phba->brd_no);
9366	if (IS_ERR(phba->worker_thread)) {
9367		error = PTR_ERR(phba->worker_thread);
9368		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9369				"0293 PM resume failed to start worker "
9370				"thread: error=x%x.\n", error);
9371		return error;
9372	}
9373
9374	/* Configure and enable interrupt */
9375	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9376	if (intr_mode == LPFC_INTR_ERROR) {
9377		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9378				"0294 PM resume Failed to enable interrupt\n");
9379		return -EIO;
9380	} else
9381		phba->intr_mode = intr_mode;
9382
9383	/* Restart HBA and bring it online */
9384	lpfc_sli_brdrestart(phba);
9385	lpfc_online(phba);
9386
9387	/* Log the current active interrupt mode */
9388	lpfc_log_intr_mode(phba, phba->intr_mode);
9389
9390	return 0;
9391}
9392
9393/**
9394 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
9395 * @phba: pointer to lpfc hba data structure.
9396 *
9397 * This routine is called to prepare the SLI4 device for PCI slot recover. It
9398 * aborts all the outstanding SCSI I/Os to the pci device.
9399 **/
9400static void
9401lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9402{
9403	struct lpfc_sli *psli = &phba->sli;
9404	struct lpfc_sli_ring  *pring;
9405
9406	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9407			"2828 PCI channel I/O abort preparing for recovery\n");
9408	/*
9409	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
9411	 */
9412	pring = &psli->ring[psli->fcp_ring];
9413	lpfc_sli_abort_iocb_ring(phba, pring);
9414}
9415
9416/**
9417 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9418 * @phba: pointer to lpfc hba data structure.
9419 *
9420 * This routine is called to prepare the SLI4 device for PCI slot reset. It
9421 * disables the device interrupt and pci device, and aborts the internal FCP
9422 * pending I/Os.
9423 **/
9424static void
9425lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9426{
9427	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9428			"2826 PCI channel disable preparing for reset\n");
9429
9430	/* Block any management I/Os to the device */
9431	lpfc_block_mgmt_io(phba);
9432
9433	/* Block all SCSI devices' I/Os on the host */
9434	lpfc_scsi_dev_block(phba);
9435
9436	/* stop all timers */
9437	lpfc_stop_hba_timers(phba);
9438
9439	/* Disable interrupt and pci device */
9440	lpfc_sli4_disable_intr(phba);
9441	pci_disable_device(phba->pcidev);
9442
9443	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
9444	lpfc_sli_flush_fcp_rings(phba);
9445}
9446
9447/**
9448 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9449 * @phba: pointer to lpfc hba data structure.
9450 *
9451 * This routine is called to prepare the SLI4 device for PCI slot permanently
9452 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9453 * pending I/Os.
9454 **/
9455static void
9456lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9457{
9458	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9459			"2827 PCI channel permanent disable for failure\n");
9460
9461	/* Block all SCSI devices' I/Os on the host */
9462	lpfc_scsi_dev_block(phba);
9463
9464	/* stop all timers */
9465	lpfc_stop_hba_timers(phba);
9466
9467	/* Clean up all driver's outstanding SCSI I/Os */
9468	lpfc_sli_flush_fcp_rings(phba);
9469}
9470
9471/**
9472 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9473 * @pdev: pointer to PCI device.
9474 * @state: the current PCI connection state.
9475 *
 * This routine is called from the PCI subsystem for error handling for a
 * device with SLI-4 interface spec. This function is called by the PCI
 * subsystem
9478 * after a PCI bus error affecting this device has been detected. When this
9479 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9480 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9481 * for the PCI subsystem to perform proper recovery as desired.
9482 *
9483 * Return codes
9484 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9485 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9486 **/
9487static pci_ers_result_t
9488lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9489{
9490	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9491	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9492
9493	switch (state) {
9494	case pci_channel_io_normal:
9495		/* Non-fatal error, prepare for recovery */
9496		lpfc_sli4_prep_dev_for_recover(phba);
9497		return PCI_ERS_RESULT_CAN_RECOVER;
9498	case pci_channel_io_frozen:
9499		/* Fatal error, prepare for slot reset */
9500		lpfc_sli4_prep_dev_for_reset(phba);
9501		return PCI_ERS_RESULT_NEED_RESET;
9502	case pci_channel_io_perm_failure:
9503		/* Permanent failure, prepare for device down */
9504		lpfc_sli4_prep_dev_for_perm_failure(phba);
9505		return PCI_ERS_RESULT_DISCONNECT;
9506	default:
9507		/* Unknown state, prepare and request slot reset */
9508		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9509				"2825 Unknown PCI error state: x%x\n", state);
9510		lpfc_sli4_prep_dev_for_reset(phba);
9511		return PCI_ERS_RESULT_NEED_RESET;
9512	}
9513}
9514
9515/**
9516 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
9517 * @pdev: pointer to PCI device.
9518 *
 * This routine is called from the PCI subsystem for error handling for a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to
9521 * restart the PCI card from scratch, as if from a cold-boot. During the
9522 * PCI subsystem error recovery, after the driver returns
9523 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9524 * recovery and then call this routine before calling the .resume method to
9525 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will leave the HBA in an offline state without
 * passing any I/O traffic.
9528 *
9529 * Return codes
9530 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9531 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9532 */
9533static pci_ers_result_t
9534lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9535{
9536	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9537	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9538	struct lpfc_sli *psli = &phba->sli;
9539	uint32_t intr_mode;
9540
9541	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9542	if (pci_enable_device_mem(pdev)) {
9543		printk(KERN_ERR "lpfc: Cannot re-enable "
9544			"PCI device after reset.\n");
9545		return PCI_ERS_RESULT_DISCONNECT;
9546	}
9547
9548	pci_restore_state(pdev);
9549
9550	/*
9551	 * As the new kernel behavior of pci_restore_state() API call clears
9552	 * device saved_state flag, need to save the restored state again.
9553	 */
9554	pci_save_state(pdev);
9555
9556	if (pdev->is_busmaster)
9557		pci_set_master(pdev);
9558
9559	spin_lock_irq(&phba->hbalock);
9560	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9561	spin_unlock_irq(&phba->hbalock);
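	/*
	 * Clearing LPFC_SLI_ACTIVE above lets lpfc_io_resume_s4() know that
	 * a full offline/restart/online cycle is still required.
	 */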
9562
9563	/* Configure and enable interrupt */
9564	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9565	if (intr_mode == LPFC_INTR_ERROR) {
9566		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9567				"2824 Cannot re-enable interrupt after "
9568				"slot reset.\n");
9569		return PCI_ERS_RESULT_DISCONNECT;
9570	} else
9571		phba->intr_mode = intr_mode;
9572
9573	/* Log the current active interrupt mode */
9574	lpfc_log_intr_mode(phba, phba->intr_mode);
9575
9576	return PCI_ERS_RESULT_RECOVERED;
9577}
9578
9579/**
9580 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9581 * @pdev: pointer to PCI device
9582 *
 * This routine is called from the PCI subsystem for error handling for a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells
9585 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9586 * error recovery. After this call, traffic can start to flow from this device
9587 * again.
9588 **/
9589static void
9590lpfc_io_resume_s4(struct pci_dev *pdev)
9591{
9592	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9593	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9594
9595	/*
9596	 * In case of slot reset, as function reset is performed through
9597	 * mailbox command which needs DMA to be enabled, this operation
9598	 * has to be moved to the io resume phase. Taking device offline
9599	 * will perform the necessary cleanup.
9600	 */
9601	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9602		/* Perform device reset */
9603		lpfc_offline_prep(phba);
9604		lpfc_offline(phba);
9605		lpfc_sli_brdrestart(phba);
9606		/* Bring the device back online */
9607		lpfc_online(phba);
9608	}
9609
9610	/* Clean up Advanced Error Reporting (AER) if needed */
9611	if (phba->hba_flag & HBA_AER_ENABLED)
9612		pci_cleanup_aer_uncorrect_error_status(pdev);
9613}
9614
9615/**
9616 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9617 * @pdev: pointer to PCI device
9618 * @pid: pointer to PCI device identifier
9619 *
9620 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and driver to
 * see whether the driver can support this kind of device. If the match is
9624 * successful, the driver core invokes this routine. This routine dispatches
9625 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9626 * do all the initialization that it needs to do to handle the HBA device
9627 * properly.
9628 *
9629 * Return code
9630 * 	0 - driver can claim the device
9631 * 	negative value - driver can not claim the device
9632 **/
9633static int __devinit
9634lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9635{
9636	int rc;
9637	struct lpfc_sli_intf intf;
9638
9639	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9640		return -ENODEV;
9641
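	/*
	 * A valid SLI_INTF register that reports SLI-4 identifies the newer
	 * (PCI device group-1) hardware; anything else is probed as an
	 * SLI-3 (group-0) device.
	 */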
9642	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9643	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9644		rc = lpfc_pci_probe_one_s4(pdev, pid);
9645	else
9646		rc = lpfc_pci_probe_one_s3(pdev, pid);
9647
9648	return rc;
9649}
9650
9651/**
9652 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9653 * @pdev: pointer to PCI device
9654 *
9655 * This routine is to be registered to the kernel's PCI subsystem. When an
9656 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9657 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9658 * remove routine, which will perform all the necessary cleanup for the
9659 * device to be removed from the PCI subsystem properly.
9660 **/
9661static void __devexit
9662lpfc_pci_remove_one(struct pci_dev *pdev)
9663{
9664	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9665	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9666
9667	switch (phba->pci_dev_grp) {
9668	case LPFC_PCI_DEV_LP:
9669		lpfc_pci_remove_one_s3(pdev);
9670		break;
9671	case LPFC_PCI_DEV_OC:
9672		lpfc_pci_remove_one_s4(pdev);
9673		break;
9674	default:
9675		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9676				"1424 Invalid PCI device group: 0x%x\n",
9677				phba->pci_dev_grp);
9678		break;
9679	}
9680	return;
9681}
9682
9683/**
9684 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9685 * @pdev: pointer to PCI device
9686 * @msg: power management message
9687 *
9688 * This routine is to be registered to the kernel's PCI subsystem to support
9689 * system Power Management (PM). When PM invokes this method, it dispatches
9690 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9691 * suspend the device.
9692 *
9693 * Return code
9694 * 	0 - driver suspended the device
9695 * 	Error otherwise
9696 **/
9697static int
9698lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9699{
9700	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9701	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9702	int rc = -ENODEV;
9703
9704	switch (phba->pci_dev_grp) {
9705	case LPFC_PCI_DEV_LP:
9706		rc = lpfc_pci_suspend_one_s3(pdev, msg);
9707		break;
9708	case LPFC_PCI_DEV_OC:
9709		rc = lpfc_pci_suspend_one_s4(pdev, msg);
9710		break;
9711	default:
9712		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9713				"1425 Invalid PCI device group: 0x%x\n",
9714				phba->pci_dev_grp);
9715		break;
9716	}
9717	return rc;
9718}
9719
9720/**
9721 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9722 * @pdev: pointer to PCI device
9723 *
9724 * This routine is to be registered to the kernel's PCI subsystem to support
9725 * system Power Management (PM). When PM invokes this method, it dispatches
9726 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9727 * resume the device.
9728 *
9729 * Return code
 * 	0 - driver resumed the device
9731 * 	Error otherwise
9732 **/
9733static int
9734lpfc_pci_resume_one(struct pci_dev *pdev)
9735{
9736	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9737	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9738	int rc = -ENODEV;
9739
9740	switch (phba->pci_dev_grp) {
9741	case LPFC_PCI_DEV_LP:
9742		rc = lpfc_pci_resume_one_s3(pdev);
9743		break;
9744	case LPFC_PCI_DEV_OC:
9745		rc = lpfc_pci_resume_one_s4(pdev);
9746		break;
9747	default:
9748		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9749				"1426 Invalid PCI device group: 0x%x\n",
9750				phba->pci_dev_grp);
9751		break;
9752	}
9753	return rc;
9754}
9755
9756/**
9757 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
9758 * @pdev: pointer to PCI device.
9759 * @state: the current PCI connection state.
9760 *
9761 * This routine is registered to the PCI subsystem for error handling. This
9762 * function is called by the PCI subsystem after a PCI bus error affecting
9763 * this device has been detected. When this routine is invoked, it dispatches
9764 * the action to the proper SLI-3 or SLI-4 device error detected handling
9765 * routine, which will perform the proper error detected operation.
9766 *
9767 * Return codes
9768 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9769 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9770 **/
9771static pci_ers_result_t
9772lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9773{
9774	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9775	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9776	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9777
9778	switch (phba->pci_dev_grp) {
9779	case LPFC_PCI_DEV_LP:
9780		rc = lpfc_io_error_detected_s3(pdev, state);
9781		break;
9782	case LPFC_PCI_DEV_OC:
9783		rc = lpfc_io_error_detected_s4(pdev, state);
9784		break;
9785	default:
9786		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9787				"1427 Invalid PCI device group: 0x%x\n",
9788				phba->pci_dev_grp);
9789		break;
9790	}
9791	return rc;
9792}
9793
9794/**
9795 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
9796 * @pdev: pointer to PCI device.
9797 *
9798 * This routine is registered to the PCI subsystem for error handling. This
9799 * function is called after PCI bus has been reset to restart the PCI card
9800 * from scratch, as if from a cold-boot. When this routine is invoked, it
9801 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
9802 * routine, which will perform the proper device reset.
9803 *
9804 * Return codes
9805 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
9806 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9807 **/
9808static pci_ers_result_t
9809lpfc_io_slot_reset(struct pci_dev *pdev)
9810{
9811	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9812	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9813	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9814
9815	switch (phba->pci_dev_grp) {
9816	case LPFC_PCI_DEV_LP:
9817		rc = lpfc_io_slot_reset_s3(pdev);
9818		break;
9819	case LPFC_PCI_DEV_OC:
9820		rc = lpfc_io_slot_reset_s4(pdev);
9821		break;
9822	default:
9823		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9824				"1428 Invalid PCI device group: 0x%x\n",
9825				phba->pci_dev_grp);
9826		break;
9827	}
9828	return rc;
9829}
9830
9831/**
9832 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
9833 * @pdev: pointer to PCI device
9834 *
9835 * This routine is registered to the PCI subsystem for error handling. It
9836 * is called when kernel error recovery tells the lpfc driver that it is
9837 * OK to resume normal PCI operation after PCI bus error recovery. When
9838 * this routine is invoked, it dispatches the action to the proper SLI-3
9839 * or SLI-4 device io_resume routine, which will resume the device operation.
9840 **/
9841static void
9842lpfc_io_resume(struct pci_dev *pdev)
9843{
9844	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9845	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9846
9847	switch (phba->pci_dev_grp) {
9848	case LPFC_PCI_DEV_LP:
9849		lpfc_io_resume_s3(pdev);
9850		break;
9851	case LPFC_PCI_DEV_OC:
9852		lpfc_io_resume_s4(pdev);
9853		break;
9854	default:
9855		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9856				"1429 Invalid PCI device group: 0x%x\n",
9857				phba->pci_dev_grp);
9858		break;
9859	}
9860	return;
9861}
9862
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

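/*
 * Export the PCI ID table to module build tooling: modpost generates the
 * corresponding module aliases so this module is loaded automatically
 * when a matching adapter is discovered.
 */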
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

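/*
 * PCI error recovery (AER/EEH) callbacks: error_detected is invoked when
 * a PCI error is reported, slot_reset after the slot/link has been reset,
 * and resume once normal I/O may restart.
 */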
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

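/*
 * Entry points handed to the PCI core at registration time: probe/remove
 * for device binding and unbinding, suspend/resume for power management,
 * and the error handlers above for PCI error recovery.
 */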
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() identifies this
 * routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

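	/*
	 * With NPIV enabled, hook the vport create/delete handlers into
	 * the FC transport template before it is attached below.
	 */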
	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
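	/*
	 * Register with the PCI core; on failure, drop the transport
	 * template reference(s) taken above before returning the error.
	 */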
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() identifies this routine to the
 * kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
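	/*
	 * Release the BlockGuard (DIF) debug dump buffers, if any were
	 * allocated; assuming these are set up elsewhere in the driver
	 * when BlockGuard debugging is in use.
	 */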
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
