1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31
32#include "lpfc_hw4.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_sli4.h"
36#include "lpfc_nl.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_vport.h"
43#include "lpfc_debugfs.h"
44
45
46/* Called to verify a rcv'ed ADISC was intended for us. */
47static int
48lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49		 struct lpfc_name *nn, struct lpfc_name *pn)
50{
	/* First, we MUST have an RPI registered */
52	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53		return 0;
54
	/* Check that the WWNN / WWPN in the ADISC rsp match our internal
	 * node table entry for that node.
	 */
58	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
59		return 0;
60
61	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
62		return 0;
63
64	/* we match, return success */
65	return 1;
66}
67
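/* Validate the service parameters in a login payload against our own.  For
 * a non-FLOGI exchange the receive data field sizes are clamped to ours.
 * On success the remote node / port names are cached in the ndlp and 1 is
 * returned; 0 is returned for invalid parameters.
 */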
68int
69lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
70		 struct serv_parm *sp, uint32_t class, int flogi)
71{
72	volatile struct serv_parm *hsp = &vport->fc_sparam;
73	uint16_t hsp_value, ssp_value = 0;
74
75	/*
76	 * The receive data field size and buffer-to-buffer receive data field
77	 * size entries are 16 bits but are represented as two 8-bit fields in
78	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
80	 * correcting the byte values.
81	 */
82	if (sp->cls1.classValid) {
83		if (!flogi) {
84			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
85				     hsp->cls1.rcvDataSizeLsb);
86			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
87				     sp->cls1.rcvDataSizeLsb);
88			if (!ssp_value)
89				goto bad_service_param;
90			if (ssp_value > hsp_value) {
91				sp->cls1.rcvDataSizeLsb =
92					hsp->cls1.rcvDataSizeLsb;
93				sp->cls1.rcvDataSizeMsb =
94					hsp->cls1.rcvDataSizeMsb;
95			}
96		}
97	} else if (class == CLASS1)
98		goto bad_service_param;
99	if (sp->cls2.classValid) {
100		if (!flogi) {
101			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
102				     hsp->cls2.rcvDataSizeLsb);
103			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
104				     sp->cls2.rcvDataSizeLsb);
105			if (!ssp_value)
106				goto bad_service_param;
107			if (ssp_value > hsp_value) {
108				sp->cls2.rcvDataSizeLsb =
109					hsp->cls2.rcvDataSizeLsb;
110				sp->cls2.rcvDataSizeMsb =
111					hsp->cls2.rcvDataSizeMsb;
112			}
113		}
114	} else if (class == CLASS2)
115		goto bad_service_param;
116	if (sp->cls3.classValid) {
117		if (!flogi) {
118			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
119				     hsp->cls3.rcvDataSizeLsb);
120			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
121				     sp->cls3.rcvDataSizeLsb);
122			if (!ssp_value)
123				goto bad_service_param;
124			if (ssp_value > hsp_value) {
125				sp->cls3.rcvDataSizeLsb =
126					hsp->cls3.rcvDataSizeLsb;
127				sp->cls3.rcvDataSizeMsb =
128					hsp->cls3.rcvDataSizeMsb;
129			}
130		}
131	} else if (class == CLASS3)
132		goto bad_service_param;
133
134	/*
135	 * Preserve the upper four bits of the MSB from the PLOGI response.
136	 * These bits contain the Buffer-to-Buffer State Change Number
137	 * from the target and need to be passed to the FW.
138	 */
139	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
140	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
141	if (ssp_value > hsp_value) {
142		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
143		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
144				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
145	}
146
147	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
148	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
149	return 1;
150bad_service_param:
151	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
152			 "0207 Device %x "
153			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
154			 "invalid service parameters.  Ignoring device.\n",
155			 ndlp->nlp_DID,
156			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
157			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
158			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
159			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
160	return 0;
161}
162
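/* Return a pointer to the ELS rsp payload (just past the command word) for
 * a completed ELS IOCB.  If lpfc_els_abort already released the command
 * buffer, force a LOCAL_REJECT status and return NULL.
 */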
163static void *
164lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
165			struct lpfc_iocbq *rspiocb)
166{
167	struct lpfc_dmabuf *pcmd, *prsp;
168	uint32_t *lp;
169	void     *ptr = NULL;
170	IOCB_t   *irsp;
171
172	irsp = &rspiocb->iocb;
173	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
174
175	/* For lpfc_els_abort, context2 could be zero'ed to delay
176	 * freeing associated memory till after ABTS completes.
177	 */
178	if (pcmd) {
179		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
180				       list);
181		if (prsp) {
182			lp = (uint32_t *) prsp->virt;
183			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
184		}
185	} else {
186		/* Force ulpStatus error since we are returning NULL ptr */
187		if (!(irsp->ulpStatus)) {
188			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
189			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
190		}
191		ptr = NULL;
192	}
193	return ptr;
194}
195
196
197
198/*
199 * Free resources / clean up outstanding I/Os
200 * associated with a LPFC_NODELIST entry. This
201 * routine effectively results in a "software abort".
202 */
203int
204lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205{
206	LIST_HEAD(completions);
207	LIST_HEAD(txcmplq_completions);
208	LIST_HEAD(abort_list);
209	struct lpfc_sli  *psli = &phba->sli;
210	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
211	struct lpfc_iocbq *iocb, *next_iocb;
212
213	/* Abort outstanding I/O on NPort <nlp_DID> */
214	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
215			 "2819 Abort outstanding I/O on NPort x%x "
216			 "Data: x%x x%x x%x\n",
217			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
218			 ndlp->nlp_rpi);
219
220	lpfc_fabric_abort_nport(ndlp);
221
222	/* First check the txq */
223	spin_lock_irq(&phba->hbalock);
224	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
225		/* Check to see if iocb matches the nport we are looking for */
226		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue it and call compl with an error */
228			list_move_tail(&iocb->list, &completions);
229			pring->txq_cnt--;
230		}
231	}
232
233	/* Next check the txcmplq */
234	list_splice_init(&pring->txcmplq, &txcmplq_completions);
235	spin_unlock_irq(&phba->hbalock);
236
237	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
238		/* Check to see if iocb matches the nport we are looking for */
239		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
240			list_add_tail(&iocb->dlist, &abort_list);
241	}
242	spin_lock_irq(&phba->hbalock);
243	list_splice(&txcmplq_completions, &pring->txcmplq);
244	spin_unlock_irq(&phba->hbalock);
245
246	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
247			spin_lock_irq(&phba->hbalock);
248			list_del_init(&iocb->dlist);
249			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
250			spin_unlock_irq(&phba->hbalock);
251	}
252
253	/* Cancel all the IOCBs from the completions list */
254	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
255			      IOERR_SLI_ABORTED);
256
257	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
258	return 0;
259}
260
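/* Handle an unsolicited PLOGI: validate the WWNs and service parameters,
 * prepare the REG_LOGIN mailbox command, and respond with an ACC or LS_RJT
 * as appropriate.
 */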
261static int
262lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
263	       struct lpfc_iocbq *cmdiocb)
264{
265	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
266	struct lpfc_hba    *phba = vport->phba;
267	struct lpfc_dmabuf *pcmd;
268	uint32_t *lp;
269	IOCB_t *icmd;
270	struct serv_parm *sp;
271	LPFC_MBOXQ_t *mbox;
272	struct ls_rjt stat;
273	int rc;
274
275	memset(&stat, 0, sizeof (struct ls_rjt));
276	if (vport->port_state <= LPFC_FDISC) {
277		/* Before responding to PLOGI, check for pt2pt mode.
278		 * If we are pt2pt, with an outstanding FLOGI, abort
279		 * the FLOGI and resend it first.
280		 */
281		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
284				/* If the other side is supposed to initiate
285				 * the PLOGI anyway, just ACC it now and
286				 * move on with discovery.
287				 */
288				phba->fc_edtov = FF_DEF_EDTOV;
289				phba->fc_ratov = FF_DEF_RATOV;
290				/* Start discovery - this should just do
291				   CLEAR_LA */
292				lpfc_disc_start(vport);
293			} else
294				lpfc_initial_flogi(vport);
295		} else {
296			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
297			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
298			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
299					    ndlp, NULL);
300			return 0;
301		}
302	}
303	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
304	lp = (uint32_t *) pcmd->virt;
305	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
306	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
307		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid pname\n");
309		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
310		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
311		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
312			NULL);
313		return 0;
314	}
315	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
316		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid nname\n");
318		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
319		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
320		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
321			NULL);
322		return 0;
323	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0)) {
		/* Reject this request because of invalid parameters */
326		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
327		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
328		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
329			NULL);
330		return 0;
331	}
332	icmd = &cmdiocb->iocb;
333
334	/* PLOGI chkparm OK */
335	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
336			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
337			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
338			 ndlp->nlp_rpi);
339
340	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
341		ndlp->nlp_fcp_info |= CLASS2;
342	else
343		ndlp->nlp_fcp_info |= CLASS3;
344
345	ndlp->nlp_class_sup = 0;
346	if (sp->cls1.classValid)
347		ndlp->nlp_class_sup |= FC_COS_CLASS1;
348	if (sp->cls2.classValid)
349		ndlp->nlp_class_sup |= FC_COS_CLASS2;
350	if (sp->cls3.classValid)
351		ndlp->nlp_class_sup |= FC_COS_CLASS3;
352	if (sp->cls4.classValid)
353		ndlp->nlp_class_sup |= FC_COS_CLASS4;
354	ndlp->nlp_maxframe =
355		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
356
357	/* no need to reg_login if we are already in one of these states */
358	switch (ndlp->nlp_state) {
359	case  NLP_STE_NPR_NODE:
360		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
361			break;
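		/* Fall through */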
362	case  NLP_STE_REG_LOGIN_ISSUE:
363	case  NLP_STE_PRLI_ISSUE:
364	case  NLP_STE_UNMAPPED_NODE:
365	case  NLP_STE_MAPPED_NODE:
366		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
367		return 1;
368	}
369
370	if ((vport->fc_flag & FC_PT2PT) &&
371	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
372		/* rcv'ed PLOGI decides what our NPortId will be */
373		vport->fc_myDID = icmd->un.rcvels.parmRo;
374		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
375		if (mbox == NULL)
376			goto out;
377		lpfc_config_link(phba, mbox);
378		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
379		mbox->vport = vport;
380		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
381		if (rc == MBX_NOT_FINISHED) {
382			mempool_free(mbox, phba->mbox_mem_pool);
383			goto out;
384		}
385
386		lpfc_can_disctmo(vport);
387	}
388	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
389	if (!mbox)
390		goto out;
391
392	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
393	if (phba->sli_rev == LPFC_SLI_REV4)
394		lpfc_unreg_rpi(vport, ndlp);
395
396	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
397			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
398	if (rc) {
399		mempool_free(mbox, phba->mbox_mem_pool);
400		goto out;
401	}
402
403	/* ACC PLOGI rsp command needs to execute first,
404	 * queue this mbox command to be processed later.
405	 */
406	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
407	/*
408	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
409	 * command issued in lpfc_cmpl_els_acc().
410	 */
411	mbox->vport = vport;
412	spin_lock_irq(shost->host_lock);
413	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
414	spin_unlock_irq(shost->host_lock);
415
416	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending the ACC rsp for the received PLOGI. If the pending PLOGI
	 * is not canceled here, it will be rejected by the remote port
	 * and retried. On a configuration with a single discovery
	 * thread this causes a huge delay in discovery, and it also
	 * leaves multiple state machines running in parallel for this
	 * node.
424	 */
425	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
426		/* software abort outstanding PLOGI */
427		lpfc_els_abort(phba, ndlp);
428	}
429
430	if ((vport->port_type == LPFC_NPIV_PORT &&
431	     vport->cfg_restrict_login)) {
432
		/* In order to preserve RPIs, we want to clean up
434		 * the default RPI the firmware created to rcv
435		 * this ELS request. The only way to do this is
436		 * to register, then unregister the RPI.
437		 */
438		spin_lock_irq(shost->host_lock);
439		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
440		spin_unlock_irq(shost->host_lock);
441		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
442		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
443		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
444			ndlp, mbox);
445		if (rc)
446			mempool_free(mbox, phba->mbox_mem_pool);
447		return 1;
448	}
449	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
450	if (rc)
451		mempool_free(mbox, phba->mbox_mem_pool);
452	return 1;
453out:
454	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
455	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
456	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
457	return 0;
458}
459
460/**
461 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
462 * @phba: pointer to lpfc hba data structure.
463 * @mboxq: pointer to mailbox object
464 *
465 * This routine is invoked to issue a completion to a rcv'ed
466 * ADISC or PDISC after the paused RPI has been resumed.
467 **/
468static void
469lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
470{
471	struct lpfc_vport *vport;
472	struct lpfc_iocbq *elsiocb;
473	struct lpfc_nodelist *ndlp;
474	uint32_t cmd;
475
476	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
477	ndlp = (struct lpfc_nodelist *) mboxq->context2;
478	vport = mboxq->vport;
479	cmd = elsiocb->drvrTimeout;
480
481	if (cmd == ELS_CMD_ADISC) {
482		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
483	} else {
484		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
485			ndlp, NULL);
486	}
487	kfree(elsiocb);
488	mempool_free(mboxq, phba->mbox_mem_pool);
489}
490
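/* Handle an unsolicited ADISC or PDISC.  If the node checks out, ACC it
 * (resuming the RPI first on SLI4 ports) and move the node to MAPPED or
 * UNMAPPED state; otherwise reject it and drop back to NPR with a delayed
 * PLOGI retry.
 */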
491static int
492lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
493		struct lpfc_iocbq *cmdiocb)
494{
495	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
496	struct lpfc_iocbq  *elsiocb;
497	struct lpfc_dmabuf *pcmd;
498	struct serv_parm   *sp;
499	struct lpfc_name   *pnn, *ppn;
500	struct ls_rjt stat;
501	ADISC *ap;
502	IOCB_t *icmd;
503	uint32_t *lp;
504	uint32_t cmd;
505
506	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
507	lp = (uint32_t *) pcmd->virt;
508
509	cmd = *lp++;
510	if (cmd == ELS_CMD_ADISC) {
511		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) &ap->nodeName;
		ppn = (struct lpfc_name *) &ap->portName;
514	} else {
515		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) &sp->nodeName;
		ppn = (struct lpfc_name *) &sp->portName;
518	}
519
520	icmd = &cmdiocb->iocb;
521	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
522
523		/*
		 * As soon as we send ACC, the remote NPort can
525		 * start sending us data. Thus, for SLI4 we must
526		 * resume the RPI before the ACC goes out.
527		 */
528		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
529			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
530				GFP_KERNEL);
531			if (elsiocb) {
532
533				/* Save info from cmd IOCB used in rsp */
534				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
535					sizeof(struct lpfc_iocbq));
536
537				/* Save the ELS cmd */
538				elsiocb->drvrTimeout = cmd;
539
540				lpfc_sli4_resume_rpi(ndlp,
541					lpfc_mbx_cmpl_resume_rpi, elsiocb);
542				goto out;
543			}
544		}
545
546		if (cmd == ELS_CMD_ADISC) {
547			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
548		} else {
549			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
550				ndlp, NULL);
551		}
552out:
553		/* If we are authenticated, move to the proper state */
554		if (ndlp->nlp_type & NLP_FCP_TARGET)
555			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
556		else
557			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
558
559		return 1;
560	}
	/* Reject this request because of invalid parameters */
562	stat.un.b.lsRjtRsvd0 = 0;
563	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
564	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
565	stat.un.b.vendorUnique = 0;
566	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
567
568	/* 1 sec timeout */
569	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
570
571	spin_lock_irq(shost->host_lock);
572	ndlp->nlp_flag |= NLP_DELAY_TMO;
573	spin_unlock_irq(shost->host_lock);
574	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
575	ndlp->nlp_prev_state = ndlp->nlp_state;
576	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
577	return 0;
578}
579
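/* Handle an unsolicited LOGO or PRLO: ACC it, deal with a LOGO from the
 * fabric (vlink recovery), and park the node in NPR state.
 */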
580static int
581lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
582	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
583{
584	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
585	struct lpfc_hba    *phba = vport->phba;
586	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;
588
589	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
590	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
591	 * PLOGIs during LOGO storms from a device.
592	 */
593	spin_lock_irq(shost->host_lock);
594	ndlp->nlp_flag |= NLP_LOGO_ACC;
595	spin_unlock_irq(shost->host_lock);
596	if (els_cmd == ELS_CMD_PRLO)
597		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
598	else
599		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
600	if (ndlp->nlp_DID == Fabric_DID) {
601		if (vport->port_state <= LPFC_FDISC)
602			goto out;
603		lpfc_linkdown_port(vport);
604		spin_lock_irq(shost->host_lock);
605		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
606		spin_unlock_irq(shost->host_lock);
607		vports = lpfc_create_vport_work_array(phba);
608		if (vports) {
609			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
610					i++) {
611				if ((!(vports[i]->fc_flag &
612					FC_VPORT_LOGO_RCVD)) &&
613					(vports[i]->port_state > LPFC_FDISC)) {
614					active_vlink_present = 1;
615					break;
616				}
617			}
618			lpfc_destroy_vport_work_array(phba, vports);
619		}
620
621		if (active_vlink_present) {
622			/*
623			 * If there are other active VLinks present,
624			 * re-instantiate the Vlink using FDISC.
625			 */
626			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
627			spin_lock_irq(shost->host_lock);
628			ndlp->nlp_flag |= NLP_DELAY_TMO;
629			spin_unlock_irq(shost->host_lock);
630			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
631			vport->port_state = LPFC_FDISC;
632		} else {
633			spin_lock_irq(shost->host_lock);
634			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
635			spin_unlock_irq(shost->host_lock);
636			lpfc_retry_pport_discovery(phba);
637		}
638	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
639		((ndlp->nlp_type & NLP_FCP_TARGET) ||
640		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
641		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
642		/* Only try to re-login if this is NOT a Fabric Node */
643		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
644		spin_lock_irq(shost->host_lock);
645		ndlp->nlp_flag |= NLP_DELAY_TMO;
646		spin_unlock_irq(shost->host_lock);
647
648		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
649	}
650out:
651	ndlp->nlp_prev_state = ndlp->nlp_state;
652	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
653
654	spin_lock_irq(shost->host_lock);
655	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
656	spin_unlock_irq(shost->host_lock);
657	/* The driver has to wait until the ACC completes before it continues
658	 * processing the LOGO.  The action will resume in
659	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
660	 * unreg_login, the driver waits so the ACC does not get aborted.
661	 */
662	return 0;
663}
664
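/* Parse an unsolicited PRLI to refresh the node's FCP initiator / target
 * role bits and, if an rport exists, report the role change to the FC
 * transport.
 */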
665static void
666lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
667	      struct lpfc_iocbq *cmdiocb)
668{
669	struct lpfc_dmabuf *pcmd;
670	uint32_t *lp;
671	PRLI *npr;
672	struct fc_rport *rport = ndlp->rport;
673	u32 roles;
674
675	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
676	lp = (uint32_t *) pcmd->virt;
677	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
678
679	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
680	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
681	if (npr->prliType == PRLI_FCP_TYPE) {
682		if (npr->initiatorFunc)
683			ndlp->nlp_type |= NLP_FCP_INITIATOR;
684		if (npr->targetFunc)
685			ndlp->nlp_type |= NLP_FCP_TARGET;
686		if (npr->Retry)
687			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
688	}
689	if (rport) {
690		/* We need to update the rport role values */
691		roles = FC_RPORT_ROLE_UNKNOWN;
692		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
693			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
694		if (ndlp->nlp_type & NLP_FCP_TARGET)
695			roles |= FC_RPORT_ROLE_FCP_TARGET;
696
697		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
698			"rport rolechg:   role:x%x did:x%x flg:x%x",
699			roles, ndlp->nlp_DID, ndlp->nlp_flag);
700
701		fc_remote_port_rolechg(rport, roles);
702	}
703}
704
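/* Decide whether this node should be re-authenticated with ADISC on the
 * next discovery cycle.  Returns 1 and sets NLP_NPR_ADISC when ADISC will
 * be used; otherwise clears the flag, unregisters the RPI when one is
 * registered, and returns 0.
 */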
705static uint32_t
706lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
707{
708	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
709
710	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
711		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
712		return 0;
713	}
714
715	if (!(vport->fc_flag & FC_PT2PT)) {
716		/* Check config parameter use-adisc or FCP-2 */
717		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
718		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
719		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
720			spin_lock_irq(shost->host_lock);
721			ndlp->nlp_flag |= NLP_NPR_ADISC;
722			spin_unlock_irq(shost->host_lock);
723			return 1;
724		}
725	}
726	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
727	lpfc_unreg_rpi(vport, ndlp);
728	return 0;
729}
730
731/**
732 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
733 * @phba : Pointer to lpfc_hba structure.
734 * @vport: Pointer to lpfc_vport structure.
 * @rpi  : rpi to be released.
 *
 * This function will send an unreg_login mailbox command to the firmware
 * to release an rpi.
739 **/
740void
741lpfc_release_rpi(struct lpfc_hba *phba,
742		struct lpfc_vport *vport,
743		uint16_t rpi)
744{
745	LPFC_MBOXQ_t *pmb;
746	int rc;
747
748	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
749			GFP_KERNEL);
750	if (!pmb)
751		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			"2796 mailbox memory allocation failed\n");
753	else {
754		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
755		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
756		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
757		if (rc == MBX_NOT_FINISHED)
758			mempool_free(pmb, phba->mbox_mem_pool);
759	}
760}
761
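/* Default handler for state/event combinations that should never occur.
 * Release the RPI if a REG_LOGIN just completed, log the illegal
 * transition, and leave the node state unchanged.
 */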
762static uint32_t
763lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
764		  void *arg, uint32_t evt)
765{
766	struct lpfc_hba *phba;
767	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
768	MAILBOX_t *mb;
769	uint16_t rpi;
770
771	phba = vport->phba;
	/* Release the RPI if a reg_login is completing */
773	if (!(phba->pport->load_flag & FC_UNLOADING) &&
774		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
775		(!pmb->u.mb.mbxStatus)) {
776		mb = &pmb->u.mb;
777		rpi = pmb->u.mb.un.varWords[0];
778		lpfc_release_rpi(phba, vport, rpi);
779	}
780	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
781			 "0271 Illegal State Transition: node x%x "
782			 "event x%x, state x%x Data: x%x x%x\n",
783			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
784			 ndlp->nlp_flag);
785	return ndlp->nlp_state;
786}
787
788static uint32_t
789lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
790		  void *arg, uint32_t evt)
791{
	/* This transition is only legal if we previously
	 * rcv'ed a PLOGI. Since we don't want two discovery threads
	 * working on the same NPortID, do nothing here so this
	 * thread simply stops.
	 */
797	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
798		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
799			 "0272 Illegal State Transition: node x%x "
800			 "event x%x, state x%x Data: x%x x%x\n",
801			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
802			 ndlp->nlp_flag);
803	}
804	return ndlp->nlp_state;
805}
806
807/* Start of Discovery State Machine routines */
808
809static uint32_t
810lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
811			   void *arg, uint32_t evt)
812{
813	struct lpfc_iocbq *cmdiocb;
814
815	cmdiocb = (struct lpfc_iocbq *) arg;
816
817	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
818		return ndlp->nlp_state;
819	}
820	return NLP_STE_FREED_NODE;
821}
822
823static uint32_t
824lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
825			 void *arg, uint32_t evt)
826{
827	lpfc_issue_els_logo(vport, ndlp, 0);
828	return ndlp->nlp_state;
829}
830
831static uint32_t
832lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
833			  void *arg, uint32_t evt)
834{
835	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
836	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
837
838	spin_lock_irq(shost->host_lock);
839	ndlp->nlp_flag |= NLP_LOGO_ACC;
840	spin_unlock_irq(shost->host_lock);
841	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
842
843	return ndlp->nlp_state;
844}
845
846static uint32_t
847lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
848			   void *arg, uint32_t evt)
849{
850	return NLP_STE_FREED_NODE;
851}
852
853static uint32_t
854lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
855			   void *arg, uint32_t evt)
856{
857	return NLP_STE_FREED_NODE;
858}
859
860static uint32_t
861lpfc_device_recov_unused_node(struct lpfc_vport *vport,
862			struct lpfc_nodelist *ndlp,
863			   void *arg, uint32_t evt)
864{
865	return ndlp->nlp_state;
866}
867
868static uint32_t
869lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
870			   void *arg, uint32_t evt)
871{
872	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
873	struct lpfc_hba   *phba = vport->phba;
874	struct lpfc_iocbq *cmdiocb = arg;
875	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
876	uint32_t *lp = (uint32_t *) pcmd->virt;
877	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
878	struct ls_rjt stat;
879	int port_cmp;
880
881	memset(&stat, 0, sizeof (struct ls_rjt));
882
883	/* For a PLOGI, we only accept if our portname is less
884	 * than the remote portname.
885	 */
886	phba->fc_stat.elsLogiCol++;
887	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
888			  sizeof(struct lpfc_name));
889
890	if (port_cmp >= 0) {
891		/* Reject this request because the remote node will accept
892		   ours */
893		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
894		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
895		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
896			NULL);
897	} else {
898		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
899		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
900		    (vport->num_disc_nodes)) {
901			spin_lock_irq(shost->host_lock);
902			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
903			spin_unlock_irq(shost->host_lock);
904			/* Check if there are more PLOGIs to be sent */
905			lpfc_more_plogi(vport);
906			if (vport->num_disc_nodes == 0) {
907				spin_lock_irq(shost->host_lock);
908				vport->fc_flag &= ~FC_NDISC_ACTIVE;
909				spin_unlock_irq(shost->host_lock);
910				lpfc_can_disctmo(vport);
911				lpfc_end_rscn(vport);
912			}
913		}
914	} /* If our portname was less */
915
916	return ndlp->nlp_state;
917}
918
919static uint32_t
920lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
921			  void *arg, uint32_t evt)
922{
923	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
924	struct ls_rjt     stat;
925
926	memset(&stat, 0, sizeof (struct ls_rjt));
927	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
928	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
929	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
930	return ndlp->nlp_state;
931}
932
933static uint32_t
934lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
935			  void *arg, uint32_t evt)
936{
937	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
938
	/* software abort outstanding PLOGI */
940	lpfc_els_abort(vport->phba, ndlp);
941
942	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
943	return ndlp->nlp_state;
944}
945
946static uint32_t
947lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
948			 void *arg, uint32_t evt)
949{
950	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
951	struct lpfc_hba   *phba = vport->phba;
952	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
953
954	/* software abort outstanding PLOGI */
955	lpfc_els_abort(phba, ndlp);
956
957	if (evt == NLP_EVT_RCV_LOGO) {
958		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
959	} else {
960		lpfc_issue_els_logo(vport, ndlp, 0);
961	}
962
	/* Put ndlp in NPR state; set plogi timer for 1 sec */
964	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
965	spin_lock_irq(shost->host_lock);
966	ndlp->nlp_flag |= NLP_DELAY_TMO;
967	spin_unlock_irq(shost->host_lock);
968	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
969	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
970	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
971
972	return ndlp->nlp_state;
973}
974
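/* Completion handler for a PLOGI we issued: validate the rsp service
 * parameters and kick off REG_LOGIN.  On failure the node is marked
 * NLP_DEFER_RM and reported as freed.
 */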
975static uint32_t
976lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
977			    struct lpfc_nodelist *ndlp,
978			    void *arg,
979			    uint32_t evt)
980{
981	struct lpfc_hba    *phba = vport->phba;
982	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
983	struct lpfc_iocbq  *cmdiocb, *rspiocb;
984	struct lpfc_dmabuf *pcmd, *prsp, *mp;
985	uint32_t *lp;
986	IOCB_t *irsp;
987	struct serv_parm *sp;
988	LPFC_MBOXQ_t *mbox;
989
990	cmdiocb = (struct lpfc_iocbq *) arg;
991	rspiocb = cmdiocb->context_un.rsp_iocb;
992
993	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
994		/* Recovery from PLOGI collision logic */
995		return ndlp->nlp_state;
996	}
997
998	irsp = &rspiocb->iocb;
999
1000	if (irsp->ulpStatus)
1001		goto out;
1002
1003	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1004
1005	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1006
1007	lp = (uint32_t *) prsp->virt;
1008	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1009
1010	/* Some switches have FDMI servers returning 0 for WWN */
1011	if ((ndlp->nlp_DID != FDMI_DID) &&
1012		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
1013		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1014		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1015				 "0142 PLOGI RSP: Invalid WWN.\n");
1016		goto out;
1017	}
1018	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1019		goto out;
1020	/* PLOGI chkparm OK */
1021	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1022			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1023			 ndlp->nlp_DID, ndlp->nlp_state,
1024			 ndlp->nlp_flag, ndlp->nlp_rpi);
1025	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1026		ndlp->nlp_fcp_info |= CLASS2;
1027	else
1028		ndlp->nlp_fcp_info |= CLASS3;
1029
1030	ndlp->nlp_class_sup = 0;
1031	if (sp->cls1.classValid)
1032		ndlp->nlp_class_sup |= FC_COS_CLASS1;
1033	if (sp->cls2.classValid)
1034		ndlp->nlp_class_sup |= FC_COS_CLASS2;
1035	if (sp->cls3.classValid)
1036		ndlp->nlp_class_sup |= FC_COS_CLASS3;
1037	if (sp->cls4.classValid)
1038		ndlp->nlp_class_sup |= FC_COS_CLASS4;
1039	ndlp->nlp_maxframe =
1040		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1041
1042	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1043	if (!mbox) {
1044		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1045			"0133 PLOGI: no memory for reg_login "
1046			"Data: x%x x%x x%x x%x\n",
1047			ndlp->nlp_DID, ndlp->nlp_state,
1048			ndlp->nlp_flag, ndlp->nlp_rpi);
1049		goto out;
1050	}
1051
1052	lpfc_unreg_rpi(vport, ndlp);
1053
1054	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1055			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1056		switch (ndlp->nlp_DID) {
1057		case NameServer_DID:
1058			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1059			break;
1060		case FDMI_DID:
1061			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1062			break;
1063		default:
1064			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1065			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1066		}
1067		mbox->context2 = lpfc_nlp_get(ndlp);
1068		mbox->vport = vport;
1069		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1070		    != MBX_NOT_FINISHED) {
1071			lpfc_nlp_set_state(vport, ndlp,
1072					   NLP_STE_REG_LOGIN_ISSUE);
1073			return ndlp->nlp_state;
1074		}
1075		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1076			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		/* Decrement the node reference count taken for the failed
		 * mbox command
1079		 */
1080		lpfc_nlp_put(ndlp);
1081		mp = (struct lpfc_dmabuf *) mbox->context1;
1082		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1083		kfree(mp);
1084		mempool_free(mbox, phba->mbox_mem_pool);
1085
1086		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1087				 "0134 PLOGI: cannot issue reg_login "
1088				 "Data: x%x x%x x%x x%x\n",
1089				 ndlp->nlp_DID, ndlp->nlp_state,
1090				 ndlp->nlp_flag, ndlp->nlp_rpi);
1091	} else {
1092		mempool_free(mbox, phba->mbox_mem_pool);
1093
1094		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1095				 "0135 PLOGI: cannot format reg_login "
1096				 "Data: x%x x%x x%x x%x\n",
1097				 ndlp->nlp_DID, ndlp->nlp_state,
1098				 ndlp->nlp_flag, ndlp->nlp_rpi);
1099	}
1100
1101
1102out:
1103	if (ndlp->nlp_DID == NameServer_DID) {
1104		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1105		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1106				 "0261 Cannot Register NameServer login\n");
1107	}
1108
1109	spin_lock_irq(shost->host_lock);
1110	ndlp->nlp_flag |= NLP_DEFER_RM;
1111	spin_unlock_irq(shost->host_lock);
1112	return NLP_STE_FREED_NODE;
1113}
1114
1115static uint32_t
1116lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1117			   void *arg, uint32_t evt)
1118{
1119	return ndlp->nlp_state;
1120}
1121
1122static uint32_t
1123lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1124	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1125{
1126	struct lpfc_hba *phba;
1127	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1128	MAILBOX_t *mb = &pmb->u.mb;
1129	uint16_t rpi;
1130
1131	phba = vport->phba;
1132	/* Release the RPI */
1133	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1134		!mb->mbxStatus) {
1135		rpi = pmb->u.mb.un.varWords[0];
1136		lpfc_release_rpi(phba, vport, rpi);
1137	}
1138	return ndlp->nlp_state;
1139}
1140
1141static uint32_t
1142lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1143			   void *arg, uint32_t evt)
1144{
1145	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1146
1147	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1148		spin_lock_irq(shost->host_lock);
1149		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1150		spin_unlock_irq(shost->host_lock);
1151		return ndlp->nlp_state;
1152	} else {
1153		/* software abort outstanding PLOGI */
1154		lpfc_els_abort(vport->phba, ndlp);
1155
1156		lpfc_drop_node(vport, ndlp);
1157		return NLP_STE_FREED_NODE;
1158	}
1159}
1160
1161static uint32_t
1162lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1163			      struct lpfc_nodelist *ndlp,
1164			      void *arg,
1165			      uint32_t evt)
1166{
1167	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1168	struct lpfc_hba  *phba = vport->phba;
1169
1170	/* Don't do anything that will mess up processing of the
1171	 * previous RSCN.
1172	 */
1173	if (vport->fc_flag & FC_RSCN_DEFERRED)
1174		return ndlp->nlp_state;
1175
1176	/* software abort outstanding PLOGI */
1177	lpfc_els_abort(phba, ndlp);
1178
1179	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1180	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1181	spin_lock_irq(shost->host_lock);
1182	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1183	spin_unlock_irq(shost->host_lock);
1184
1185	return ndlp->nlp_state;
1186}
1187
1188static uint32_t
1189lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1190			   void *arg, uint32_t evt)
1191{
1192	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1193	struct lpfc_hba   *phba = vport->phba;
1194	struct lpfc_iocbq *cmdiocb;
1195
1196	/* software abort outstanding ADISC */
1197	lpfc_els_abort(phba, ndlp);
1198
1199	cmdiocb = (struct lpfc_iocbq *) arg;
1200
1201	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1202		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1203			spin_lock_irq(shost->host_lock);
1204			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1205			spin_unlock_irq(shost->host_lock);
1206			if (vport->num_disc_nodes)
1207				lpfc_more_adisc(vport);
1208		}
1209		return ndlp->nlp_state;
1210	}
1211	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1212	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1213	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1214
1215	return ndlp->nlp_state;
1216}
1217
1218static uint32_t
1219lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1220			  void *arg, uint32_t evt)
1221{
1222	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1223
1224	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1225	return ndlp->nlp_state;
1226}
1227
1228static uint32_t
1229lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1230			  void *arg, uint32_t evt)
1231{
1232	struct lpfc_hba *phba = vport->phba;
1233	struct lpfc_iocbq *cmdiocb;
1234
1235	cmdiocb = (struct lpfc_iocbq *) arg;
1236
1237	/* software abort outstanding ADISC */
1238	lpfc_els_abort(phba, ndlp);
1239
1240	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1241	return ndlp->nlp_state;
1242}
1243
1244static uint32_t
1245lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1246			    struct lpfc_nodelist *ndlp,
1247			    void *arg, uint32_t evt)
1248{
1249	struct lpfc_iocbq *cmdiocb;
1250
1251	cmdiocb = (struct lpfc_iocbq *) arg;
1252
1253	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1254	return ndlp->nlp_state;
1255}
1256
1257static uint32_t
1258lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1259			  void *arg, uint32_t evt)
1260{
1261	struct lpfc_iocbq *cmdiocb;
1262
1263	cmdiocb = (struct lpfc_iocbq *) arg;
1264
1265	/* Treat like rcv logo */
1266	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1267	return ndlp->nlp_state;
1268}
1269
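/* Completion handler for an ADISC we issued: on success move the node to
 * MAPPED or UNMAPPED (resuming the RPI on SLI4 ports); on failure unreg
 * the RPI and fall back to a delayed PLOGI from NPR state.
 */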
1270static uint32_t
1271lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1272			    struct lpfc_nodelist *ndlp,
1273			    void *arg, uint32_t evt)
1274{
1275	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1276	struct lpfc_hba   *phba = vport->phba;
1277	struct lpfc_iocbq *cmdiocb, *rspiocb;
1278	IOCB_t *irsp;
1279	ADISC *ap;
1280	int rc;
1281
1282	cmdiocb = (struct lpfc_iocbq *) arg;
1283	rspiocb = cmdiocb->context_un.rsp_iocb;
1284
1285	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1286	irsp = &rspiocb->iocb;
1287
1288	if ((irsp->ulpStatus) ||
1289	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1290		/* 1 sec timeout */
1291		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1292		spin_lock_irq(shost->host_lock);
1293		ndlp->nlp_flag |= NLP_DELAY_TMO;
1294		spin_unlock_irq(shost->host_lock);
1295		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1296
1297		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1298		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1299
1300		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1301		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1302		lpfc_unreg_rpi(vport, ndlp);
1303		return ndlp->nlp_state;
1304	}
1305
1306	if (phba->sli_rev == LPFC_SLI_REV4) {
1307		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1308		if (rc) {
1309			/* Stay in state and retry. */
1310			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1311			return ndlp->nlp_state;
1312		}
1313	}
1314
1315	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1316		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1317		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1318	} else {
1319		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1320		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1321	}
1322
1323	return ndlp->nlp_state;
1324}
1325
1326static uint32_t
1327lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1328			   void *arg, uint32_t evt)
1329{
1330	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1331
1332	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1333		spin_lock_irq(shost->host_lock);
1334		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1335		spin_unlock_irq(shost->host_lock);
1336		return ndlp->nlp_state;
1337	} else {
1338		/* software abort outstanding ADISC */
1339		lpfc_els_abort(vport->phba, ndlp);
1340
1341		lpfc_drop_node(vport, ndlp);
1342		return NLP_STE_FREED_NODE;
1343	}
1344}
1345
1346static uint32_t
1347lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1348			      struct lpfc_nodelist *ndlp,
1349			      void *arg,
1350			      uint32_t evt)
1351{
1352	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1353	struct lpfc_hba  *phba = vport->phba;
1354
1355	/* Don't do anything that will mess up processing of the
1356	 * previous RSCN.
1357	 */
1358	if (vport->fc_flag & FC_RSCN_DEFERRED)
1359		return ndlp->nlp_state;
1360
1361	/* software abort outstanding ADISC */
1362	lpfc_els_abort(phba, ndlp);
1363
1364	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1365	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1366	spin_lock_irq(shost->host_lock);
1367	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1368	spin_unlock_irq(shost->host_lock);
1369	lpfc_disc_set_adisc(vport, ndlp);
1370	return ndlp->nlp_state;
1371}
1372
1373static uint32_t
1374lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1375			      struct lpfc_nodelist *ndlp,
1376			      void *arg,
1377			      uint32_t evt)
1378{
1379	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1380
1381	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1382	return ndlp->nlp_state;
1383}
1384
1385static uint32_t
1386lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1387			     struct lpfc_nodelist *ndlp,
1388			     void *arg,
1389			     uint32_t evt)
1390{
1391	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1392
1393	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1394	return ndlp->nlp_state;
1395}
1396
1397static uint32_t
1398lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1399			     struct lpfc_nodelist *ndlp,
1400			     void *arg,
1401			     uint32_t evt)
1402{
1403	struct lpfc_hba   *phba = vport->phba;
1404	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1405	LPFC_MBOXQ_t	  *mb;
1406	LPFC_MBOXQ_t	  *nextmb;
1407	struct lpfc_dmabuf *mp;
1408
1411	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1412	if ((mb = phba->sli.mbox_active)) {
1413		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1414		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1415			lpfc_nlp_put(ndlp);
1416			mb->context2 = NULL;
1417			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1418		}
1419	}
1420
1421	spin_lock_irq(&phba->hbalock);
1422	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1423		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1424		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1425			mp = (struct lpfc_dmabuf *) (mb->context1);
1426			if (mp) {
1427				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1428				kfree(mp);
1429			}
1430			lpfc_nlp_put(ndlp);
1431			list_del(&mb->list);
1432			phba->sli.mboxq_cnt--;
1433			mempool_free(mb, phba->mbox_mem_pool);
1434		}
1435	}
1436	spin_unlock_irq(&phba->hbalock);
1437
1438	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1439	return ndlp->nlp_state;
1440}
1441
1442static uint32_t
1443lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1444			       struct lpfc_nodelist *ndlp,
1445			       void *arg,
1446			       uint32_t evt)
1447{
1448	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1449
1450	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1451	return ndlp->nlp_state;
1452}
1453
1454static uint32_t
1455lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1456			     struct lpfc_nodelist *ndlp,
1457			     void *arg,
1458			     uint32_t evt)
1459{
1460	struct lpfc_iocbq *cmdiocb;
1461
1462	cmdiocb = (struct lpfc_iocbq *) arg;
1463	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1464	return ndlp->nlp_state;
1465}
1466
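/* Completion handler for the REG_LOGIN mailbox command: on success record
 * the RPI and issue a PRLI (or go straight to UNMAPPED for fabric nodes);
 * on failure retry via LOGO/PLOGI unless the HBA is out of RPIs.
 */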
1467static uint32_t
1468lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1469				  struct lpfc_nodelist *ndlp,
1470				  void *arg,
1471				  uint32_t evt)
1472{
1473	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1474	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1475	MAILBOX_t *mb = &pmb->u.mb;
1476	uint32_t did  = mb->un.varWords[1];
1477
1478	if (mb->mbxStatus) {
1479		/* RegLogin failed */
1480		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1481				"0246 RegLogin failed Data: x%x x%x x%x x%x "
1482				 "x%x\n",
1483				 did, mb->mbxStatus, vport->port_state,
1484				 mb->un.varRegLogin.vpi,
1485				 mb->un.varRegLogin.rpi);
1486		/*
1487		 * If RegLogin failed due to lack of HBA resources do not
1488		 * retry discovery.
1489		 */
1490		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1491			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1492			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1493			return ndlp->nlp_state;
1494		}
1495
		/* Put ndlp in NPR state; set plogi timer for 1 sec */
1497		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1498		spin_lock_irq(shost->host_lock);
1499		ndlp->nlp_flag |= NLP_DELAY_TMO;
1500		spin_unlock_irq(shost->host_lock);
1501		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1502
1503		lpfc_issue_els_logo(vport, ndlp, 0);
1504		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1505		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1506		return ndlp->nlp_state;
1507	}
1508
1509	/* SLI4 ports have preallocated logical rpis. */
1510	if (vport->phba->sli_rev < LPFC_SLI_REV4)
1511		ndlp->nlp_rpi = mb->un.varWords[0];
1512
1513	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1514
1515	/* Only if we are not a fabric nport do we issue PRLI */
1516	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1517		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1518		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1519		lpfc_issue_els_prli(vport, ndlp, 0);
1520	} else {
1521		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1522		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1523	}
1524	return ndlp->nlp_state;
1525}
1526
1527static uint32_t
1528lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1529			      struct lpfc_nodelist *ndlp,
1530			      void *arg,
1531			      uint32_t evt)
1532{
1533	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1534
1535	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1536		spin_lock_irq(shost->host_lock);
1537		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1538		spin_unlock_irq(shost->host_lock);
1539		return ndlp->nlp_state;
1540	} else {
1541		lpfc_drop_node(vport, ndlp);
1542		return NLP_STE_FREED_NODE;
1543	}
1544}
1545
1546static uint32_t
1547lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1548				 struct lpfc_nodelist *ndlp,
1549				 void *arg,
1550				 uint32_t evt)
1551{
1552	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1553
1554	/* Don't do anything that will mess up processing of the
1555	 * previous RSCN.
1556	 */
1557	if (vport->fc_flag & FC_RSCN_DEFERRED)
1558		return ndlp->nlp_state;
1559
1560	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1561	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1562	spin_lock_irq(shost->host_lock);
1563	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1564	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1565	spin_unlock_irq(shost->host_lock);
1566	lpfc_disc_set_adisc(vport, ndlp);
1567	return ndlp->nlp_state;
1568}
1569
1570static uint32_t
1571lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1572			  void *arg, uint32_t evt)
1573{
1574	struct lpfc_iocbq *cmdiocb;
1575
1576	cmdiocb = (struct lpfc_iocbq *) arg;
1577
1578	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1579	return ndlp->nlp_state;
1580}
1581
1582static uint32_t
1583lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1584			 void *arg, uint32_t evt)
1585{
1586	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1587
1588	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1589	return ndlp->nlp_state;
1590}
1591
1592static uint32_t
1593lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1594			 void *arg, uint32_t evt)
1595{
1596	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1597
1598	/* Software abort outstanding PRLI before sending acc */
1599	lpfc_els_abort(vport->phba, ndlp);
1600
1601	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1602	return ndlp->nlp_state;
1603}
1604
1605static uint32_t
1606lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1607			   void *arg, uint32_t evt)
1608{
1609	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1610
1611	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1612	return ndlp->nlp_state;
1613}
1614
/* This routine is invoked when we rcv a PRLO request from an nport
1616 * we are logged into.  We should send back a PRLO rsp setting the
1617 * appropriate bits.
1618 * NEXT STATE = PRLI_ISSUE
1619 */
1620static uint32_t
1621lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1622			 void *arg, uint32_t evt)
1623{
1624	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1625
1626	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1627	return ndlp->nlp_state;
1628}
1629
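/* Completion handler for a PRLI we issued: record the FCP roles from the
 * rsp and move the node to MAPPED or UNMAPPED.  NPIV ports with
 * restrict_login set log out nodes that are not FCP targets.
 */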
1630static uint32_t
1631lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1632			  void *arg, uint32_t evt)
1633{
1634	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1635	struct lpfc_iocbq *cmdiocb, *rspiocb;
1636	struct lpfc_hba   *phba = vport->phba;
1637	IOCB_t *irsp;
1638	PRLI *npr;
1639
1640	cmdiocb = (struct lpfc_iocbq *) arg;
1641	rspiocb = cmdiocb->context_un.rsp_iocb;
1642	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1643
1644	irsp = &rspiocb->iocb;
1645	if (irsp->ulpStatus) {
1646		if ((vport->port_type == LPFC_NPIV_PORT) &&
1647		    vport->cfg_restrict_login) {
1648			goto out;
1649		}
1650		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1651		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1652		return ndlp->nlp_state;
1653	}
1654
1655	/* Check out PRLI rsp */
1656	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1657	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1658	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1659	    (npr->prliType == PRLI_FCP_TYPE)) {
1660		if (npr->initiatorFunc)
1661			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1662		if (npr->targetFunc)
1663			ndlp->nlp_type |= NLP_FCP_TARGET;
1664		if (npr->Retry)
1665			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1666	}
1667	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1668	    (vport->port_type == LPFC_NPIV_PORT) &&
1669	     vport->cfg_restrict_login) {
1670out:
1671		spin_lock_irq(shost->host_lock);
1672		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1673		spin_unlock_irq(shost->host_lock);
1674		lpfc_issue_els_logo(vport, ndlp, 0);
1675
1676		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1677		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1678		return ndlp->nlp_state;
1679	}
1680
1681	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1682	if (ndlp->nlp_type & NLP_FCP_TARGET)
1683		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1684	else
1685		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1686	return ndlp->nlp_state;
1687}
1688
1689/*! lpfc_device_rm_prli_issue
1690 *
1691 * \pre
1692 * \post
1693 * \param   phba
1694 * \param   ndlp
1695 * \param   arg
1696 * \param   evt
1697 * \return  uint32_t
1698 *
1699 * \b Description:
 *    This routine is invoked when we receive a request to remove an nport
 *    we are in the process of PRLIing. We should software abort the
 *    outstanding PRLI, unreg the login, and send a LOGO. We will change the
 *    node state to UNUSED_NODE and put it on the plogi list so it can be
 *    freed when the LOGO completes.
1704 *
1705 */
1706
1707static uint32_t
1708lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1709			  void *arg, uint32_t evt)
1710{
1711	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1712
1713	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1714		spin_lock_irq(shost->host_lock);
1715		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1716		spin_unlock_irq(shost->host_lock);
1717		return ndlp->nlp_state;
1718	} else {
		/* software abort outstanding PRLI */
1720		lpfc_els_abort(vport->phba, ndlp);
1721
1722		lpfc_drop_node(vport, ndlp);
1723		return NLP_STE_FREED_NODE;
1724	}
1725}
1726
1727
1728/*! lpfc_device_recov_prli_issue
1729 *
1730 * \pre
1731 * \post
1732 * \param   phba
1733 * \param   ndlp
1734 * \param   arg
1735 * \param   evt
1736 * \return  uint32_t
1737 *
1738 * \b Description:
 *    This routine is invoked when the state of a device is unknown, like
1740 *    during a link down. We should remove the nodelist entry from the
1741 *    unmapped list, issue a UNREG_LOGIN, do a software abort of the
1742 *    outstanding PRLI command, then free the node entry.
1743 */
1744static uint32_t
1745lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1746			     struct lpfc_nodelist *ndlp,
1747			     void *arg,
1748			     uint32_t evt)
1749{
1750	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1751	struct lpfc_hba  *phba = vport->phba;
1752
1753	/* Don't do anything that will mess up processing of the
1754	 * previous RSCN.
1755	 */
1756	if (vport->fc_flag & FC_RSCN_DEFERRED)
1757		return ndlp->nlp_state;
1758
1759	/* software abort outstanding PRLI */
1760	lpfc_els_abort(phba, ndlp);
1761
1762	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1763	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1764	spin_lock_irq(shost->host_lock);
1765	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1766	spin_unlock_irq(shost->host_lock);
1767	lpfc_disc_set_adisc(vport, ndlp);
1768	return ndlp->nlp_state;
1769}
1770
1771static uint32_t
1772lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1773			  void *arg, uint32_t evt)
1774{
1775	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1776
1777	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1778	return ndlp->nlp_state;
1779}
1780
1781static uint32_t
1782lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1783			 void *arg, uint32_t evt)
1784{
1785	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1786
1787	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1788	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1789	return ndlp->nlp_state;
1790}
1791
1792static uint32_t
1793lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1794			 void *arg, uint32_t evt)
1795{
1796	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1797
1798	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1799	return ndlp->nlp_state;
1800}
1801
1802static uint32_t
1803lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1804			   void *arg, uint32_t evt)
1805{
1806	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1807
1808	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1809	return ndlp->nlp_state;
1810}
1811
1812static uint32_t
1813lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1814			 void *arg, uint32_t evt)
1815{
1816	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1817
1818	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1819	return ndlp->nlp_state;
1820}
1821
1822static uint32_t
1823lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1824			     struct lpfc_nodelist *ndlp,
1825			     void *arg,
1826			     uint32_t evt)
1827{
1828	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1829
1830	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1831	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1832	spin_lock_irq(shost->host_lock);
1833	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1834	spin_unlock_irq(shost->host_lock);
1835	lpfc_disc_set_adisc(vport, ndlp);
1836
1837	return ndlp->nlp_state;
1838}
1839
1840static uint32_t
1841lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1842			   void *arg, uint32_t evt)
1843{
1844	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1845
1846	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1847	return ndlp->nlp_state;
1848}
1849
1850static uint32_t
1851lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1852			  void *arg, uint32_t evt)
1853{
1854	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1855
1856	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1857	return ndlp->nlp_state;
1858}
1859
1860static uint32_t
1861lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1862			  void *arg, uint32_t evt)
1863{
1864	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1865
1866	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1867	return ndlp->nlp_state;
1868}
1869
1870static uint32_t
1871lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1872			    struct lpfc_nodelist *ndlp,
1873			    void *arg, uint32_t evt)
1874{
1875	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1876
1877	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1878	return ndlp->nlp_state;
1879}
1880
1881static uint32_t
1882lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1883			  void *arg, uint32_t evt)
1884{
1885	struct lpfc_hba  *phba = vport->phba;
1886	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1887
1888	/* flush the target */
1889	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1890			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
1891
1892	/* Treat like rcv logo */
1893	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1894	return ndlp->nlp_state;
1895}
1896
1897static uint32_t
1898lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
1899			      struct lpfc_nodelist *ndlp,
1900			      void *arg,
1901			      uint32_t evt)
1902{
1903	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1904
1905	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1906	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1907	spin_lock_irq(shost->host_lock);
1908	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1909	spin_unlock_irq(shost->host_lock);
1910	lpfc_disc_set_adisc(vport, ndlp);
1911	return ndlp->nlp_state;
1912}
1913
1914static uint32_t
1915lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1916			void *arg, uint32_t evt)
1917{
1918	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1919	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
1920
1921	/* Ignore PLOGI if we have an outstanding LOGO */
1922	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
1923		return ndlp->nlp_state;
1924	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1925		lpfc_cancel_retry_delay_tmo(vport, ndlp);
1926		spin_lock_irq(shost->host_lock);
1927		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1928		spin_unlock_irq(shost->host_lock);
1929	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1930		/* send PLOGI immediately, move to PLOGI issue state */
1931		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1932			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1933			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1934			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1935		}
1936	}
1937	return ndlp->nlp_state;
1938}
1939
1940static uint32_t
1941lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1942		       void *arg, uint32_t evt)
1943{
1944	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1945	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1946	struct ls_rjt     stat;
1947
1948	memset(&stat, 0, sizeof (struct ls_rjt));
1949	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1950	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1951	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1952
1953	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1954		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1955			spin_lock_irq(shost->host_lock);
1956			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1957			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1958			spin_unlock_irq(shost->host_lock);
1959			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1960			lpfc_issue_els_adisc(vport, ndlp, 0);
1961		} else {
1962			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1963			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1964			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1965		}
1966	}
1967	return ndlp->nlp_state;
1968}
1969
1970static uint32_t
1971lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
1972		       void *arg, uint32_t evt)
1973{
1974	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1975
1976	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1977	return ndlp->nlp_state;
1978}
1979
1980static uint32_t
1981lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1982			 void *arg, uint32_t evt)
1983{
1984	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1985
1986	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1987	/*
1988	 * Do not start discovery if discovery is about to start
1989	 * or discovery in progress for this node. Starting discovery
1990	 * here will affect the counting of discovery threads.
1991	 */
1992	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1993	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1994		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1995			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1996			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1997			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1998			lpfc_issue_els_adisc(vport, ndlp, 0);
1999		} else {
2000			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2001			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2002			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2003		}
2004	}
2005	return ndlp->nlp_state;
2006}
2007
2008static uint32_t
2009lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2010		       void *arg, uint32_t evt)
2011{
2012	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2013	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2014
2015	spin_lock_irq(shost->host_lock);
2016	ndlp->nlp_flag |= NLP_LOGO_ACC;
2017	spin_unlock_irq(shost->host_lock);
2018
2019	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2020
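	/* If no delayed-retry timer is pending, schedule a PLOGI retry in
	 * one second; in either case clear the ADISC flag so the node is
	 * recovered via PLOGI rather than ADISC.
	 */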
2021	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2022		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
2023		spin_lock_irq(shost->host_lock);
2024		ndlp->nlp_flag |= NLP_DELAY_TMO;
2025		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2026		spin_unlock_irq(shost->host_lock);
2027		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2028	} else {
2029		spin_lock_irq(shost->host_lock);
2030		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2031		spin_unlock_irq(shost->host_lock);
2032	}
2033	return ndlp->nlp_state;
2034}
2035
2036static uint32_t
2037lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2038			 void *arg, uint32_t evt)
2039{
2040	struct lpfc_iocbq *cmdiocb, *rspiocb;
2041	IOCB_t *irsp;
2042
2043	cmdiocb = (struct lpfc_iocbq *) arg;
2044	rspiocb = cmdiocb->context_un.rsp_iocb;
2045
2046	irsp = &rspiocb->iocb;
2047	if (irsp->ulpStatus) {
2048		ndlp->nlp_flag |= NLP_DEFER_RM;
2049		return NLP_STE_FREED_NODE;
2050	}
2051	return ndlp->nlp_state;
2052}
2053
2054static uint32_t
2055lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2056			void *arg, uint32_t evt)
2057{
2058	struct lpfc_iocbq *cmdiocb, *rspiocb;
2059	IOCB_t *irsp;
2060
2061	cmdiocb = (struct lpfc_iocbq *) arg;
2062	rspiocb = cmdiocb->context_un.rsp_iocb;
2063
2064	irsp = &rspiocb->iocb;
2065	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2066		lpfc_drop_node(vport, ndlp);
2067		return NLP_STE_FREED_NODE;
2068	}
2069	return ndlp->nlp_state;
2070}
2071
2072static uint32_t
2073lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2074			void *arg, uint32_t evt)
2075{
2076	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2077	if (ndlp->nlp_DID == Fabric_DID) {
2078		spin_lock_irq(shost->host_lock);
2079		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2080		spin_unlock_irq(shost->host_lock);
2081	}
2082	lpfc_unreg_rpi(vport, ndlp);
2083	return ndlp->nlp_state;
2084}
2085
2086static uint32_t
2087lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2088			 void *arg, uint32_t evt)
2089{
2090	struct lpfc_iocbq *cmdiocb, *rspiocb;
2091	IOCB_t *irsp;
2092
2093	cmdiocb = (struct lpfc_iocbq *) arg;
2094	rspiocb = cmdiocb->context_un.rsp_iocb;
2095
2096	irsp = &rspiocb->iocb;
2097	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2098		lpfc_drop_node(vport, ndlp);
2099		return NLP_STE_FREED_NODE;
2100	}
2101	return ndlp->nlp_state;
2102}
2103
2104static uint32_t
2105lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2106			    struct lpfc_nodelist *ndlp,
2107			    void *arg, uint32_t evt)
2108{
2109	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2110	MAILBOX_t    *mb = &pmb->u.mb;
2111
2112	if (!mb->mbxStatus) {
2113		/* SLI4 ports have preallocated logical rpis. */
2114		if (vport->phba->sli_rev < LPFC_SLI_REV4)
2115			ndlp->nlp_rpi = mb->un.varWords[0];
2116		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2117	} else {
2118		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2119			lpfc_drop_node(vport, ndlp);
2120			return NLP_STE_FREED_NODE;
2121		}
2122	}
2123	return ndlp->nlp_state;
2124}
2125
2126static uint32_t
2127lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2128			void *arg, uint32_t evt)
2129{
2130	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2131
2132	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2133		spin_lock_irq(shost->host_lock);
2134		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2135		spin_unlock_irq(shost->host_lock);
2136		return ndlp->nlp_state;
2137	}
2138	lpfc_drop_node(vport, ndlp);
2139	return NLP_STE_FREED_NODE;
2140}
2141
2142static uint32_t
2143lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2144			   void *arg, uint32_t evt)
2145{
2146	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2147
2148	/* Don't do anything that will mess up processing of the
2149	 * previous RSCN.
2150	 */
2151	if (vport->fc_flag & FC_RSCN_DEFERRED)
2152		return ndlp->nlp_state;
2153
2154	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2155	spin_lock_irq(shost->host_lock);
2156	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2157	spin_unlock_irq(shost->host_lock);
2158	return ndlp->nlp_state;
2159}
2160
2161
2162/* This next section defines the NPort Discovery State Machine */
2163
2164/* There are 4 different doubly linked lists that nodelist entries can reside on.
2165 * The plogi list and adisc list are used when Link Up discovery or RSCN
2166 * processing is needed. Each list holds the nodes that we will send PLOGI
2167 * or ADISC on. These lists will keep track of what nodes will be affected
2168 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
2169 * The unmapped_list will contain all nodes that we have successfully logged
2170 * into at the Fibre Channel level. The mapped_list will contain all nodes
2171 * that are mapped FCP targets.
2172 */
2173/*
2174 * The bind list is a list of undiscovered (potentially non-existent) nodes
2175 * that we have saved binding information on. This information is used when
2176 * nodes transition from the unmapped to the mapped list.
2177 */
2178/* For UNUSED_NODE state, the node has just been allocated.
2179 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2180 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2181 * and put on the unmapped list. For ADISC processing, the node is taken off
2182 * the ADISC list and placed on either the mapped or unmapped list (depending
2183 * on its previous state). Once on the unmapped list, a PRLI is issued and the
2184 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2185 * changed to UNMAPPED_NODE. If the completion indicates a mapped
2186 * node, the node is taken off the unmapped list. The binding list is checked
2187 * for a valid binding, or a binding is automatically assigned. If binding
2188 * assignment is unsuccessful, the node is left on the unmapped list. If
2189 * binding assignment is successful, the associated binding list entry (if
2190 * any) is removed, and the node is placed on the mapped list.
2191 */
2192/*
2193 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2194 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2195 * expire, all affected nodes will receive a DEVICE_RM event.
2196 */
2197/*
2198 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2199 * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2200 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2201 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2202 * we will first process the ADISC list.  32 entries are processed initially and
2203 * ADISC is initiated for each one.  Completions / Events for each node are
2204 * funneled through the state machine.  As each node finishes ADISC processing, it
2205 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2206 * waiting, and the ADISC list count is identically 0, then we are done. For
2207 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2208 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2209 * list.  32 entries are processed initially and PLOGI is initiated for each one.
2210 * Completions / Events for each node are funneled through the state machine.  As
2211 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2212 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2213 * identically 0, then we are done. We have now completed discovery / RSCN
2214 * handling. Upon completion, ALL nodes should be on either the mapped or
2215 * unmapped lists.
2216 */
2217
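/*
 * The table below is laid out in event-major order within each state:
 * entry index = (state * NLP_EVT_MAX_EVENT) + event, which is how
 * lpfc_disc_state_machine() selects the action routine.  As an illustrative
 * lookup (using the NLP_STE_* and NLP_EVT_* values from lpfc_disc.h), a
 * DEVICE_RECOVERY event on a node in MAPPED_NODE state resolves to
 * lpfc_device_recov_mapped_node:
 *
 *	func = lpfc_disc_action[(NLP_STE_MAPPED_NODE * NLP_EVT_MAX_EVENT) +
 *				NLP_EVT_DEVICE_RECOVERY];
 */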
2218static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2219     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2220	/* Action routine                  Event       Current State  */
2221	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2222	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2223	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2224	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2225	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2226	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2227	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2228	lpfc_disc_illegal,		/* CMPL_PRLI       */
2229	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2230	lpfc_disc_illegal,		/* CMPL_ADISC      */
2231	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2232	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2233	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
2234
2235	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2236	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2237	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2238	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2239	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2240	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2241	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2242	lpfc_disc_illegal,		/* CMPL_PRLI       */
2243	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2244	lpfc_disc_illegal,		/* CMPL_ADISC      */
2245	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2246	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2247	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2248
2249	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2250	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2251	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2252	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2253	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2254	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2255	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2256	lpfc_disc_illegal,		/* CMPL_PRLI       */
2257	lpfc_disc_illegal,		/* CMPL_LOGO       */
2258	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2259	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2260	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2261	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2262
2263	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2264	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2265	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2266	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2267	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2268	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2269	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2270	lpfc_disc_illegal,		/* CMPL_PRLI       */
2271	lpfc_disc_illegal,		/* CMPL_LOGO       */
2272	lpfc_disc_illegal,		/* CMPL_ADISC      */
2273	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2274	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2275	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2276
2277	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2278	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2279	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2280	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2281	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2282	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2283	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2284	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2285	lpfc_disc_illegal,		/* CMPL_LOGO       */
2286	lpfc_disc_illegal,		/* CMPL_ADISC      */
2287	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2288	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2289	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2290
2291	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2292	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2293	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2294	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2295	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2296	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2297	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2298	lpfc_disc_illegal,		/* CMPL_PRLI       */
2299	lpfc_disc_illegal,		/* CMPL_LOGO       */
2300	lpfc_disc_illegal,		/* CMPL_ADISC      */
2301	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2302	lpfc_disc_illegal,		/* DEVICE_RM       */
2303	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2304
2305	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2306	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2307	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2308	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2309	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2310	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2311	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2312	lpfc_disc_illegal,		/* CMPL_PRLI       */
2313	lpfc_disc_illegal,		/* CMPL_LOGO       */
2314	lpfc_disc_illegal,		/* CMPL_ADISC      */
2315	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2316	lpfc_disc_illegal,		/* DEVICE_RM       */
2317	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2318
2319	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2320	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2321	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2322	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2323	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2324	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2325	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2326	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2327	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2328	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2329	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2330	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2331	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2332};
2333
2334int
2335lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2336			void *arg, uint32_t evt)
2337{
2338	uint32_t cur_state, rc;
2339	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2340			 uint32_t);
2341	uint32_t got_ndlp = 0;
2342
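	/* Hold a reference on the ndlp across the state transition; the
	 * action routine may release the final reference and free the node.
	 */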
2343	if (lpfc_nlp_get(ndlp))
2344		got_ndlp = 1;
2345
2346	cur_state = ndlp->nlp_state;
2347
2348	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2349	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2350			 "0211 DSM in event x%x on NPort x%x in "
2351			 "state %d Data: x%x\n",
2352			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2353
2354	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2355		 "DSM in:          evt:%d ste:%d did:x%x",
2356		evt, cur_state, ndlp->nlp_DID);
2357
2358	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2359	rc = (func) (vport, ndlp, arg, evt);
2360
2361	/* DSM out state <rc> on NPort <nlp_DID> */
2362	if (got_ndlp) {
2363		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2364			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2365			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2366
2367		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2368			"DSM out:         ste:%d did:x%x flg:x%x",
2369			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2370		/* Decrement the ndlp reference count held for this function */
2371		lpfc_nlp_put(ndlp);
2372	} else {
2373		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2374			"0213 DSM out state %d on NPort free\n", rc);
2375
2376		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2377			"DSM out:         ste:%d did:x%x flg:x%x",
2378			rc, 0, 0);
2379	}
2380
2381	return rc;
2382}
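
/*
 * Callers drive the state machine by passing the node, an event-specific
 * argument (an iocbq for received and completed ELS events, a mailbox for
 * CMPL_REG_LOGIN), and the event code.  A minimal illustrative sketch of
 * how an unsolicited PLOGI would be funneled through the machine, assuming
 * the caller has already resolved the ndlp for the remote port:
 *
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 */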
2383