1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 /* See Fibre Channel protocol T11 FC-LS for details */
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
38 #include "lpfc_vport.h"
39 #include "lpfc_debugfs.h"
41 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
43 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
45 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
46 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
47 struct lpfc_nodelist *ndlp, uint8_t retry);
48 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
49 struct lpfc_iocbq *iocb);
50 static void lpfc_register_new_vport(struct lpfc_hba *phba,
51 struct lpfc_vport *vport,
52 struct lpfc_nodelist *ndlp);
54 static int lpfc_max_els_tries = 3;
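/*
 * lpfc_els_chk_latt - Check whether a link attention event is pending
 *
 * Reads the HBA Host Attention register and, if a link attention
 * (HA_LATT) is pending while this vport is still in discovery, logs the
 * condition, marks the vport with FC_ABORT_DISCOVERY and issues a
 * CLEAR_LA so the LATT event can be taken and lpfc_linkdown() can clean
 * up the in-progress discovery.  Per the ELS completion handlers below,
 * a non-zero return means "the link changed during discovery".
 */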
57 lpfc_els_chk_latt(struct lpfc_vport *vport)
59 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
60 struct lpfc_hba *phba = vport->phba;
63 if (vport->port_state >= LPFC_VPORT_READY ||
64 phba->link_state == LPFC_LINK_DOWN)
67 /* Read the HBA Host Attention Register */
68 ha_copy = readl(phba->HAregaddr);
70 if (!(ha_copy & HA_LATT))
73 /* Pending Link Event during Discovery */
74 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
75 "0237 Pending Link Event during "
76 "Discovery: State x%x\n",
77 phba->pport->port_state);
79 /* CLEAR_LA should re-enable link attention events and
80 * we should then immediately take a LATT event. The
81 * LATT processing should call lpfc_linkdown() which
82 * will cleanup any left over in-progress discovery
85 spin_lock_irq(shost->host_lock);
86 vport->fc_flag |= FC_ABORT_DISCOVERY;
87 spin_unlock_irq(shost->host_lock);
89 if (phba->link_state != LPFC_CLEAR_LA)
90 lpfc_issue_clear_la(phba, vport);
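/*
 * lpfc_prep_els_iocb - Allocate and prepare an ELS command/response iocb
 *
 * Builds the iocb for either an ELS request (expectRsp != 0,
 * CMD_ELS_REQUEST64_CR) or an ELS response (CMD_XMIT_ELS_RSP64_CX).
 * Three DMA buffers are set up: the command payload (pcmd), the
 * response payload (prsp, requests only) and a buffer pointer list
 * (pbuflist) whose BDEs describe the other two.  A reference on the
 * ndlp is taken and stored in context1; pcmd and pbuflist are hung off
 * context2/context3 so lpfc_els_free_iocb() can undo all of this.
 * Returns the prepared iocb, or NULL on any allocation failure (the
 * error exits below unwind whatever was already allocated).
 */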
95 static struct lpfc_iocbq *
96 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
97 uint16_t cmdSize, uint8_t retry,
98 struct lpfc_nodelist *ndlp, uint32_t did,
101 struct lpfc_hba *phba = vport->phba;
102 struct lpfc_iocbq *elsiocb;
103 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
104 struct ulp_bde64 *bpl;
108 if (!lpfc_is_link_up(phba))
111 /* Allocate buffer for command iocb */
112 elsiocb = lpfc_sli_get_iocbq(phba);
117 icmd = &elsiocb->iocb;
119 /* fill in BDEs for command */
120 /* Allocate buffer for command payload */
121 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
123 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
124 if (!pcmd || !pcmd->virt)
125 goto els_iocb_free_pcmb_exit;
127 INIT_LIST_HEAD(&pcmd->list);
129 /* Allocate buffer for response payload */
131 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
133 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
135 if (!prsp || !prsp->virt)
136 goto els_iocb_free_prsp_exit;
137 INIT_LIST_HEAD(&prsp->list);
141 /* Allocate buffer for Buffer ptr list */
142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
146 if (!pbuflist || !pbuflist->virt)
147 goto els_iocb_free_pbuf_exit;
149 INIT_LIST_HEAD(&pbuflist->list);
151 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
152 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
153 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
154 icmd->un.elsreq64.remoteID = did; /* DID */
156 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
157 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
158 icmd->ulpTimeout = phba->fc_ratov * 2;
160 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
161 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
163 icmd->ulpBdeCount = 1;
165 icmd->ulpClass = CLASS3;
167 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
168 icmd->un.elsreq64.myID = vport->fc_myDID;
170 /* For ELS_REQUEST64_CR, use the VPI by default */
171 icmd->ulpContext = vport->vpi;
176 bpl = (struct ulp_bde64 *) pbuflist->virt;
177 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
178 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
179 bpl->tus.f.bdeSize = cmdSize;
180 bpl->tus.f.bdeFlags = 0;
181 bpl->tus.w = le32_to_cpu(bpl->tus.w);
185 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
186 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
187 bpl->tus.f.bdeSize = FCELSSIZE;
188 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
189 bpl->tus.w = le32_to_cpu(bpl->tus.w);
192 /* prevent preparing iocb with NULL ndlp reference */
193 elsiocb->context1 = lpfc_nlp_get(ndlp);
194 if (!elsiocb->context1)
195 goto els_iocb_free_pbuf_exit;
196 elsiocb->context2 = pcmd;
197 elsiocb->context3 = pbuflist;
198 elsiocb->retry = retry;
199 elsiocb->vport = vport;
200 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
203 list_add(&prsp->list, &pcmd->list);
206 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
208 "0116 Xmit ELS command x%x to remote "
209 "NPORT x%x I/O tag: x%x, port state: x%x\n",
210 elscmd, did, elsiocb->iotag,
213 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
214 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
215 "0117 Xmit ELS response x%x to remote "
216 "NPORT x%x I/O tag: x%x, size: x%x\n",
217 elscmd, ndlp->nlp_DID, elsiocb->iotag,
222 els_iocb_free_pbuf_exit:
223 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
226 els_iocb_free_prsp_exit:
227 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
230 els_iocb_free_pcmb_exit:
232 lpfc_sli_release_iocbq(phba, elsiocb);
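/*
 * lpfc_issue_fabric_reglogin - Register the fabric login after a FLOGI
 *
 * Issues a CONFIG_LINK mailbox command and then a REG_LOGIN for
 * Fabric_DID using the fabric service parameters saved from the FLOGI
 * response.  The REG_LOGIN completion (lpfc_mbx_cmpl_fabric_reg_login)
 * holds its own reference on the fabric ndlp via mbox->context2.  In
 * the visible error paths the reference is dropped, the mailbox
 * resources are freed and the vport is set to FC_VPORT_FAILED.
 */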
237 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
239 struct lpfc_hba *phba = vport->phba;
241 struct lpfc_dmabuf *mp;
242 struct lpfc_nodelist *ndlp;
243 struct serv_parm *sp;
247 sp = &phba->fc_fabparam;
248 ndlp = lpfc_findnode_did(vport, Fabric_DID);
249 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
254 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
260 vport->port_state = LPFC_FABRIC_CFG_LINK;
261 lpfc_config_link(phba, mbox);
262 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
266 if (rc == MBX_NOT_FINISHED) {
271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
276 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
285 /* increment the reference count on ndlp to hold reference
286 * for the callback routine.
288 mbox->context2 = lpfc_nlp_get(ndlp);
290 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
291 if (rc == MBX_NOT_FINISHED) {
293 goto fail_issue_reg_login;
298 fail_issue_reg_login:
299 /* decrement the reference count on ndlp just incremented
300 * for the failed mbox command.
303 mp = (struct lpfc_dmabuf *) mbox->context1;
304 lpfc_mbuf_free(phba, mp->virt, mp->phys);
307 mempool_free(mbox, phba->mbox_mem_pool);
310 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
311 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
312 "0249 Cannot issue Register Fabric login: Err %d\n", err);
317 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
318 struct serv_parm *sp, IOCB_t *irsp)
320 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
321 struct lpfc_hba *phba = vport->phba;
322 struct lpfc_nodelist *np;
323 struct lpfc_nodelist *next_np;
325 spin_lock_irq(shost->host_lock);
326 vport->fc_flag |= FC_FABRIC;
327 spin_unlock_irq(shost->host_lock);
329 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
330 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
331 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
333 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
335 if (phba->fc_topology == TOPOLOGY_LOOP) {
336 spin_lock_irq(shost->host_lock);
337 vport->fc_flag |= FC_PUBLIC_LOOP;
338 spin_unlock_irq(shost->host_lock);
341 * If we are an N_Port connected to a Fabric, fix up sparams so
342 * logins to devices on remote loops work.
344 vport->fc_sparam.cmn.altBbCredit = 1;
347 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
348 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
349 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
350 ndlp->nlp_class_sup = 0;
351 if (sp->cls1.classValid)
352 ndlp->nlp_class_sup |= FC_COS_CLASS1;
353 if (sp->cls2.classValid)
354 ndlp->nlp_class_sup |= FC_COS_CLASS2;
355 if (sp->cls3.classValid)
356 ndlp->nlp_class_sup |= FC_COS_CLASS3;
357 if (sp->cls4.classValid)
358 ndlp->nlp_class_sup |= FC_COS_CLASS4;
359 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
360 sp->cmn.bbRcvSizeLsb;
361 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
363 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
364 if (sp->cmn.response_multiple_NPort) {
365 lpfc_printf_vlog(vport, KERN_WARNING,
367 "1816 FLOGI NPIV supported, "
368 "response data 0x%x\n",
369 sp->cmn.response_multiple_NPort);
370 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
372 /* Because we asked f/w for NPIV it still expects us
373 to call reg_vnpid at least for the physical host */
374 lpfc_printf_vlog(vport, KERN_WARNING,
376 "1817 Fabric does not support NPIV "
377 "- configuring single port mode.\n");
378 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
382 if ((vport->fc_prevDID != vport->fc_myDID) &&
383 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
385 /* If our NportID changed, we need to ensure all
386 * remaining NPORTs get unreg_login'ed.
388 list_for_each_entry_safe(np, next_np,
389 &vport->fc_nodes, nlp_listp) {
390 if (!NLP_CHK_NODE_ACT(np))
392 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
393 !(np->nlp_flag & NLP_NPR_ADISC))
395 spin_lock_irq(shost->host_lock);
396 np->nlp_flag &= ~NLP_NPR_ADISC;
397 spin_unlock_irq(shost->host_lock);
398 lpfc_unreg_rpi(vport, np);
400 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
401 lpfc_mbx_unreg_vpi(vport);
402 spin_lock_irq(shost->host_lock);
403 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
404 spin_unlock_irq(shost->host_lock);
408 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
410 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
411 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
412 lpfc_register_new_vport(phba, vport, ndlp);
415 lpfc_issue_fabric_reglogin(vport);
420 * We FLOGIed into an NPort, initiate pt2pt protocol
423 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
424 struct serv_parm *sp)
426 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
427 struct lpfc_hba *phba = vport->phba;
431 spin_lock_irq(shost->host_lock);
432 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
433 spin_unlock_irq(shost->host_lock);
435 phba->fc_edtov = FF_DEF_EDTOV;
436 phba->fc_ratov = FF_DEF_RATOV;
437 rc = memcmp(&vport->fc_portname, &sp->portName,
438 sizeof(vport->fc_portname));
440 /* This side will initiate the PLOGI */
441 spin_lock_irq(shost->host_lock);
442 vport->fc_flag |= FC_PT2PT_PLOGI;
443 spin_unlock_irq(shost->host_lock);
446 * N_Port ID cannot be 0, so set ours to LocalID; the other
447 * side will be RemoteID.
452 vport->fc_myDID = PT2PT_LocalID;
454 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
458 lpfc_config_link(phba, mbox);
460 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
462 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
463 if (rc == MBX_NOT_FINISHED) {
464 mempool_free(mbox, phba->mbox_mem_pool);
467 /* Decrement ndlp reference count indicating that ndlp can be
468 * safely released when other references to it are done.
472 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
475 * Cannot find existing Fabric ndlp, so allocate a
478 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
481 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
482 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
483 ndlp = lpfc_enable_node(vport, ndlp,
484 NLP_STE_UNUSED_NODE);
489 memcpy(&ndlp->nlp_portname, &sp->portName,
490 sizeof(struct lpfc_name));
491 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
492 sizeof(struct lpfc_name));
493 /* Set state will put ndlp onto node list if not already done */
494 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
495 spin_lock_irq(shost->host_lock);
496 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
497 spin_unlock_irq(shost->host_lock);
499 /* This side will wait for the PLOGI, decrement ndlp reference
500 * count indicating that ndlp can be released when other
501 * references to it are done.
505 /* If we are pt2pt with another NPort, force NPIV off! */
506 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
508 spin_lock_irq(shost->host_lock);
509 vport->fc_flag |= FC_PT2PT;
510 spin_unlock_irq(shost->host_lock);
512 /* Start discovery - this should just do CLEAR_LA */
513 lpfc_disc_start(vport);
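/*
 * lpfc_cmpl_els_flogi - Completion handler for an issued FLOGI
 *
 * Runs the standard ELS completion checks (link attention, retry).  On
 * success the service parameters in the response decide the topology:
 * an F_Port leads to lpfc_cmpl_els_flogi_fabric(), an N_Port to the
 * point-to-point setup in lpfc_cmpl_els_flogi_nport().  On a final
 * failure the fabric flags are cleared and, for a private loop, the
 * loop map is used to build the discovery list instead.
 */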
520 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
521 struct lpfc_iocbq *rspiocb)
523 struct lpfc_vport *vport = cmdiocb->vport;
524 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
525 IOCB_t *irsp = &rspiocb->iocb;
526 struct lpfc_nodelist *ndlp = cmdiocb->context1;
527 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
528 struct serv_parm *sp;
531 /* Check to see if link went down during discovery */
532 if (lpfc_els_chk_latt(vport)) {
533 /* One additional decrement on node reference count to
534 * trigger the release of the node
540 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
541 "FLOGI cmpl: status:x%x/x%x state:x%x",
542 irsp->ulpStatus, irsp->un.ulpWord[4],
545 if (irsp->ulpStatus) {
546 /* Check for retry */
547 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
550 /* FLOGI failed, so there is no fabric */
551 spin_lock_irq(shost->host_lock);
552 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
553 spin_unlock_irq(shost->host_lock);
555 /* If private loop, then allow max outstanding els to be
556 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
557 * alpa map would take too long otherwise.
559 if (phba->alpa_map[0] == 0) {
560 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
564 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
565 "0100 FLOGI failure Data: x%x x%x "
567 irsp->ulpStatus, irsp->un.ulpWord[4],
573 * The FLOGI succeeded. Sync the data for the CPU before
576 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
578 sp = prsp->virt + sizeof(uint32_t);
580 /* FLOGI completes successfully */
581 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
582 "0101 FLOGI completes sucessfully "
583 "Data: x%x x%x x%x x%x\n",
584 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
585 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
587 if (vport->port_state == LPFC_FLOGI) {
589 * If Common Service Parameters indicate Nport
590 * we are point to point, if Fport we are Fabric.
593 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
595 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
604 if (!lpfc_error_lost_link(irsp)) {
605 /* FLOGI failed, so just use loop map to make discovery list */
606 lpfc_disc_list_loopmap(vport);
608 /* Start discovery */
609 lpfc_disc_start(vport);
610 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
611 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
612 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
613 (phba->link_state != LPFC_CLEAR_LA)) {
614 /* If FLOGI failed enable link interrupt. */
615 lpfc_issue_clear_la(phba, vport);
618 lpfc_els_free_iocb(phba, cmdiocb);
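/*
 * lpfc_issue_els_flogi - Issue a FLOGI to the fabric controller
 *
 * Builds a FLOGI payload from the vport's service parameters, adjusts
 * the common service parameters for fabric login (class 3, FC-PH3
 * levels, multiple-N_Port request when NPIV is enabled), temporarily
 * uses LPFC_DISC_FLOGI_TMO for the discovery timer, and hands the iocb
 * to lpfc_issue_fabric_iocb() with lpfc_cmpl_els_flogi as the
 * completion handler.  Per the caller in lpfc_initial_flogi(), a
 * non-zero return means the FLOGI could not be issued.
 */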
622 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
625 struct lpfc_hba *phba = vport->phba;
626 struct serv_parm *sp;
628 struct lpfc_iocbq *elsiocb;
629 struct lpfc_sli_ring *pring;
635 pring = &phba->sli.ring[LPFC_ELS_RING];
637 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
638 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
639 ndlp->nlp_DID, ELS_CMD_FLOGI);
644 icmd = &elsiocb->iocb;
645 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
647 /* For FLOGI request, remainder of payload is service parameters */
648 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
649 pcmd += sizeof(uint32_t);
650 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
651 sp = (struct serv_parm *) pcmd;
653 /* Setup CSPs accordingly for Fabric */
655 sp->cmn.w2.r_a_tov = 0;
656 sp->cls1.classValid = 0;
657 sp->cls2.seqDelivery = 1;
658 sp->cls3.seqDelivery = 1;
659 if (sp->cmn.fcphLow < FC_PH3)
660 sp->cmn.fcphLow = FC_PH3;
661 if (sp->cmn.fcphHigh < FC_PH3)
662 sp->cmn.fcphHigh = FC_PH3;
664 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
665 sp->cmn.request_multiple_Nport = 1;
667 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
672 if (phba->fc_topology != TOPOLOGY_LOOP) {
673 icmd->un.elsreq64.myID = 0;
674 icmd->un.elsreq64.fl = 1;
677 tmo = phba->fc_ratov;
678 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
679 lpfc_set_disctmo(vport);
680 phba->fc_ratov = tmo;
682 phba->fc_stat.elsXmitFLOGI++;
683 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
686 "Issue FLOGI: opt:x%x",
687 phba->sli3_options, 0, 0);
689 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
690 if (rc == IOCB_ERROR) {
691 lpfc_els_free_iocb(phba, elsiocb);
698 lpfc_els_abort_flogi(struct lpfc_hba *phba)
700 struct lpfc_sli_ring *pring;
701 struct lpfc_iocbq *iocb, *next_iocb;
702 struct lpfc_nodelist *ndlp;
705 /* Abort outstanding I/O on NPort <nlp_DID> */
706 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
707 "0201 Abort outstanding I/O on NPort x%x\n",
710 pring = &phba->sli.ring[LPFC_ELS_RING];
713 * Check the txcmplq for an iocb that matches the nport the driver is
716 spin_lock_irq(&phba->hbalock);
717 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
719 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
720 icmd->un.elsreq64.bdl.ulpIoTag32) {
721 ndlp = (struct lpfc_nodelist *)(iocb->context1);
722 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
723 (ndlp->nlp_DID == Fabric_DID))
724 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
727 spin_unlock_irq(&phba->hbalock);
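/*
 * lpfc_initial_flogi - Kick off fabric discovery with the first FLOGI
 *
 * Moves the vport into LPFC_FLOGI state, finds (or allocates and
 * enqueues) the Fabric_DID ndlp, and issues the FLOGI.  If the FLOGI
 * cannot be issued, the reference on the node is dropped so the node
 * can be released.
 */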
733 lpfc_initial_flogi(struct lpfc_vport *vport)
735 struct lpfc_hba *phba = vport->phba;
736 struct lpfc_nodelist *ndlp;
738 vport->port_state = LPFC_FLOGI;
739 lpfc_set_disctmo(vport);
741 /* First look for the Fabric ndlp */
742 ndlp = lpfc_findnode_did(vport, Fabric_DID);
744 /* Cannot find existing Fabric ndlp, so allocate a new one */
745 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
748 lpfc_nlp_init(vport, ndlp, Fabric_DID);
749 /* Put ndlp onto node list */
750 lpfc_enqueue_node(vport, ndlp);
751 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
752 /* re-setup ndlp without removing from node list */
753 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
758 if (lpfc_issue_els_flogi(vport, ndlp, 0))
759 /* This decrement of reference count to node shall kick off
760 * the release of the node.
768 lpfc_initial_fdisc(struct lpfc_vport *vport)
770 struct lpfc_hba *phba = vport->phba;
771 struct lpfc_nodelist *ndlp;
773 /* First look for the Fabric ndlp */
774 ndlp = lpfc_findnode_did(vport, Fabric_DID);
776 /* Cannot find existing Fabric ndlp, so allocate a new one */
777 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
780 lpfc_nlp_init(vport, ndlp, Fabric_DID);
781 /* Put ndlp onto node list */
782 lpfc_enqueue_node(vport, ndlp);
783 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
784 /* re-setup ndlp without removing from node list */
785 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
790 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
791 /* decrement node reference count to trigger the release of
801 lpfc_more_plogi(struct lpfc_vport *vport)
805 if (vport->num_disc_nodes)
806 vport->num_disc_nodes--;
808 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
809 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
810 "0232 Continue discovery with %d PLOGIs to go "
811 "Data: x%x x%x x%x\n",
812 vport->num_disc_nodes, vport->fc_plogi_cnt,
813 vport->fc_flag, vport->port_state);
814 /* Check to see if there are more PLOGIs to be sent */
815 if (vport->fc_flag & FC_NLP_MORE)
816 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
817 sentplogi = lpfc_els_disc_plogi(vport);
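/*
 * lpfc_plogi_confirm_nport - Resolve the ndlp for a completed PLOGI
 *
 * A PLOGI response carries the remote port's WWPN, which may not match
 * the ndlp the PLOGI was issued against (for example after an address
 * change).  This routine looks up the node by WWPN and, when the two
 * differ, transfers the DID, state, discovery flag and rport linkage
 * to the node that owns the WWPN.  The old node is then either dropped
 * or unregistered and parked back in NPR state, in both cases with its
 * original DID restored so that two nodes never share a DID.  Fabric
 * nodes are returned unchanged.
 */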
822 static struct lpfc_nodelist *
823 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
824 struct lpfc_nodelist *ndlp)
826 struct lpfc_vport *vport = ndlp->vport;
827 struct lpfc_nodelist *new_ndlp;
828 struct lpfc_rport_data *rdata;
829 struct fc_rport *rport;
830 struct serv_parm *sp;
831 uint8_t name[sizeof(struct lpfc_name)];
832 uint32_t rc, keepDID = 0;
834 /* Fabric nodes can have the same WWPN so we don't bother searching
835 * by WWPN. Just return the ndlp that was given to us.
837 if (ndlp->nlp_type & NLP_FABRIC)
840 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
841 memset(name, 0, sizeof(struct lpfc_name));
843 /* Now we find out if the NPort we are logging into, matches the WWPN
844 * we have for that ndlp. If not, we have some work to do.
846 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
848 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
852 rc = memcmp(&ndlp->nlp_portname, name,
853 sizeof(struct lpfc_name));
856 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
859 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
860 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
861 rc = memcmp(&ndlp->nlp_portname, name,
862 sizeof(struct lpfc_name));
865 new_ndlp = lpfc_enable_node(vport, new_ndlp,
866 NLP_STE_UNUSED_NODE);
869 keepDID = new_ndlp->nlp_DID;
871 keepDID = new_ndlp->nlp_DID;
873 lpfc_unreg_rpi(vport, new_ndlp);
874 new_ndlp->nlp_DID = ndlp->nlp_DID;
875 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
877 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
878 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
879 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
881 /* Set state will put new_ndlp on to node list if not already done */
882 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
884 /* Move this back to NPR state */
885 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
886 /* The new_ndlp is replacing ndlp totally, so we need
887 * to put ndlp on UNUSED list and try to free it.
890 /* Fix up the rport accordingly */
893 rdata = rport->dd_data;
894 if (rdata->pnode == ndlp) {
897 rdata->pnode = lpfc_nlp_get(new_ndlp);
898 new_ndlp->rport = rport;
900 new_ndlp->nlp_type = ndlp->nlp_type;
902 /* We shall actually free the ndlp only when both its nlp_DID and
903 * nlp_portname fields are 0, to avoid leaving an ndlp on the
904 * nodelist that will never be used.
906 if (ndlp->nlp_DID == 0) {
907 spin_lock_irq(&phba->ndlp_lock);
908 NLP_SET_FREE_REQ(ndlp);
909 spin_unlock_irq(&phba->ndlp_lock);
912 /* Two ndlps cannot have the same did on the nodelist */
913 ndlp->nlp_DID = keepDID;
914 lpfc_drop_node(vport, ndlp);
917 lpfc_unreg_rpi(vport, ndlp);
918 /* Two ndlps cannot have the same did */
919 ndlp->nlp_DID = keepDID;
920 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
926 lpfc_end_rscn(struct lpfc_vport *vport)
928 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
930 if (vport->fc_flag & FC_RSCN_MODE) {
932 * Check to see if more RSCNs came in while we were
933 * processing this one.
935 if (vport->fc_rscn_id_cnt ||
936 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
937 lpfc_els_handle_rscn(vport);
939 spin_lock_irq(shost->host_lock);
940 vport->fc_flag &= ~FC_RSCN_MODE;
941 spin_unlock_irq(shost->host_lock);
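/*
 * lpfc_cmpl_els_plogi - Completion handler for an issued PLOGI
 *
 * Looks up the ndlp by the remote DID from the response, runs the link
 * attention and retry checks, and then feeds NLP_EVT_CMPL_PLOGI into
 * the discovery state machine (after confirming the node by WWPN via
 * lpfc_plogi_confirm_nport() on success).  When this PLOGI was part of
 * discovery, the next batch of PLOGIs is started and, once none remain,
 * discovery is wrapped up via lpfc_can_disctmo()/lpfc_end_rscn().
 */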
947 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
948 struct lpfc_iocbq *rspiocb)
950 struct lpfc_vport *vport = cmdiocb->vport;
951 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
953 struct lpfc_nodelist *ndlp;
954 struct lpfc_dmabuf *prsp;
955 int disc, rc, did, type;
957 /* we pass cmdiocb to state machine which needs rspiocb as well */
958 cmdiocb->context_un.rsp_iocb = rspiocb;
960 irsp = &rspiocb->iocb;
961 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
962 "PLOGI cmpl: status:x%x/x%x did:x%x",
963 irsp->ulpStatus, irsp->un.ulpWord[4],
964 irsp->un.elsreq64.remoteID);
966 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
967 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
968 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
969 "0136 PLOGI completes to NPort x%x "
970 "with no ndlp. Data: x%x x%x x%x\n",
971 irsp->un.elsreq64.remoteID,
972 irsp->ulpStatus, irsp->un.ulpWord[4],
977 /* Since ndlp can be freed in the disc state machine, note if this node
978 * is being used during discovery.
980 spin_lock_irq(shost->host_lock);
981 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
982 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
983 spin_unlock_irq(shost->host_lock);
986 /* PLOGI completes to NPort <nlp_DID> */
987 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
988 "0102 PLOGI completes to NPort x%x "
989 "Data: x%x x%x x%x x%x x%x\n",
990 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
991 irsp->ulpTimeout, disc, vport->num_disc_nodes);
992 /* Check to see if link went down during discovery */
993 if (lpfc_els_chk_latt(vport)) {
994 spin_lock_irq(shost->host_lock);
995 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
996 spin_unlock_irq(shost->host_lock);
1000 /* ndlp could be freed in DSM, save these values now */
1001 type = ndlp->nlp_type;
1002 did = ndlp->nlp_DID;
1004 if (irsp->ulpStatus) {
1005 /* Check for retry */
1006 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1007 /* ELS command is being retried */
1009 spin_lock_irq(shost->host_lock);
1010 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1011 spin_unlock_irq(shost->host_lock);
1016 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1017 if (lpfc_error_lost_link(irsp))
1018 rc = NLP_STE_FREED_NODE;
1020 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1021 NLP_EVT_CMPL_PLOGI);
1023 /* Good status, call state machine */
1024 prsp = list_entry(((struct lpfc_dmabuf *)
1025 cmdiocb->context2)->list.next,
1026 struct lpfc_dmabuf, list);
1027 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1028 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1029 NLP_EVT_CMPL_PLOGI);
1032 if (disc && vport->num_disc_nodes) {
1033 /* Check to see if there are more PLOGIs to be sent */
1034 lpfc_more_plogi(vport);
1036 if (vport->num_disc_nodes == 0) {
1037 spin_lock_irq(shost->host_lock);
1038 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1039 spin_unlock_irq(shost->host_lock);
1041 lpfc_can_disctmo(vport);
1042 lpfc_end_rscn(vport);
1047 lpfc_els_free_iocb(phba, cmdiocb);
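/*
 * lpfc_issue_els_plogi - Issue a PLOGI to a remote N_Port
 *
 * Prepares an ELS_CMD_PLOGI iocb whose payload is the vport's service
 * parameters (with the FC-PH levels bumped to at least FC_PH_4_3 /
 * FC_PH3), sets lpfc_cmpl_els_plogi as the completion handler and
 * submits it on the ELS ring.  The node reference taken by
 * lpfc_prep_els_iocb() keeps the ndlp, if one exists for this DID,
 * pinned until the command completes.
 */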
1052 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1054 struct lpfc_hba *phba = vport->phba;
1055 struct serv_parm *sp;
1057 struct lpfc_nodelist *ndlp;
1058 struct lpfc_iocbq *elsiocb;
1059 struct lpfc_sli_ring *pring;
1060 struct lpfc_sli *psli;
1066 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1068 ndlp = lpfc_findnode_did(vport, did);
1069 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1072 /* If ndlp is not NULL, we will bump the reference count on it */
1073 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1074 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1079 icmd = &elsiocb->iocb;
1080 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1082 /* For PLOGI request, remainder of payload is service parameters */
1083 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1084 pcmd += sizeof(uint32_t);
1085 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1086 sp = (struct serv_parm *) pcmd;
1088 if (sp->cmn.fcphLow < FC_PH_4_3)
1089 sp->cmn.fcphLow = FC_PH_4_3;
1091 if (sp->cmn.fcphHigh < FC_PH3)
1092 sp->cmn.fcphHigh = FC_PH3;
1094 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1095 "Issue PLOGI: did:x%x",
1098 phba->fc_stat.elsXmitPLOGI++;
1099 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1100 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1102 if (ret == IOCB_ERROR) {
1103 lpfc_els_free_iocb(phba, elsiocb);
1110 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1111 struct lpfc_iocbq *rspiocb)
1113 struct lpfc_vport *vport = cmdiocb->vport;
1114 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1116 struct lpfc_sli *psli;
1117 struct lpfc_nodelist *ndlp;
1120 /* we pass cmdiocb to state machine which needs rspiocb as well */
1121 cmdiocb->context_un.rsp_iocb = rspiocb;
1123 irsp = &(rspiocb->iocb);
1124 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1125 spin_lock_irq(shost->host_lock);
1126 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1127 spin_unlock_irq(shost->host_lock);
1129 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1130 "PRLI cmpl: status:x%x/x%x did:x%x",
1131 irsp->ulpStatus, irsp->un.ulpWord[4],
1133 /* PRLI completes to NPort <nlp_DID> */
1134 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1135 "0103 PRLI completes to NPort x%x "
1136 "Data: x%x x%x x%x x%x\n",
1137 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1138 irsp->ulpTimeout, vport->num_disc_nodes);
1140 vport->fc_prli_sent--;
1141 /* Check to see if link went down during discovery */
1142 if (lpfc_els_chk_latt(vport))
1145 if (irsp->ulpStatus) {
1146 /* Check for retry */
1147 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1148 /* ELS command is being retried */
1152 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1153 if (lpfc_error_lost_link(irsp))
1156 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1159 /* Good status, call state machine */
1160 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1163 lpfc_els_free_iocb(phba, cmdiocb);
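/*
 * lpfc_issue_els_prli - Issue a PRLI to establish an FCP image pair
 *
 * Builds the PRLI parameter page for FCP (initiator function,
 * estabImagePair, readXferRdyDis, plus the FC-TAPE bits when the
 * firmware feature level is 3.20 or later), flags the node with
 * NLP_PRLI_SND, and submits the iocb with lpfc_cmpl_els_prli as the
 * completion handler.  fc_prli_sent is bumped only when the iocb was
 * actually issued.
 */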
1168 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1171 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1172 struct lpfc_hba *phba = vport->phba;
1175 struct lpfc_iocbq *elsiocb;
1176 struct lpfc_sli_ring *pring;
1177 struct lpfc_sli *psli;
1182 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1184 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1185 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1186 ndlp->nlp_DID, ELS_CMD_PRLI);
1190 icmd = &elsiocb->iocb;
1191 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1193 /* For PRLI request, remainder of payload is service parameters */
1194 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
1195 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
1196 pcmd += sizeof(uint32_t);
1198 /* For PRLI, remainder of payload is PRLI parameter page */
1199 npr = (PRLI *) pcmd;
1201 * If our firmware version is 3.20 or later,
1202 * set the following bits for FC-TAPE support.
1204 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1205 npr->ConfmComplAllowed = 1;
1207 npr->TaskRetryIdReq = 1;
1209 npr->estabImagePair = 1;
1210 npr->readXferRdyDis = 1;
1212 /* For FCP support */
1213 npr->prliType = PRLI_FCP_TYPE;
1214 npr->initiatorFunc = 1;
1216 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1217 "Issue PRLI: did:x%x",
1218 ndlp->nlp_DID, 0, 0);
1220 phba->fc_stat.elsXmitPRLI++;
1221 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
1222 spin_lock_irq(shost->host_lock);
1223 ndlp->nlp_flag |= NLP_PRLI_SND;
1224 spin_unlock_irq(shost->host_lock);
1225 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1226 spin_lock_irq(shost->host_lock);
1227 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1228 spin_unlock_irq(shost->host_lock);
1229 lpfc_els_free_iocb(phba, elsiocb);
1232 vport->fc_prli_sent++;
1237 lpfc_more_adisc(struct lpfc_vport *vport)
1241 if (vport->num_disc_nodes)
1242 vport->num_disc_nodes--;
1243 /* Continue discovery with <num_disc_nodes> ADISCs to go */
1244 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1245 "0210 Continue discovery with %d ADISCs to go "
1246 "Data: x%x x%x x%x\n",
1247 vport->num_disc_nodes, vport->fc_adisc_cnt,
1248 vport->fc_flag, vport->port_state);
1249 /* Check to see if there are more ADISCs to be sent */
1250 if (vport->fc_flag & FC_NLP_MORE) {
1251 lpfc_set_disctmo(vport);
1252 /* go thru NPR nodes and issue any remaining ELS ADISCs */
1253 sentadisc = lpfc_els_disc_adisc(vport);
1259 lpfc_rscn_disc(struct lpfc_vport *vport)
1261 lpfc_can_disctmo(vport);
1263 /* RSCN discovery */
1264 /* go thru NPR nodes and issue ELS PLOGIs */
1265 if (vport->fc_npr_cnt)
1266 if (lpfc_els_disc_plogi(vport))
1269 lpfc_end_rscn(vport);
1273 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1274 struct lpfc_iocbq *rspiocb)
1276 struct lpfc_vport *vport = cmdiocb->vport;
1277 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1279 struct lpfc_nodelist *ndlp;
1282 /* we pass cmdiocb to state machine which needs rspiocb as well */
1283 cmdiocb->context_un.rsp_iocb = rspiocb;
1285 irsp = &(rspiocb->iocb);
1286 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1288 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1289 "ADISC cmpl: status:x%x/x%x did:x%x",
1290 irsp->ulpStatus, irsp->un.ulpWord[4],
1293 /* Since ndlp can be freed in the disc state machine, note if this node
1294 * is being used during discovery.
1296 spin_lock_irq(shost->host_lock);
1297 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1298 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1299 spin_unlock_irq(shost->host_lock);
1300 /* ADISC completes to NPort <nlp_DID> */
1301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1302 "0104 ADISC completes to NPort x%x "
1303 "Data: x%x x%x x%x x%x x%x\n",
1304 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1305 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1306 /* Check to see if link went down during discovery */
1307 if (lpfc_els_chk_latt(vport)) {
1308 spin_lock_irq(shost->host_lock);
1309 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1310 spin_unlock_irq(shost->host_lock);
1314 if (irsp->ulpStatus) {
1315 /* Check for retry */
1316 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1317 /* ELS command is being retried */
1319 spin_lock_irq(shost->host_lock);
1320 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1321 spin_unlock_irq(shost->host_lock);
1322 lpfc_set_disctmo(vport);
1327 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1328 if (!lpfc_error_lost_link(irsp))
1329 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1330 NLP_EVT_CMPL_ADISC);
1332 /* Good status, call state machine */
1333 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1334 NLP_EVT_CMPL_ADISC);
1336 if (disc && vport->num_disc_nodes) {
1337 /* Check to see if there are more ADISCs to be sent */
1338 lpfc_more_adisc(vport);
1340 /* Check to see if we are done with ADISC authentication */
1341 if (vport->num_disc_nodes == 0) {
1342 /* If we get here, there is nothing left to ADISC */
1344 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1345 * and continue discovery.
1347 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1348 !(vport->fc_flag & FC_RSCN_MODE)) {
1349 lpfc_issue_reg_vpi(phba, vport);
1353 * For SLI2, we need to set port_state to READY
1354 * and continue discovery.
1356 if (vport->port_state < LPFC_VPORT_READY) {
1357 /* If we get here, there is nothing to ADISC */
1358 if (vport->port_type == LPFC_PHYSICAL_PORT)
1359 lpfc_issue_clear_la(phba, vport);
1361 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1362 vport->num_disc_nodes = 0;
1363 /* go thru NPR list, issue ELS PLOGIs */
1364 if (vport->fc_npr_cnt)
1365 lpfc_els_disc_plogi(vport);
1367 if (!vport->num_disc_nodes) {
1368 spin_lock_irq(shost->host_lock);
1373 lpfc_can_disctmo(vport);
1376 vport->port_state = LPFC_VPORT_READY;
1378 lpfc_rscn_disc(vport);
1383 lpfc_els_free_iocb(phba, cmdiocb);
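/*
 * lpfc_issue_els_adisc - Issue an ADISC to re-validate a remote port
 *
 * The ADISC payload carries the local hard AL_PA, port name, node name
 * and N_Port ID so the remote port can confirm that its cached login
 * is still valid without a full PLOGI.  NLP_ADISC_SND is set while the
 * command is outstanding and cleared (and the iocb freed) if the ring
 * submit fails; lpfc_cmpl_els_adisc handles completion.
 */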
1388 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1391 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1392 struct lpfc_hba *phba = vport->phba;
1395 struct lpfc_iocbq *elsiocb;
1396 struct lpfc_sli *psli = &phba->sli;
1397 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1401 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
1402 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1403 ndlp->nlp_DID, ELS_CMD_ADISC);
1407 icmd = &elsiocb->iocb;
1408 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1410 /* For ADISC request, remainder of payload is service parameters */
1411 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1412 pcmd += sizeof(uint32_t);
1414 /* Fill in ADISC payload */
1415 ap = (ADISC *) pcmd;
1416 ap->hardAL_PA = phba->fc_pref_ALPA;
1417 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
1418 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1419 ap->DID = be32_to_cpu(vport->fc_myDID);
1421 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1422 "Issue ADISC: did:x%x",
1423 ndlp->nlp_DID, 0, 0);
1425 phba->fc_stat.elsXmitADISC++;
1426 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1427 spin_lock_irq(shost->host_lock);
1428 ndlp->nlp_flag |= NLP_ADISC_SND;
1429 spin_unlock_irq(shost->host_lock);
1430 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1431 spin_lock_irq(shost->host_lock);
1432 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1433 spin_unlock_irq(shost->host_lock);
1434 lpfc_els_free_iocb(phba, elsiocb);
1441 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1442 struct lpfc_iocbq *rspiocb)
1444 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1445 struct lpfc_vport *vport = ndlp->vport;
1446 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1448 struct lpfc_sli *psli;
1451 /* we pass cmdiocb to state machine which needs rspiocb as well */
1452 cmdiocb->context_un.rsp_iocb = rspiocb;
1454 irsp = &(rspiocb->iocb);
1455 spin_lock_irq(shost->host_lock);
1456 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1457 spin_unlock_irq(shost->host_lock);
1459 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1460 "LOGO cmpl: status:x%x/x%x did:x%x",
1461 irsp->ulpStatus, irsp->un.ulpWord[4],
1463 /* LOGO completes to NPort <nlp_DID> */
1464 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1465 "0105 LOGO completes to NPort x%x "
1466 "Data: x%x x%x x%x x%x\n",
1467 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1468 irsp->ulpTimeout, vport->num_disc_nodes);
1469 /* Check to see if link went down during discovery */
1470 if (lpfc_els_chk_latt(vport))
1473 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
1474 /* NLP_EVT_DEVICE_RM should unregister the RPI
1475 * which should abort all outstanding IOs.
1477 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1482 if (irsp->ulpStatus) {
1483 /* Check for retry */
1484 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1485 /* ELS command is being retried */
1488 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1489 if (lpfc_error_lost_link(irsp))
1492 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1495 /* Good status, call state machine.
1496 * This will unregister the rpi if needed.
1498 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1501 lpfc_els_free_iocb(phba, cmdiocb);
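/*
 * lpfc_issue_els_logo - Issue a LOGO to a remote N_Port
 *
 * Skips the request if a LOGO is already outstanding to this node
 * (NLP_LOGO_SND), otherwise builds a LOGO payload containing the local
 * N_Port ID and port name and submits it with lpfc_cmpl_els_logo as
 * the completion handler.  The NLP_LOGO_SND flag is cleared again if
 * the submit fails.
 */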
1506 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1509 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1510 struct lpfc_hba *phba = vport->phba;
1512 struct lpfc_iocbq *elsiocb;
1513 struct lpfc_sli_ring *pring;
1514 struct lpfc_sli *psli;
1520 pring = &psli->ring[LPFC_ELS_RING];
1522 spin_lock_irq(shost->host_lock);
1523 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1524 spin_unlock_irq(shost->host_lock);
1527 spin_unlock_irq(shost->host_lock);
1529 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
1530 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1531 ndlp->nlp_DID, ELS_CMD_LOGO);
1535 icmd = &elsiocb->iocb;
1536 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1537 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1538 pcmd += sizeof(uint32_t);
1540 /* Fill in LOGO payload */
1541 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
1542 pcmd += sizeof(uint32_t);
1543 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
1545 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1546 "Issue LOGO: did:x%x",
1547 ndlp->nlp_DID, 0, 0);
1549 phba->fc_stat.elsXmitLOGO++;
1550 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1551 spin_lock_irq(shost->host_lock);
1552 ndlp->nlp_flag |= NLP_LOGO_SND;
1553 spin_unlock_irq(shost->host_lock);
1554 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1556 if (rc == IOCB_ERROR) {
1557 spin_lock_irq(shost->host_lock);
1558 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1559 spin_unlock_irq(shost->host_lock);
1560 lpfc_els_free_iocb(phba, elsiocb);
1567 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1568 struct lpfc_iocbq *rspiocb)
1570 struct lpfc_vport *vport = cmdiocb->vport;
1573 irsp = &rspiocb->iocb;
1575 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1576 "ELS cmd cmpl: status:x%x/x%x did:x%x",
1577 irsp->ulpStatus, irsp->un.ulpWord[4],
1578 irsp->un.elsreq64.remoteID);
1579 /* ELS cmd tag <ulpIoTag> completes */
1580 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1581 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
1582 irsp->ulpIoTag, irsp->ulpStatus,
1583 irsp->un.ulpWord[4], irsp->ulpTimeout);
1584 /* Check to see if link went down during discovery */
1585 lpfc_els_chk_latt(vport);
1586 lpfc_els_free_iocb(phba, cmdiocb);
1591 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1593 struct lpfc_hba *phba = vport->phba;
1595 struct lpfc_iocbq *elsiocb;
1596 struct lpfc_sli_ring *pring;
1597 struct lpfc_sli *psli;
1600 struct lpfc_nodelist *ndlp;
1603 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1604 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
1606 ndlp = lpfc_findnode_did(vport, nportid);
1608 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1611 lpfc_nlp_init(vport, ndlp, nportid);
1612 lpfc_enqueue_node(vport, ndlp);
1613 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1614 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1619 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1620 ndlp->nlp_DID, ELS_CMD_SCR);
1623 /* This will trigger the release of the node just
1630 icmd = &elsiocb->iocb;
1631 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1633 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1634 pcmd += sizeof(uint32_t);
1636 /* For SCR, remainder of payload is SCR parameter page */
1637 memset(pcmd, 0, sizeof(SCR));
1638 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1641 "Issue SCR: did:x%x",
1642 ndlp->nlp_DID, 0, 0);
1644 phba->fc_stat.elsXmitSCR++;
1645 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1646 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1647 /* The additional lpfc_nlp_put will cause the following
1648 * lpfc_els_free_iocb routine to trigger the release of
1652 lpfc_els_free_iocb(phba, elsiocb);
1655 /* This will cause the callback function lpfc_cmpl_els_cmd to
1656 * trigger the release of the node.
1663 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1665 struct lpfc_hba *phba = vport->phba;
1667 struct lpfc_iocbq *elsiocb;
1668 struct lpfc_sli_ring *pring;
1669 struct lpfc_sli *psli;
1674 struct lpfc_nodelist *ondlp;
1675 struct lpfc_nodelist *ndlp;
1678 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1679 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
1681 ndlp = lpfc_findnode_did(vport, nportid);
1683 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1686 lpfc_nlp_init(vport, ndlp, nportid);
1687 lpfc_enqueue_node(vport, ndlp);
1688 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1689 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1694 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1695 ndlp->nlp_DID, ELS_CMD_RNID);
1697 /* This will trigger the release of the node just
1704 icmd = &elsiocb->iocb;
1705 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1707 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1708 pcmd += sizeof(uint32_t);
1710 /* Fill in FARPR payload */
1711 fp = (FARP *) (pcmd);
1712 memset(fp, 0, sizeof(FARP));
1713 lp = (uint32_t *) pcmd;
1714 *lp++ = be32_to_cpu(nportid);
1715 *lp++ = be32_to_cpu(vport->fc_myDID);
1717 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1719 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
1720 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1721 ondlp = lpfc_findnode_did(vport, nportid);
1722 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
1723 memcpy(&fp->OportName, &ondlp->nlp_portname,
1724 sizeof(struct lpfc_name));
1725 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1726 sizeof(struct lpfc_name));
1729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1730 "Issue FARPR: did:x%x",
1731 ndlp->nlp_DID, 0, 0);
1733 phba->fc_stat.elsXmitFARPR++;
1734 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1735 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1736 /* The additional lpfc_nlp_put will cause the following
1737 * lpfc_els_free_iocb routine to trigger the release of
1741 lpfc_els_free_iocb(phba, elsiocb);
1744 /* This will cause the callback function lpfc_cmpl_els_cmd to
1745 * trigger the release of the node.
1752 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1755 struct lpfc_work_evt *evtp;
1757 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
1759 spin_lock_irq(shost->host_lock);
1760 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1761 spin_unlock_irq(shost->host_lock);
1762 del_timer_sync(&nlp->nlp_delayfunc);
1763 nlp->nlp_last_elscmd = 0;
1764 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
1765 list_del_init(&nlp->els_retry_evt.evt_listp);
1766 /* Decrement nlp reference count held for the delayed retry */
1767 evtp = &nlp->els_retry_evt;
1768 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
1770 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1771 spin_lock_irq(shost->host_lock);
1772 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1773 spin_unlock_irq(shost->host_lock);
1774 if (vport->num_disc_nodes) {
1775 if (vport->port_state < LPFC_VPORT_READY) {
1776 /* Check if there are more ADISCs to be sent */
1777 lpfc_more_adisc(vport);
1778 if ((vport->num_disc_nodes == 0) &&
1779 (vport->fc_npr_cnt))
1780 lpfc_els_disc_plogi(vport);
1782 /* Check if there are more PLOGIs to be sent */
1783 lpfc_more_plogi(vport);
1785 if (vport->num_disc_nodes == 0) {
1786 spin_lock_irq(shost->host_lock);
1787 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1788 spin_unlock_irq(shost->host_lock);
1789 lpfc_can_disctmo(vport);
1790 lpfc_end_rscn(vport);
1798 lpfc_els_retry_delay(unsigned long ptr)
1800 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
1801 struct lpfc_vport *vport = ndlp->vport;
1802 struct lpfc_hba *phba = vport->phba;
1803 unsigned long flags;
1804 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
1806 spin_lock_irqsave(&phba->hbalock, flags);
1807 if (!list_empty(&evtp->evt_listp)) {
1808 spin_unlock_irqrestore(&phba->hbalock, flags);
1812 /* We need to hold the node by incrementing the reference
1813 * count until the queued work is done
1815 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
1816 if (evtp->evt_arg1) {
1817 evtp->evt = LPFC_EVT_ELS_RETRY;
1818 list_add_tail(&evtp->evt_listp, &phba->work_list);
1819 lpfc_worker_wake_up(phba);
1821 spin_unlock_irqrestore(&phba->hbalock, flags);
1826 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1828 struct lpfc_vport *vport = ndlp->vport;
1829 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1830 uint32_t cmd, did, retry;
1832 spin_lock_irq(shost->host_lock);
1833 did = ndlp->nlp_DID;
1834 cmd = ndlp->nlp_last_elscmd;
1835 ndlp->nlp_last_elscmd = 0;
1837 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1838 spin_unlock_irq(shost->host_lock);
1842 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1843 spin_unlock_irq(shost->host_lock);
1845 * If a discovery event re-added nlp_delayfunc after the timer
1846 * fired and before the timer was processed, cancel the
1849 del_timer_sync(&ndlp->nlp_delayfunc);
1850 retry = ndlp->nlp_retry;
1854 lpfc_issue_els_flogi(vport, ndlp, retry);
1857 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
1858 ndlp->nlp_prev_state = ndlp->nlp_state;
1859 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1863 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
1864 ndlp->nlp_prev_state = ndlp->nlp_state;
1865 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1869 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
1870 ndlp->nlp_prev_state = ndlp->nlp_state;
1871 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1875 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
1876 ndlp->nlp_prev_state = ndlp->nlp_state;
1877 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1881 lpfc_issue_els_fdisc(vport, ndlp, retry);
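/*
 * lpfc_els_retry - Decide whether and how to retry a failed ELS command
 *
 * Examines the completion status (local reject codes, NPort/Fabric
 * reject or busy, LS_RJT reason/explanation codes) and applies the
 * per-command retry policies visible below: some errors retry
 * immediately, some after a delay via the ndlp delay timer
 * (NLP_DELAY_TMO plus nlp_last_elscmd), and some change the vport
 * state instead (e.g. FDISC rejected for resources or a bad WWN).
 * Per the call sites, a non-zero return means a retry has been
 * arranged and the caller must not complete the node; zero means the
 * command should be treated as finally failed.
 */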
1888 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1889 struct lpfc_iocbq *rspiocb)
1891 struct lpfc_vport *vport = cmdiocb->vport;
1892 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1893 IOCB_t *irsp = &rspiocb->iocb;
1894 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1895 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1898 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
1904 /* Note: context2 may be 0 for an internal driver abort
1905 * of a delayed ELS command.
1908 if (pcmd && pcmd->virt) {
1909 elscmd = (uint32_t *) (pcmd->virt);
1913 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
1914 did = ndlp->nlp_DID;
1916 /* We should only hit this case for retrying PLOGI */
1917 did = irsp->un.elsreq64.remoteID;
1918 ndlp = lpfc_findnode_did(vport, did);
1919 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
1920 && (cmd != ELS_CMD_PLOGI))
1924 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1925 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
1926 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
1928 switch (irsp->ulpStatus) {
1929 case IOSTAT_FCP_RSP_ERROR:
1930 case IOSTAT_REMOTE_STOP:
1933 case IOSTAT_LOCAL_REJECT:
1934 switch ((irsp->un.ulpWord[4] & 0xff)) {
1935 case IOERR_LOOP_OPEN_FAILURE:
1936 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
1941 case IOERR_ILLEGAL_COMMAND:
1942 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1943 "0124 Retry illegal cmd x%x "
1944 "retry:x%x delay:x%x\n",
1945 cmd, cmdiocb->retry, delay);
1947 /* All commands' retry policy */
1949 if (cmdiocb->retry > 2)
1953 case IOERR_NO_RESOURCES:
1954 logerr = 1; /* HBA out of resources */
1956 if (cmdiocb->retry > 100)
1961 case IOERR_ILLEGAL_FRAME:
1966 case IOERR_SEQUENCE_TIMEOUT:
1967 case IOERR_INVALID_RPI:
1973 case IOSTAT_NPORT_RJT:
1974 case IOSTAT_FABRIC_RJT:
1975 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
1981 case IOSTAT_NPORT_BSY:
1982 case IOSTAT_FABRIC_BSY:
1983 logerr = 1; /* Fabric / Remote NPort out of resources */
1988 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
1989 /* Added for vendor specific support:
1990 * just keep retrying for these Rsn / Exp codes
1992 switch (stat.un.b.lsRjtRsnCode) {
1993 case LSRJT_UNABLE_TPC:
1994 if (stat.un.b.lsRjtRsnCodeExp ==
1995 LSEXP_CMD_IN_PROGRESS) {
1996 if (cmd == ELS_CMD_PLOGI) {
2003 if (cmd == ELS_CMD_PLOGI) {
2005 maxretry = lpfc_max_els_tries + 1;
2009 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2010 (cmd == ELS_CMD_FDISC) &&
2011 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
2012 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2013 "0125 FDISC Failed (x%x). "
2014 "Fabric out of resources\n",
2015 stat.un.lsRjtError);
2016 lpfc_vport_set_state(vport,
2017 FC_VPORT_NO_FABRIC_RSCS);
2021 case LSRJT_LOGICAL_BSY:
2022 if ((cmd == ELS_CMD_PLOGI) ||
2023 (cmd == ELS_CMD_PRLI)) {
2026 } else if (cmd == ELS_CMD_FDISC) {
2027 /* FDISC retry policy */
2029 if (cmdiocb->retry >= 32)
2035 case LSRJT_LOGICAL_ERR:
2036 /* There are some cases where switches return this
2037 * error when they are not ready and should be returning
2038 * Logical Busy. We should delay every time.
2040 if (cmd == ELS_CMD_FDISC &&
2041 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2047 case LSRJT_PROTOCOL_ERR:
2048 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2049 (cmd == ELS_CMD_FDISC) &&
2050 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
2051 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2053 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2054 "0123 FDISC Failed (x%x). "
2055 "Fabric Detected Bad WWN\n",
2056 stat.un.lsRjtError);
2057 lpfc_vport_set_state(vport,
2058 FC_VPORT_FABRIC_REJ_WWN);
2064 case IOSTAT_INTERMED_RSP:
2072 if (did == FDMI_DID)
2075 if ((cmd == ELS_CMD_FLOGI) &&
2076 (phba->fc_topology != TOPOLOGY_LOOP) &&
2077 !lpfc_error_lost_link(irsp)) {
2078 /* FLOGI retry policy */
2081 if (cmdiocb->retry >= 32)
2085 if ((++cmdiocb->retry) >= maxretry) {
2086 phba->fc_stat.elsRetryExceeded++;
2090 if ((vport->load_flag & FC_UNLOADING) != 0)
2095 /* Retry ELS command <elsCmd> to remote NPORT <did> */
2096 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2097 "0107 Retry ELS command x%x to remote "
2098 "NPORT x%x Data: x%x x%x\n",
2099 cmd, did, cmdiocb->retry, delay);
2101 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
2102 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
2103 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
2104 /* Don't reset timer for no resources */
2106 /* If discovery / RSCN timer is running, reset it */
2107 if (timer_pending(&vport->fc_disctmo) ||
2108 (vport->fc_flag & FC_RSCN_MODE))
2109 lpfc_set_disctmo(vport);
2112 phba->fc_stat.elsXmitRetry++;
2113 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
2114 phba->fc_stat.elsDelayRetry++;
2115 ndlp->nlp_retry = cmdiocb->retry;
2117 /* delay is specified in milliseconds */
2118 mod_timer(&ndlp->nlp_delayfunc,
2119 jiffies + msecs_to_jiffies(delay));
2120 spin_lock_irq(shost->host_lock);
2121 ndlp->nlp_flag |= NLP_DELAY_TMO;
2122 spin_unlock_irq(shost->host_lock);
2124 ndlp->nlp_prev_state = ndlp->nlp_state;
2125 if (cmd == ELS_CMD_PRLI)
2126 lpfc_nlp_set_state(vport, ndlp,
2127 NLP_STE_REG_LOGIN_ISSUE);
2129 lpfc_nlp_set_state(vport, ndlp,
2131 ndlp->nlp_last_elscmd = cmd;
2137 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2140 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
2143 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2144 ndlp->nlp_prev_state = ndlp->nlp_state;
2145 lpfc_nlp_set_state(vport, ndlp,
2146 NLP_STE_PLOGI_ISSUE);
2148 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
2151 ndlp->nlp_prev_state = ndlp->nlp_state;
2152 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2153 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
2156 ndlp->nlp_prev_state = ndlp->nlp_state;
2157 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2158 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
2161 ndlp->nlp_prev_state = ndlp->nlp_state;
2162 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2163 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
2167 /* No retry ELS command <elsCmd> to remote NPORT <did> */
2169 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2170 "0137 No retry ELS command x%x to remote "
2171 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
2172 cmd, did, irsp->ulpStatus,
2173 irsp->un.ulpWord[4]);
2176 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2177 "0108 No retry ELS command x%x to remote "
2178 "NPORT x%x Retried:%d Error:x%x/%x\n",
2179 cmd, did, cmdiocb->retry, irsp->ulpStatus,
2180 irsp->un.ulpWord[4]);
2186 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2188 struct lpfc_dmabuf *buf_ptr;
2190 /* Free the response before processing the command. */
2191 if (!list_empty(&buf_ptr1->list)) {
2192 list_remove_head(&buf_ptr1->list, buf_ptr,
2195 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2198 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2204 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2206 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
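/*
 * lpfc_els_free_iocb - Release an ELS iocb and its DMA buffers
 *
 * Drops the ndlp reference held in context1 (honouring NLP_DEFER_RM by
 * letting an unused node be released), then frees the command/response
 * payload chain in context2 and the buffer pointer list in context3.
 * If LPFC_DELAY_MEM_FREE is set, the payload buffers are instead parked
 * on phba->elsbuf because the firmware may still be DMAing into them.
 * Finally the iocbq itself is returned to the pool.
 */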
2212 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
2214 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2215 struct lpfc_nodelist *ndlp;
2217 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
2219 if (ndlp->nlp_flag & NLP_DEFER_RM) {
2222 /* If the ndlp is not being used by another discovery
2225 if (!lpfc_nlp_not_used(ndlp)) {
2226 /* If ndlp is being used by another discovery
2227 * thread, just clear NLP_DEFER_RM
2229 ndlp->nlp_flag &= ~NLP_DEFER_RM;
2234 elsiocb->context1 = NULL;
2236 /* context2 = cmd, context2->next = rsp, context3 = bpl */
2237 if (elsiocb->context2) {
2238 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
2239 /* Firmware could still be DMAing the
2240 * payload, so don't free the data buffer till after
2243 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
2244 buf_ptr = elsiocb->context2;
2245 elsiocb->context2 = NULL;
2248 spin_lock_irq(&phba->hbalock);
2249 if (!list_empty(&buf_ptr->list)) {
2250 list_remove_head(&buf_ptr->list,
2251 buf_ptr1, struct lpfc_dmabuf,
2253 INIT_LIST_HEAD(&buf_ptr1->list);
2254 list_add_tail(&buf_ptr1->list,
2258 INIT_LIST_HEAD(&buf_ptr->list);
2259 list_add_tail(&buf_ptr->list, &phba->elsbuf);
2261 spin_unlock_irq(&phba->hbalock);
2264 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
2265 lpfc_els_free_data(phba, buf_ptr1);
2269 if (elsiocb->context3) {
2270 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
2271 lpfc_els_free_bpl(phba, buf_ptr);
2273 lpfc_sli_release_iocbq(phba, elsiocb);
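/* Completion callback for an ACC response to a received LOGO. If the
 * node is in NPR state and no other discovery thread is using it, the
 * node is released; otherwise only its RPI is unregistered.
 */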
2278 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2279 struct lpfc_iocbq *rspiocb)
2281 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2282 struct lpfc_vport *vport = cmdiocb->vport;
2285 irsp = &rspiocb->iocb;
2286 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2287 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
2288 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
2289 /* ACC to LOGO completes to NPort <nlp_DID> */
2290 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2291 "0109 ACC to LOGO completes to NPort x%x "
2292 "Data: x%x x%x x%x\n",
2293 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2296 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
2297 /* NPort Recovery mode or node is just allocated */
2298 if (!lpfc_nlp_not_used(ndlp)) {
2299 /* If the ndlp is being used by another discovery
2300 * thread, just unregister the RPI.
2302 lpfc_unreg_rpi(vport, ndlp);
2304 /* Indicate the node has already been released; do
2305 * not reference it from within lpfc_els_free_iocb.
2307 cmdiocb->context1 = NULL;
2310 lpfc_els_free_iocb(phba, cmdiocb);
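/* Mailbox completion handler used for default RPI cleanup after an ELS
 * response. Frees the mailbox resources and drops the ndlp if no other
 * discovery thread is still using it.
 */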
2315 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2317 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2318 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2320 pmb->context1 = NULL;
2321 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2323 mempool_free(pmb, phba->mbox_mem_pool);
2324 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2326 /* This is the end of the default RPI cleanup logic for this
2327 * ndlp. If no other discovery threads are using this ndlp,
2328 * we should free all resources associated with it.
2330 lpfc_nlp_not_used(ndlp);
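/* Generic completion callback for ELS responses (ACC/LS_RJT) issued by
 * this driver. If a REG_LOGIN mailbox was prepared for the response it
 * is issued here on success, otherwise the mailbox and its buffers are
 * freed. The routine finally clears the response flags on the node and
 * frees the ELS iocb.
 */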
2336 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2337 struct lpfc_iocbq *rspiocb)
2339 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2340 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
2341 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
2344 LPFC_MBOXQ_t *mbox = NULL;
2345 struct lpfc_dmabuf *mp = NULL;
2346 uint32_t ls_rjt = 0;
2348 irsp = &rspiocb->iocb;
2350 if (cmdiocb->context_un.mbox)
2351 mbox = cmdiocb->context_un.mbox;
2353 /* First determine if this is an LS_RJT cmpl. Note that this callback
2354 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
2356 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
2357 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
2358 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
2359 /* An LS_RJT associated with Default RPI cleanup has its own
2360 * separate code path.
2362 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2366 /* Check to see if link went down during discovery */
2367 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
2369 mp = (struct lpfc_dmabuf *) mbox->context1;
2371 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2374 mempool_free(mbox, phba->mbox_mem_pool);
2376 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
2377 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
2378 if (lpfc_nlp_not_used(ndlp)) {
2380 /* Indicate the node has already been released;
2381 * do not reference it from within
2382 * the routine lpfc_els_free_iocb.
2384 cmdiocb->context1 = NULL;
2389 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2390 "ELS rsp cmpl: status:x%x/x%x did:x%x",
2391 irsp->ulpStatus, irsp->un.ulpWord[4],
2392 cmdiocb->iocb.un.elsreq64.remoteID);
2393 /* ELS response tag <ulpIoTag> completes */
2394 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2395 "0110 ELS response tag x%x completes "
2396 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
2397 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
2398 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
2399 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2402 if ((rspiocb->iocb.ulpStatus == 0)
2403 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2404 lpfc_unreg_rpi(vport, ndlp);
2405 /* Increment the ndlp reference count to hold a
2406 * reference to the ndlp for the callback function.
2408 mbox->context2 = lpfc_nlp_get(ndlp);
2409 mbox->vport = vport;
2410 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
2411 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2412 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2415 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
2416 ndlp->nlp_prev_state = ndlp->nlp_state;
2417 lpfc_nlp_set_state(vport, ndlp,
2418 NLP_STE_REG_LOGIN_ISSUE);
2420 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
2421 != MBX_NOT_FINISHED)
2424 /* Decrement the ndlp reference count we
2425 * set for this failed mailbox command.
2429 /* ELS rsp: Cannot issue reg_login for <NPortid> */
2430 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2431 "0138 ELS rsp: Cannot issue reg_login for x%x "
2432 "Data: x%x x%x x%x\n",
2433 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2436 if (lpfc_nlp_not_used(ndlp)) {
2438 /* Indicate the node has already been released;
2439 * do not reference it from within
2440 * the routine lpfc_els_free_iocb.
2442 cmdiocb->context1 = NULL;
2445 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
2446 if (!lpfc_error_lost_link(irsp) &&
2447 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
2448 if (lpfc_nlp_not_used(ndlp)) {
2450 /* Indicate the node has already been
2451 * released; do not reference
2452 * it from within the routine
2453 * lpfc_els_free_iocb.
2455 cmdiocb->context1 = NULL;
2459 mp = (struct lpfc_dmabuf *) mbox->context1;
2461 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2464 mempool_free(mbox, phba->mbox_mem_pool);
2467 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2468 spin_lock_irq(shost->host_lock);
2469 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2470 spin_unlock_irq(shost->host_lock);
2472 /* If the node is not being used by another discovery thread,
2473 * and we are sending a reject, we are done with it.
2474 * Release driver reference count here and free associated
2478 if (lpfc_nlp_not_used(ndlp))
2479 /* Indicate the node has already been released;
2480 * do not reference it from within
2481 * the routine lpfc_els_free_iocb.
2483 cmdiocb->context1 = NULL;
2486 lpfc_els_free_iocb(phba, cmdiocb);
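/* Build and transmit an ELS ACC response on the XRI of the received
 * command. The payload depends on the command being accepted: a bare
 * ACC, an ACC carrying our service parameters (PLOGI), or a PRLO ACC.
 */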
2491 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
2492 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2495 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2496 struct lpfc_hba *phba = vport->phba;
2499 struct lpfc_iocbq *elsiocb;
2500 struct lpfc_sli_ring *pring;
2501 struct lpfc_sli *psli;
2505 ELS_PKT *els_pkt_ptr;
2508 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2509 oldcmd = &oldiocb->iocb;
2513 cmdsize = sizeof(uint32_t);
2514 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2515 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2517 spin_lock_irq(shost->host_lock);
2518 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2519 spin_unlock_irq(shost->host_lock);
2523 icmd = &elsiocb->iocb;
2524 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2525 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2526 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2527 pcmd += sizeof(uint32_t);
2529 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2530 "Issue ACC: did:x%x flg:x%x",
2531 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2534 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2535 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2536 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2540 icmd = &elsiocb->iocb;
2541 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2542 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2545 elsiocb->context_un.mbox = mbox;
2547 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2548 pcmd += sizeof(uint32_t);
2549 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2551 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2552 "Issue ACC PLOGI: did:x%x flg:x%x",
2553 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2556 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2557 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
2558 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
2562 icmd = &elsiocb->iocb;
2563 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2564 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2566 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
2567 sizeof(uint32_t) + sizeof(PRLO));
2568 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
2569 els_pkt_ptr = (ELS_PKT *) pcmd;
2570 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
2572 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2573 "Issue ACC PRLO: did:x%x flg:x%x",
2574 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2579 /* Xmit ELS ACC response tag <ulpIoTag> */
2580 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2581 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
2582 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
2583 elsiocb->iotag, elsiocb->iocb.ulpContext,
2584 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2586 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2587 spin_lock_irq(shost->host_lock);
2588 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2589 spin_unlock_irq(shost->host_lock);
2590 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
2592 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2595 phba->fc_stat.elsXmitACC++;
2596 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2597 if (rc == IOCB_ERROR) {
2598 lpfc_els_free_iocb(phba, elsiocb);
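/* Build and transmit an LS_RJT response carrying the given reject
 * reason/explanation on the XRI of the received ELS command.
 */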
2605 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2606 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2609 struct lpfc_hba *phba = vport->phba;
2612 struct lpfc_iocbq *elsiocb;
2613 struct lpfc_sli_ring *pring;
2614 struct lpfc_sli *psli;
2620 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2622 cmdsize = 2 * sizeof(uint32_t);
2623 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2624 ndlp->nlp_DID, ELS_CMD_LS_RJT);
2628 icmd = &elsiocb->iocb;
2629 oldcmd = &oldiocb->iocb;
2630 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2631 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2633 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
2634 pcmd += sizeof(uint32_t);
2635 *((uint32_t *) (pcmd)) = rejectError;
2638 elsiocb->context_un.mbox = mbox;
2640 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2641 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2642 "0129 Xmit ELS RJT x%x response tag x%x "
2643 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2645 rejectError, elsiocb->iotag,
2646 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2647 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2648 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2649 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
2650 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
2652 phba->fc_stat.elsXmitLSRJT++;
2653 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2654 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2656 if (rc == IOCB_ERROR) {
2657 lpfc_els_free_iocb(phba, elsiocb);
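/* Build and transmit an ACC response to a received ADISC, returning
 * our hard AL_PA, port name, node name and N_Port ID.
 */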
2664 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2665 struct lpfc_nodelist *ndlp)
2667 struct lpfc_hba *phba = vport->phba;
2668 struct lpfc_sli *psli = &phba->sli;
2669 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2671 IOCB_t *icmd, *oldcmd;
2672 struct lpfc_iocbq *elsiocb;
2677 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2678 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2679 ndlp->nlp_DID, ELS_CMD_ACC);
2683 icmd = &elsiocb->iocb;
2684 oldcmd = &oldiocb->iocb;
2685 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2687 /* Xmit ADISC ACC response tag <ulpIoTag> */
2688 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2689 "0130 Xmit ADISC ACC response iotag x%x xri: "
2690 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
2691 elsiocb->iotag, elsiocb->iocb.ulpContext,
2692 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2694 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2696 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2697 pcmd += sizeof(uint32_t);
2699 ap = (ADISC *) (pcmd);
2700 ap->hardAL_PA = phba->fc_pref_ALPA;
2701 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2702 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2703 ap->DID = be32_to_cpu(vport->fc_myDID);
2705 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2706 "Issue ACC ADISC: did:x%x flg:x%x",
2707 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2709 phba->fc_stat.elsXmitACC++;
2710 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2711 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2712 if (rc == IOCB_ERROR) {
2713 lpfc_els_free_iocb(phba, elsiocb);
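/* Build and transmit an ACC response to a received PRLI. The FCP
 * service parameter page is filled in here, including the FC-TAPE
 * bits when the target and firmware level support them.
 */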
2720 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2721 struct lpfc_nodelist *ndlp)
2723 struct lpfc_hba *phba = vport->phba;
2728 struct lpfc_iocbq *elsiocb;
2729 struct lpfc_sli_ring *pring;
2730 struct lpfc_sli *psli;
2736 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2738 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2739 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2740 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2744 icmd = &elsiocb->iocb;
2745 oldcmd = &oldiocb->iocb;
2746 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2747 /* Xmit PRLI ACC response tag <ulpIoTag> */
2748 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2749 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
2750 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2751 elsiocb->iotag, elsiocb->iocb.ulpContext,
2752 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
2754 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2756 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2757 pcmd += sizeof(uint32_t);
2759 /* For PRLI, remainder of payload is PRLI parameter page */
2760 memset(pcmd, 0, sizeof(PRLI));
2762 npr = (PRLI *) pcmd;
2765 * If the remote port is a target and our firmware version is 3.20 or
2766 * later, set the following bits for FC-TAPE support.
2768 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
2769 (vpd->rev.feaLevelHigh >= 0x02)) {
2770 npr->ConfmComplAllowed = 1;
2772 npr->TaskRetryIdReq = 1;
2775 npr->acceptRspCode = PRLI_REQ_EXECUTED;
2776 npr->estabImagePair = 1;
2777 npr->readXferRdyDis = 1;
2778 npr->ConfmComplAllowed = 1;
2780 npr->prliType = PRLI_FCP_TYPE;
2781 npr->initiatorFunc = 1;
2783 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2784 "Issue ACC PRLI: did:x%x flg:x%x",
2785 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2787 phba->fc_stat.elsXmitACC++;
2788 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2790 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2791 if (rc == IOCB_ERROR) {
2792 lpfc_els_free_iocb(phba, elsiocb);
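/* Build and transmit an ACC response to a received RNID, returning the
 * common identification data and, if requested, the topology discovery
 * page.
 */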
2799 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2800 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
2802 struct lpfc_hba *phba = vport->phba;
2804 IOCB_t *icmd, *oldcmd;
2805 struct lpfc_iocbq *elsiocb;
2806 struct lpfc_sli_ring *pring;
2807 struct lpfc_sli *psli;
2813 pring = &psli->ring[LPFC_ELS_RING];
2815 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
2816 + (2 * sizeof(struct lpfc_name));
2818 cmdsize += sizeof(RNID_TOP_DISC);
2820 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2821 ndlp->nlp_DID, ELS_CMD_ACC);
2825 icmd = &elsiocb->iocb;
2826 oldcmd = &oldiocb->iocb;
2827 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2828 /* Xmit RNID ACC response tag <ulpIoTag> */
2829 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2830 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
2831 elsiocb->iotag, elsiocb->iocb.ulpContext);
2832 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2833 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2834 pcmd += sizeof(uint32_t);
2836 memset(pcmd, 0, sizeof(RNID));
2837 rn = (RNID *) (pcmd);
2838 rn->Format = format;
2839 rn->CommonLen = (2 * sizeof(struct lpfc_name));
2840 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2841 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2844 rn->SpecificLen = 0;
2846 case RNID_TOPOLOGY_DISC:
2847 rn->SpecificLen = sizeof(RNID_TOP_DISC);
2848 memcpy(&rn->un.topologyDisc.portName,
2849 &vport->fc_portname, sizeof(struct lpfc_name));
2850 rn->un.topologyDisc.unitType = RNID_HBA;
2851 rn->un.topologyDisc.physPort = 0;
2852 rn->un.topologyDisc.attachedNodes = 0;
2856 rn->SpecificLen = 0;
2860 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2861 "Issue ACC RNID: did:x%x flg:x%x",
2862 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2864 phba->fc_stat.elsXmitACC++;
2865 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2867 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2868 * it could be freed */
2870 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2871 if (rc == IOCB_ERROR) {
2872 lpfc_els_free_iocb(phba, elsiocb);
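/* Walk the vport node list and issue an ADISC to every NPR node still
 * flagged for discovery, up to the configured discovery thread limit.
 * Sets FC_NLP_MORE when more nodes remain to be processed.
 */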
2879 lpfc_els_disc_adisc(struct lpfc_vport *vport)
2881 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2882 struct lpfc_nodelist *ndlp, *next_ndlp;
2885 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2886 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2887 if (!NLP_CHK_NODE_ACT(ndlp))
2889 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2890 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2891 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2892 spin_lock_irq(shost->host_lock);
2893 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2894 spin_unlock_irq(shost->host_lock);
2895 ndlp->nlp_prev_state = ndlp->nlp_state;
2896 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2897 lpfc_issue_els_adisc(vport, ndlp, 0);
2899 vport->num_disc_nodes++;
2900 if (vport->num_disc_nodes >=
2901 vport->cfg_discovery_threads) {
2902 spin_lock_irq(shost->host_lock);
2903 vport->fc_flag |= FC_NLP_MORE;
2904 spin_unlock_irq(shost->host_lock);
2909 if (sentadisc == 0) {
2910 spin_lock_irq(shost->host_lock);
2911 vport->fc_flag &= ~FC_NLP_MORE;
2912 spin_unlock_irq(shost->host_lock);
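/* Walk the vport node list and issue a PLOGI to every NPR node flagged
 * for discovery that does not require ADISC, again bounded by the
 * configured discovery thread limit.
 */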
2918 lpfc_els_disc_plogi(struct lpfc_vport *vport)
2920 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2921 struct lpfc_nodelist *ndlp, *next_ndlp;
2924 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
2925 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2926 if (!NLP_CHK_NODE_ACT(ndlp))
2928 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2929 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2930 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
2931 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
2932 ndlp->nlp_prev_state = ndlp->nlp_state;
2933 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2934 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2936 vport->num_disc_nodes++;
2937 if (vport->num_disc_nodes >=
2938 vport->cfg_discovery_threads) {
2939 spin_lock_irq(shost->host_lock);
2940 vport->fc_flag |= FC_NLP_MORE;
2941 spin_unlock_irq(shost->host_lock);
2947 lpfc_set_disctmo(vport);
2950 spin_lock_irq(shost->host_lock);
2951 vport->fc_flag &= ~FC_NLP_MORE;
2952 spin_unlock_irq(shost->host_lock);
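/* Release every buffered RSCN payload on this vport and clear the RSCN
 * state flags. The fc_rscn_flush token serializes access to the
 * fc_rscn_id_list against other threads.
 */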
2958 lpfc_els_flush_rscn(struct lpfc_vport *vport)
2960 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2961 struct lpfc_hba *phba = vport->phba;
2964 spin_lock_irq(shost->host_lock);
2965 if (vport->fc_rscn_flush) {
2966 /* Another thread is walking fc_rscn_id_list on this vport */
2967 spin_unlock_irq(shost->host_lock);
2970 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
2971 vport->fc_rscn_flush = 1;
2972 spin_unlock_irq(shost->host_lock);
2974 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2975 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2976 vport->fc_rscn_id_list[i] = NULL;
2978 spin_lock_irq(shost->host_lock);
2979 vport->fc_rscn_id_cnt = 0;
2980 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2981 spin_unlock_irq(shost->host_lock);
2982 lpfc_can_disctmo(vport);
2983 /* Indicate we are done walking this fc_rscn_id_list */
2984 vport->fc_rscn_flush = 0;
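/* Check whether the given N_Port ID is covered by any of the buffered
 * RSCN payloads, honoring the port/area/domain/fabric address format of
 * each RSCN entry. Fabric DIDs never match.
 */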
2988 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2993 uint32_t payload_len, i;
2994 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2996 ns_did.un.word = did;
2998 /* Never match fabric nodes for RSCNs */
2999 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
3002 /* If we are doing a FULL RSCN rediscovery, match everything */
3003 if (vport->fc_flag & FC_RSCN_DISCOVERY)
3006 spin_lock_irq(shost->host_lock);
3007 if (vport->fc_rscn_flush) {
3008 /* Another thread is walking fc_rscn_id_list on this vport */
3009 spin_unlock_irq(shost->host_lock);
3012 /* Indicate we are walking fc_rscn_id_list on this vport */
3013 vport->fc_rscn_flush = 1;
3014 spin_unlock_irq(shost->host_lock);
3015 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
3016 lp = vport->fc_rscn_id_list[i]->virt;
3017 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
3018 payload_len -= sizeof(uint32_t); /* take off word 0 */
3019 while (payload_len) {
3020 rscn_did.un.word = be32_to_cpu(*lp++);
3021 payload_len -= sizeof(uint32_t);
3022 switch (rscn_did.un.b.resv) {
3023 case 0: /* Single N_Port ID affected */
3024 if (ns_did.un.word == rscn_did.un.word)
3025 goto return_did_out;
3027 case 1: /* Whole N_Port Area affected */
3028 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3029 && (ns_did.un.b.area == rscn_did.un.b.area))
3030 goto return_did_out;
3032 case 2: /* Whole N_Port Domain affected */
3033 if (ns_did.un.b.domain == rscn_did.un.b.domain)
3034 goto return_did_out;
3037 /* Unknown Identifier in RSCN node */
3038 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3039 "0217 Unknown Identifier in "
3040 "RSCN payload Data: x%x\n",
3042 case 3: /* Whole Fabric affected */
3043 goto return_did_out;
3047 /* Indicate we are done walking fc_rscn_id_list on this vport */
3048 vport->fc_rscn_flush = 0;
3051 /* Indicate we are done walking fc_rscn_id_list on this vport */
3052 vport->fc_rscn_flush = 0;
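/* Send a DEVICE_RECOVERY event to every active node that matches a
 * pending RSCN payload and cancel any retry delay timer on it.
 */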
3057 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3059 struct lpfc_nodelist *ndlp = NULL;
3061 /* Move all nodes affected by pending RSCNs to NPR state. */
3062 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3063 if (!NLP_CHK_NODE_ACT(ndlp) ||
3064 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
3065 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
3067 lpfc_disc_state_machine(vport, ndlp, NULL,
3068 NLP_EVT_DEVICE_RECOVERY);
3069 lpfc_cancel_retry_delay_tmo(vport, ndlp);
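/* Handle a received RSCN. Depending on the current discovery state the
 * RSCN is either ACCed and ignored, deferred by buffering its payload
 * for later processing, or accepted and acted on immediately via
 * lpfc_els_handle_rscn().
 */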
3075 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3076 struct lpfc_nodelist *ndlp)
3078 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3079 struct lpfc_hba *phba = vport->phba;
3080 struct lpfc_dmabuf *pcmd;
3081 uint32_t *lp, *datap;
3083 uint32_t payload_len, length, nportid, *cmd;
3085 int rscn_id = 0, hba_id = 0;
3088 icmd = &cmdiocb->iocb;
3089 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3090 lp = (uint32_t *) pcmd->virt;
3092 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
3093 payload_len -= sizeof(uint32_t); /* take off word 0 */
3095 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3096 "0214 RSCN received Data: x%x x%x x%x x%x\n",
3097 vport->fc_flag, payload_len, *lp,
3098 vport->fc_rscn_id_cnt);
3099 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
3100 fc_host_post_event(shost, fc_get_event_number(),
3101 FCH_EVT_RSCN, lp[i]);
3103 /* If we are about to begin discovery, just ACC the RSCN.
3104 * Discovery processing will satisfy it.
3106 if (vport->port_state <= LPFC_NS_QRY) {
3107 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3108 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
3109 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3111 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3115 /* If this RSCN just contains NPortIDs for other vports on this HBA,
3116 * just ACC and ignore it.
3118 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3119 !(vport->cfg_peer_port_login)) {
3124 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
3125 i -= sizeof(uint32_t);
3127 if (lpfc_find_vport_by_did(phba, nportid))
3130 if (rscn_id == hba_id) {
3131 /* ALL NPortIDs in RSCN are on HBA */
3132 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3134 "Data: x%x x%x x%x x%x\n",
3135 vport->fc_flag, payload_len,
3136 *lp, vport->fc_rscn_id_cnt);
3137 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3138 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
3139 ndlp->nlp_DID, vport->port_state,
3142 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
3148 spin_lock_irq(shost->host_lock);
3149 if (vport->fc_rscn_flush) {
3150 /* Another thread is walking fc_rscn_id_list on this vport */
3151 spin_unlock_irq(shost->host_lock);
3152 vport->fc_flag |= FC_RSCN_DISCOVERY;
3154 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3157 /* Indicate we are walking fc_rscn_id_list on this vport */
3158 vport->fc_rscn_flush = 1;
3159 spin_unlock_irq(shost->host_lock);
3160 /* Get the array count after successfully acquiring the token */
3161 rscn_cnt = vport->fc_rscn_id_cnt;
3162 /* If we are already processing an RSCN, save the received
3163 * RSCN payload buffer (cmdiocb->context2) to process later.
3165 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
3166 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3167 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
3168 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3170 spin_lock_irq(shost->host_lock);
3171 vport->fc_flag |= FC_RSCN_DEFERRED;
3172 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
3173 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
3174 vport->fc_flag |= FC_RSCN_MODE;
3175 spin_unlock_irq(shost->host_lock);
3177 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
3178 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
3181 (payload_len + length <= LPFC_BPL_SIZE)) {
3182 *cmd &= ELS_CMD_MASK;
3183 *cmd |= cpu_to_be32(payload_len + length);
3184 memcpy(((uint8_t *)cmd) + length, lp,
3187 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
3188 vport->fc_rscn_id_cnt++;
3189 /* If we zero cmdiocb->context2, the calling
3190 * routine will not try to free it.
3192 cmdiocb->context2 = NULL;
3195 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3196 "0235 Deferred RSCN "
3197 "Data: x%x x%x x%x\n",
3198 vport->fc_rscn_id_cnt, vport->fc_flag,
3201 vport->fc_flag |= FC_RSCN_DISCOVERY;
3202 spin_unlock_irq(shost->host_lock);
3203 /* ReDiscovery RSCN */
3204 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3205 "0234 ReDiscovery RSCN "
3206 "Data: x%x x%x x%x\n",
3207 vport->fc_rscn_id_cnt, vport->fc_flag,
3210 /* Indicate we are done walking fc_rscn_id_list on this vport */
3211 vport->fc_rscn_flush = 0;
3213 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3214 /* send RECOVERY event for ALL nodes that match RSCN payload */
3215 lpfc_rscn_recovery_check(vport);
3216 spin_lock_irq(shost->host_lock);
3217 vport->fc_flag &= ~FC_RSCN_DEFERRED;
3218 spin_unlock_irq(shost->host_lock);
3221 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3222 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
3223 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
3225 spin_lock_irq(shost->host_lock);
3226 vport->fc_flag |= FC_RSCN_MODE;
3227 spin_unlock_irq(shost->host_lock);
3228 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
3229 /* Indicate we are done walking fc_rscn_id_list on this vport */
3230 vport->fc_rscn_flush = 0;
3232 * If we zero cmdiocb->context2, the calling routine will
3233 * not try to free it.
3235 cmdiocb->context2 = NULL;
3236 lpfc_set_disctmo(vport);
3238 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3239 /* send RECOVERY event for ALL nodes that match RSCN payload */
3240 lpfc_rscn_recovery_check(vport);
3241 return lpfc_els_handle_rscn(vport);
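/* Process buffered RSCN events by querying the NameServer (GID_FT), or
 * by logging into the NameServer first if no login exists yet.
 */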
3245 lpfc_els_handle_rscn(struct lpfc_vport *vport)
3247 struct lpfc_nodelist *ndlp;
3248 struct lpfc_hba *phba = vport->phba;
3250 /* Ignore RSCN if the port is being torn down. */
3251 if (vport->load_flag & FC_UNLOADING) {
3252 lpfc_els_flush_rscn(vport);
3256 /* Start timer for RSCN processing */
3257 lpfc_set_disctmo(vport);
3259 /* RSCN processed */
3260 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3261 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
3262 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
3265 /* To process RSCN, first compare RSCN data with NameServer */
3266 vport->fc_ns_retry = 0;
3267 vport->num_disc_nodes = 0;
3269 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3270 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
3271 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
3272 /* Good ndlp, issue CT Request to NameServer */
3273 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
3274 /* Wait for NameServer query cmpl before we can
3278 /* If login to NameServer does not exist, issue one */
3279 /* Good status, issue PLOGI to NameServer */
3280 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3281 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3282 /* Wait for NameServer login cmpl before we can
3287 ndlp = lpfc_enable_node(vport, ndlp,
3288 NLP_STE_PLOGI_ISSUE);
3290 lpfc_els_flush_rscn(vport);
3293 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
3295 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3297 lpfc_els_flush_rscn(vport);
3300 lpfc_nlp_init(vport, ndlp, NameServer_DID);
3301 ndlp->nlp_prev_state = ndlp->nlp_state;
3302 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3304 ndlp->nlp_type |= NLP_FABRIC;
3305 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
3306 /* Wait for NameServer login cmpl before we can
3312 lpfc_els_flush_rscn(vport);
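/* Handle a received FLOGI, which indicates a point-to-point topology.
 * The port name comparison decides which side originates the PLOGI;
 * invalid service parameters cause an LS_RJT.
 */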
3317 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3318 struct lpfc_nodelist *ndlp)
3320 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3321 struct lpfc_hba *phba = vport->phba;
3322 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3323 uint32_t *lp = (uint32_t *) pcmd->virt;
3324 IOCB_t *icmd = &cmdiocb->iocb;
3325 struct serv_parm *sp;
3332 sp = (struct serv_parm *) lp;
3334 /* FLOGI received */
3336 lpfc_set_disctmo(vport);
3338 if (phba->fc_topology == TOPOLOGY_LOOP) {
3339 /* We should never receive a FLOGI in loop mode, ignore it */
3340 did = icmd->un.elsreq64.remoteID;
3342 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
3344 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3345 "0113 An FLOGI ELS command x%x was "
3346 "received from DID x%x in Loop Mode\n",
3353 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
3354 /* For a FLOGI that we accept, if our portname is greater
3355 * than the remote portname, we initiate Nport login.
3358 rc = memcmp(&vport->fc_portname, &sp->portName,
3359 sizeof(struct lpfc_name));
3362 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3366 lpfc_linkdown(phba);
3367 lpfc_init_link(phba, mbox,
3369 phba->cfg_link_speed);
3370 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
3371 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3372 mbox->vport = vport;
3373 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3374 lpfc_set_loopback_flag(phba);
3375 if (rc == MBX_NOT_FINISHED) {
3376 mempool_free(mbox, phba->mbox_mem_pool);
3379 } else if (rc > 0) { /* greater than */
3380 spin_lock_irq(shost->host_lock);
3381 vport->fc_flag |= FC_PT2PT_PLOGI;
3382 spin_unlock_irq(shost->host_lock);
3384 spin_lock_irq(shost->host_lock);
3385 vport->fc_flag |= FC_PT2PT;
3386 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3387 spin_unlock_irq(shost->host_lock);
3389 /* Reject this request because of invalid parameters */
3390 stat.un.b.lsRjtRsvd0 = 0;
3391 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3392 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
3393 stat.un.b.vendorUnique = 0;
3394 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3400 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
3406 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3407 struct lpfc_nodelist *ndlp)
3409 struct lpfc_dmabuf *pcmd;
3416 icmd = &cmdiocb->iocb;
3417 did = icmd->un.elsreq64.remoteID;
3418 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3419 lp = (uint32_t *) pcmd->virt;
3426 switch (rn->Format) {
3428 case RNID_TOPOLOGY_DISC:
3430 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
3433 /* Reject this request because the format is not supported */
3434 stat.un.b.lsRjtRsvd0 = 0;
3435 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3436 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3437 stat.un.b.vendorUnique = 0;
3438 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3445 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3446 struct lpfc_nodelist *ndlp)
3450 /* For now, unconditionally reject this command */
3451 stat.un.b.lsRjtRsvd0 = 0;
3452 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3453 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3454 stat.un.b.vendorUnique = 0;
3455 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
3460 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3462 struct lpfc_sli *psli = &phba->sli;
3463 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3468 struct lpfc_iocbq *elsiocb;
3469 struct lpfc_nodelist *ndlp;
3470 uint16_t xri, status;
3475 ndlp = (struct lpfc_nodelist *) pmb->context2;
3476 xri = (uint16_t) ((unsigned long)(pmb->context1));
3477 pmb->context1 = NULL;
3478 pmb->context2 = NULL;
3480 if (mb->mbxStatus) {
3481 mempool_free(pmb, phba->mbox_mem_pool);
3485 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
3486 mempool_free(pmb, phba->mbox_mem_pool);
3487 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
3488 lpfc_max_els_tries, ndlp,
3489 ndlp->nlp_DID, ELS_CMD_ACC);
3491 /* Decrement the ndlp reference count from previous mbox command */
3497 icmd = &elsiocb->iocb;
3498 icmd->ulpContext = xri;
3500 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3501 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3502 pcmd += sizeof(uint32_t); /* Skip past command */
3503 rps_rsp = (RPS_RSP *)pcmd;
3505 if (phba->fc_topology != TOPOLOGY_LOOP)
3509 if (phba->pport->fc_flag & FC_FABRIC)
3513 rps_rsp->portStatus = cpu_to_be16(status);
3514 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
3515 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
3516 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
3517 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
3518 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
3519 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
3520 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
3521 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
3522 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
3523 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3524 elsiocb->iotag, elsiocb->iocb.ulpContext,
3525 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3527 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3528 phba->fc_stat.elsXmitACC++;
3529 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
3530 lpfc_els_free_iocb(phba, elsiocb);
3535 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3536 struct lpfc_nodelist *ndlp)
3538 struct lpfc_hba *phba = vport->phba;
3542 struct lpfc_dmabuf *pcmd;
3546 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3547 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
3548 stat.un.b.lsRjtRsvd0 = 0;
3549 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3550 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3551 stat.un.b.vendorUnique = 0;
3552 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3556 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3557 lp = (uint32_t *) pcmd->virt;
3558 flag = (be32_to_cpu(*lp++) & 0xf);
3562 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
3563 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
3564 sizeof(struct lpfc_name)) == 0))) {
3566 printk("Fix me....\n");
3568 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
3570 lpfc_read_lnk_stat(phba, mbox);
3572 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
3573 mbox->context2 = lpfc_nlp_get(ndlp);
3574 mbox->vport = vport;
3575 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
3576 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3577 != MBX_NOT_FINISHED)
3578 /* Mbox completion will send ELS Response */
3580 /* Decrement reference count used for the failed mbox
3584 mempool_free(mbox, phba->mbox_mem_pool);
3587 stat.un.b.lsRjtRsvd0 = 0;
3588 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3589 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3590 stat.un.b.vendorUnique = 0;
3591 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
3596 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
3597 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
3599 struct lpfc_hba *phba = vport->phba;
3600 IOCB_t *icmd, *oldcmd;
3602 struct lpfc_iocbq *elsiocb;
3603 struct lpfc_sli *psli = &phba->sli;
3604 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3607 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3608 ndlp->nlp_DID, ELS_CMD_ACC);
3613 icmd = &elsiocb->iocb;
3614 oldcmd = &oldiocb->iocb;
3615 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3617 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3618 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3619 pcmd += sizeof(uint16_t);
3620 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
3621 pcmd += sizeof(uint16_t);
3623 /* Setup the RPL ACC payload */
3624 rpl_rsp.listLen = be32_to_cpu(1);
3626 rpl_rsp.port_num_blk.portNum = 0;
3627 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
3628 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
3629 sizeof(struct lpfc_name));
3630 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
3631 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
3632 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3633 "0120 Xmit ELS RPL ACC response tag x%x "
3634 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3636 elsiocb->iotag, elsiocb->iocb.ulpContext,
3637 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3639 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3640 phba->fc_stat.elsXmitACC++;
3641 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
3642 lpfc_els_free_iocb(phba, elsiocb);
3649 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3650 struct lpfc_nodelist *ndlp)
3652 struct lpfc_dmabuf *pcmd;
3659 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3660 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
3661 stat.un.b.lsRjtRsvd0 = 0;
3662 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3663 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
3664 stat.un.b.vendorUnique = 0;
3665 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
3669 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3670 lp = (uint32_t *) pcmd->virt;
3671 rpl = (RPL *) (lp + 1);
3673 maxsize = be32_to_cpu(rpl->maxsize);
3675 /* We support only one port */
3676 if ((rpl->index == 0) &&
3678 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
3679 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
3681 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
3683 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
3689 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3690 struct lpfc_nodelist *ndlp)
3692 struct lpfc_dmabuf *pcmd;
3696 uint32_t cmd, cnt, did;
3698 icmd = &cmdiocb->iocb;
3699 did = icmd->un.elsreq64.remoteID;
3700 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3701 lp = (uint32_t *) pcmd->virt;
3705 /* FARP-REQ received from DID <did> */
3706 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3707 "0601 FARP-REQ received from DID x%x\n", did);
3708 /* We only support matching on WWPN or WWNN */
3709 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
3714 /* If this FARP command is searching for my portname */
3715 if (fp->Mflags & FARP_MATCH_PORT) {
3716 if (memcmp(&fp->RportName, &vport->fc_portname,
3717 sizeof(struct lpfc_name)) == 0)
3721 /* If this FARP command is searching for my nodename */
3722 if (fp->Mflags & FARP_MATCH_NODE) {
3723 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
3724 sizeof(struct lpfc_name)) == 0)
3729 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
3730 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
3731 /* Log back into the node before sending the FARP. */
3732 if (fp->Rflags & FARP_REQUEST_PLOGI) {
3733 ndlp->nlp_prev_state = ndlp->nlp_state;
3734 lpfc_nlp_set_state(vport, ndlp,
3735 NLP_STE_PLOGI_ISSUE);
3736 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
3739 /* Send a FARP response to that node */
3740 if (fp->Rflags & FARP_REQUEST_FARPR)
3741 lpfc_issue_els_farpr(vport, did, 0);
3748 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3749 struct lpfc_nodelist *ndlp)
3751 struct lpfc_dmabuf *pcmd;
3756 icmd = &cmdiocb->iocb;
3757 did = icmd->un.elsreq64.remoteID;
3758 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3759 lp = (uint32_t *) pcmd->virt;
3762 /* FARP-RSP received from DID <did> */
3763 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3764 "0600 FARP-RSP received from DID x%x\n", did);
3765 /* ACCEPT the FARP response */
3766 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3772 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3773 struct lpfc_nodelist *fan_ndlp)
3775 struct lpfc_hba *phba = vport->phba;
3779 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
3780 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
3782 /* FAN received; Fan does not have a reply sequence */
3783 if ((vport == phba->pport) &&
3784 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
3785 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3786 sizeof(struct lpfc_name))) ||
3787 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
3788 sizeof(struct lpfc_name)))) {
3789 /* This port has switched fabrics. FLOGI is required */
3790 lpfc_initial_flogi(vport);
3792 /* FAN verified - skip FLOGI */
3793 vport->fc_myDID = vport->fc_prevDID;
3794 lpfc_issue_fabric_reglogin(vport);
3801 lpfc_els_timeout(unsigned long ptr)
3803 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
3804 struct lpfc_hba *phba = vport->phba;
3805 uint32_t tmo_posted;
3806 unsigned long iflag;
3808 spin_lock_irqsave(&vport->work_port_lock, iflag);
3809 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
3811 vport->work_port_events |= WORKER_ELS_TMO;
3812 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3815 lpfc_worker_wake_up(phba);
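/* Worker-thread handler for the ELS timer. Scans the ELS txcmplq for
 * commands on this vport whose driver timeout has expired and aborts
 * them, then rearms the timer if outstanding ELS commands remain.
 */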
3820 lpfc_els_timeout_handler(struct lpfc_vport *vport)
3822 struct lpfc_hba *phba = vport->phba;
3823 struct lpfc_sli_ring *pring;
3824 struct lpfc_iocbq *tmp_iocb, *piocb;
3826 struct lpfc_dmabuf *pcmd;
3827 uint32_t els_command = 0;
3829 uint32_t remote_ID = 0xffffffff;
3831 /* If the timer is already canceled, do nothing */
3832 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3835 spin_lock_irq(&phba->hbalock);
3836 timeout = (uint32_t)(phba->fc_ratov << 1);
3838 pring = &phba->sli.ring[LPFC_ELS_RING];
3840 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3843 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
3844 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
3845 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
3848 if (piocb->vport != vport)
3851 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3853 els_command = *(uint32_t *) (pcmd->virt);
3855 if (els_command == ELS_CMD_FARP ||
3856 els_command == ELS_CMD_FARPR ||
3857 els_command == ELS_CMD_FDISC)
3860 if (piocb->drvrTimeout > 0) {
3861 if (piocb->drvrTimeout >= timeout)
3862 piocb->drvrTimeout -= timeout;
3864 piocb->drvrTimeout = 0;
3868 remote_ID = 0xffffffff;
3869 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
3870 remote_ID = cmd->un.elsreq64.remoteID;
3872 struct lpfc_nodelist *ndlp;
3873 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
3874 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3875 remote_ID = ndlp->nlp_DID;
3877 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3878 "0127 ELS timeout Data: x%x x%x x%x "
3879 "x%x\n", els_command,
3880 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
3881 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3883 spin_unlock_irq(&phba->hbalock);
3885 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3886 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
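/* Abort all outstanding ELS commands for a vport: pending iocbs on the
 * txq are completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and
 * active iocbs on the txcmplq are aborted by iotag.
 */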
3890 lpfc_els_flush_cmd(struct lpfc_vport *vport)
3892 LIST_HEAD(completions);
3893 struct lpfc_hba *phba = vport->phba;
3894 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3895 struct lpfc_iocbq *tmp_iocb, *piocb;
3898 lpfc_fabric_abort_vport(vport);
3900 spin_lock_irq(&phba->hbalock);
3901 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
3904 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3908 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
3909 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
3910 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
3911 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3912 cmd->ulpCommand == CMD_ABORT_XRI_CN)
3915 if (piocb->vport != vport)
3918 list_move_tail(&piocb->list, &completions);
3922 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3923 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3927 if (piocb->vport != vport)
3930 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3932 spin_unlock_irq(&phba->hbalock);
3934 while (!list_empty(&completions)) {
3935 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
3937 list_del_init(&piocb->list);
3939 if (!piocb->iocb_cmpl)
3940 lpfc_sli_release_iocbq(phba, piocb);
3942 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3943 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3944 (piocb->iocb_cmpl) (phba, piocb, piocb);
3952 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
3954 LIST_HEAD(completions);
3955 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3956 struct lpfc_iocbq *tmp_iocb, *piocb;
3959 lpfc_fabric_abort_hba(phba);
3960 spin_lock_irq(&phba->hbalock);
3961 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
3963 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
3965 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
3966 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
3967 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
3968 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3969 cmd->ulpCommand == CMD_ABORT_XRI_CN)
3971 list_move_tail(&piocb->list, &completions);
3974 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3975 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
3977 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3979 spin_unlock_irq(&phba->hbalock);
3980 while (!list_empty(&completions)) {
3981 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
3983 list_del_init(&piocb->list);
3984 if (!piocb->iocb_cmpl)
3985 lpfc_sli_release_iocbq(phba, piocb);
3987 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3988 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3989 (piocb->iocb_cmpl) (phba, piocb, piocb);
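/* Decode one unsolicited ELS frame: locate or create the ndlp for the
 * source N_Port, update statistics, and dispatch the command to the
 * matching receive handler or discovery state machine event. Unknown
 * commands are rejected with LS_RJT.
 */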
3996 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3997 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
3999 struct Scsi_Host *shost;
4000 struct lpfc_nodelist *ndlp;
4003 uint32_t cmd, did, newnode, rjt_err = 0;
4004 IOCB_t *icmd = &elsiocb->iocb;
4006 if (!vport || !(elsiocb->context2))
4010 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
4012 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
4013 lpfc_post_buffer(phba, pring, 1);
4015 did = icmd->un.rcvels.remoteID;
4016 if (icmd->ulpStatus) {
4017 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4018 "RCV Unsol ELS: status:x%x/x%x did:x%x",
4019 icmd->ulpStatus, icmd->un.ulpWord[4], did);
4023 /* Check to see if link went down during discovery */
4024 if (lpfc_els_chk_latt(vport))
4027 /* Ignore traffic received during vport shutdown. */
4028 if (vport->load_flag & FC_UNLOADING)
4031 ndlp = lpfc_findnode_did(vport, did);
4033 /* Cannot find existing Fabric ndlp, so allocate a new one */
4034 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4038 lpfc_nlp_init(vport, ndlp, did);
4039 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4041 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4042 ndlp->nlp_type |= NLP_FABRIC;
4043 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4044 ndlp = lpfc_enable_node(vport, ndlp,
4045 NLP_STE_UNUSED_NODE);
4048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4050 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4051 ndlp->nlp_type |= NLP_FABRIC;
4052 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
4053 /* This is similar to the new node path */
4054 ndlp = lpfc_nlp_get(ndlp);
4057 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4061 phba->fc_stat.elsRcvFrame++;
4062 if (elsiocb->context1)
4063 lpfc_nlp_put(elsiocb->context1);
4065 elsiocb->context1 = lpfc_nlp_get(ndlp);
4066 elsiocb->vport = vport;
4068 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
4069 cmd &= ELS_CMD_MASK;
4071 /* ELS command <elsCmd> received from NPORT <did> */
4072 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4073 "0112 ELS command x%x received from NPORT x%x "
4074 "Data: x%x\n", cmd, did, vport->port_state);
4077 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4078 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
4079 did, vport->port_state, ndlp->nlp_flag);
4081 phba->fc_stat.elsRcvPLOGI++;
4082 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
4084 if (vport->port_state < LPFC_DISC_AUTH) {
4085 if (!(phba->pport->fc_flag & FC_PT2PT) ||
4086 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
4087 rjt_err = LSRJT_UNABLE_TPC;
4090 /* We get here, and drop thru, if we are PT2PT with
4091 * another NPort and the other side has initiated
4092 * the PLOGI before responding to our FLOGI.
4096 shost = lpfc_shost_from_vport(vport);
4097 spin_lock_irq(shost->host_lock);
4098 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
4099 spin_unlock_irq(shost->host_lock);
4101 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4106 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4107 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
4108 did, vport->port_state, ndlp->nlp_flag);
4110 phba->fc_stat.elsRcvFLOGI++;
4111 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
4116 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4117 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
4118 did, vport->port_state, ndlp->nlp_flag);
4120 phba->fc_stat.elsRcvLOGO++;
4121 if (vport->port_state < LPFC_DISC_AUTH) {
4122 rjt_err = LSRJT_UNABLE_TPC;
4125 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
4128 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4129 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
4130 did, vport->port_state, ndlp->nlp_flag);
4132 phba->fc_stat.elsRcvPRLO++;
4133 if (vport->port_state < LPFC_DISC_AUTH) {
4134 rjt_err = LSRJT_UNABLE_TPC;
4137 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
4140 phba->fc_stat.elsRcvRSCN++;
4141 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
4146 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4147 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
4148 did, vport->port_state, ndlp->nlp_flag);
4150 phba->fc_stat.elsRcvADISC++;
4151 if (vport->port_state < LPFC_DISC_AUTH) {
4152 rjt_err = LSRJT_UNABLE_TPC;
4155 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4159 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4160 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
4161 did, vport->port_state, ndlp->nlp_flag);
4163 phba->fc_stat.elsRcvPDISC++;
4164 if (vport->port_state < LPFC_DISC_AUTH) {
4165 rjt_err = LSRJT_UNABLE_TPC;
4168 lpfc_disc_state_machine(vport, ndlp, elsiocb,
4172 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4173 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
4174 did, vport->port_state, ndlp->nlp_flag);
4176 phba->fc_stat.elsRcvFARPR++;
4177 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
4180 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4181 "RCV FARP: did:x%x/ste:x%x flg:x%x",
4182 did, vport->port_state, ndlp->nlp_flag);
4184 phba->fc_stat.elsRcvFARP++;
4185 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
4188 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4189 "RCV FAN: did:x%x/ste:x%x flg:x%x",
4190 did, vport->port_state, ndlp->nlp_flag);
4192 phba->fc_stat.elsRcvFAN++;
4193 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
4196 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4197 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
4198 did, vport->port_state, ndlp->nlp_flag);
4200 phba->fc_stat.elsRcvPRLI++;
4201 if (vport->port_state < LPFC_DISC_AUTH) {
4202 rjt_err = LSRJT_UNABLE_TPC;
4205 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
4208 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4209 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
4210 did, vport->port_state, ndlp->nlp_flag);
4212 phba->fc_stat.elsRcvLIRR++;
4213 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
4218 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4219 "RCV RPS: did:x%x/ste:x%x flg:x%x",
4220 did, vport->port_state, ndlp->nlp_flag);
4222 phba->fc_stat.elsRcvRPS++;
4223 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
4228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4229 "RCV RPL: did:x%x/ste:x%x flg:x%x",
4230 did, vport->port_state, ndlp->nlp_flag);
4232 phba->fc_stat.elsRcvRPL++;
4233 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
4238 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4239 "RCV RNID: did:x%x/ste:x%x flg:x%x",
4240 did, vport->port_state, ndlp->nlp_flag);
4242 phba->fc_stat.elsRcvRNID++;
4243 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
4248 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4249 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
4250 cmd, did, vport->port_state);
4252 /* Unsupported ELS command, reject */
4253 rjt_err = LSRJT_INVALID_CMD;
4255 /* Unknown ELS command <elsCmd> received from NPORT <did> */
4256 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4257 "0115 Unknown ELS command x%x "
4258 "received from NPORT x%x\n", cmd, did);
4264 /* Check if we need to LS_RJT the received ELS cmd */
4266 memset(&stat, 0, sizeof(stat));
4267 stat.un.b.lsRjtRsnCode = rjt_err;
4268 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
4269 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
4276 if (vport && !(vport->load_flag & FC_UNLOADING))
4277 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4278 "(%d):0111 Dropping received ELS cmd "
4279 "Data: x%x x%x x%x\n",
4280 vport->vpi, icmd->ulpStatus,
4281 icmd->un.ulpWord[4], icmd->ulpTimeout);
4282 phba->fc_stat.elsRcvDrop++;
4285 static struct lpfc_vport *
4286 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
4288 struct lpfc_vport *vport;
4289 unsigned long flags;
4291 spin_lock_irqsave(&phba->hbalock, flags);
4292 list_for_each_entry(vport, &phba->port_list, listentry) {
4293 if (vport->vpi == vpi) {
4294 spin_unlock_irqrestore(&phba->hbalock, flags);
4298 spin_unlock_irqrestore(&phba->hbalock, flags);
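/* Entry point for unsolicited ELS events from the SLI layer. Resolves
 * the destination vport from the VPI when NPIV is enabled, attaches the
 * receive buffers and hands each BDE to lpfc_els_unsol_buffer().
 */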
4303 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4304 struct lpfc_iocbq *elsiocb)
4306 struct lpfc_vport *vport = phba->pport;
4307 IOCB_t *icmd = &elsiocb->iocb;
4309 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
4310 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
4312 elsiocb->context2 = NULL;
4313 elsiocb->context3 = NULL;
4315 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
4316 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
4317 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
4318 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
4319 phba->fc_stat.NoRcvBuf++;
4320 /* Not enough posted buffers; Try posting more buffers */
4321 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
4322 lpfc_post_buffer(phba, pring, 0);
4326 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4327 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
4328 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
4329 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
4330 vport = phba->pport;
4332 uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
4333 vport = lpfc_find_vport_by_vpid(phba, vpi);
4336 /* If there are no BDEs associated
4337 * with this IOCB, there is nothing to do.
4339 if (icmd->ulpBdeCount == 0)
4342 /* type of ELS cmd is first 32bit word
4345 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4346 elsiocb->context2 = bdeBuf1;
4348 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
4349 icmd->un.cont64[0].addrLow);
4350 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
4354 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4356 * The different unsolicited event handlers would tell us
4357 * if they are done with "mp" by setting context2 to NULL.
4359 lpfc_nlp_put(elsiocb->context1);
4360 elsiocb->context1 = NULL;
4361 if (elsiocb->context2) {
4362 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
4363 elsiocb->context2 = NULL;
4366 /* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
4367 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
4368 icmd->ulpBdeCount == 2) {
4369 elsiocb->context2 = bdeBuf2;
4370 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4371 /* free mp if we are done with it */
4372 if (elsiocb->context2) {
4373 lpfc_in_buf_free(phba, elsiocb->context2);
			elsiocb->context2 = NULL;
		}
	}
}
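/*
 * lpfc_do_scr_ns_plogi - Start NameServer discovery for a vport by issuing
 * a PLOGI to the well-known NameServer DID, allocating or re-enabling the
 * NameServer node as needed, and optionally a PLOGI to the FDMI DID when
 * FDMI is configured.
 */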
4380 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4382 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (!ndlp) {
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp) {
4388 if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_disc_start(vport);
				return;
			}
4392 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4393 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4394 "0251 NameServer login: no memory\n");
4397 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4398 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
4401 if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_disc_start(vport);
				return;
			}
4405 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4406 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4407 "0348 NameServer login: node freed\n");
4411 ndlp->nlp_type |= NLP_FABRIC;
4413 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4415 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
4416 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4417 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4418 "0252 Cannot issue NameServer login\n");
4422 if (vport->cfg_fdmi_on) {
		ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
					  GFP_KERNEL);
		if (ndlp_fdmi) {
4426 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
4427 ndlp_fdmi->nlp_type |= NLP_FABRIC;
4428 lpfc_nlp_set_state(vport, ndlp_fdmi,
4429 NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
					     0);
		}
	}
	return;
}
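/*
 * lpfc_cmpl_reg_new_vport - Completion handler for the REG_VPI mailbox
 * command. On failure, either give up on the vport or unregister the VPI
 * and restart FLOGI/FDISC; on success, continue with fabric registration
 * (physical port) or NameServer login (NPIV vport).
 */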
4438 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4440 struct lpfc_vport *vport = pmb->vport;
4441 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4442 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4443 MAILBOX_t *mb = &pmb->mb;
4445 spin_lock_irq(shost->host_lock);
4446 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4447 spin_unlock_irq(shost->host_lock);
4449 if (mb->mbxStatus) {
4450 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4451 "0915 Register VPI failed: 0x%x\n",
4454 switch (mb->mbxStatus) {
4455 case 0x11: /* unsupported feature */
4456 case 0x9603: /* max_vpi exceeded */
4457 case 0x9602: /* Link event since CLEAR_LA */
4458 /* giving up on vport registration */
4459 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4460 spin_lock_irq(shost->host_lock);
4461 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4462 spin_unlock_irq(shost->host_lock);
			lpfc_can_disctmo(vport);
			break;
		default:
4466 /* Try to recover from this error */
4467 lpfc_mbx_unreg_vpi(vport);
4468 spin_lock_irq(shost->host_lock);
4469 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4470 spin_unlock_irq(shost->host_lock);
4471 if (vport->port_type == LPFC_PHYSICAL_PORT)
				lpfc_initial_flogi(vport);
			else
				lpfc_initial_fdisc(vport);
			break;
		}
	} else {
4479 if (vport == phba->pport)
			lpfc_issue_fabric_reglogin(vport);
		else
			lpfc_do_scr_ns_plogi(phba, vport);
	}
	/* Now, we decrement the ndlp reference count held for this
	 * completed mailbox command.
	 */
	lpfc_nlp_put(ndlp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
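/*
 * lpfc_register_new_vport - Issue a REG_VPI mailbox command to register the
 * vport's new N_Port ID with the HBA. The ndlp reference taken here is
 * released by the mailbox completion handler; on failure the vport is set
 * to FC_VPORT_FAILED.
 */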
4495 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4496 struct lpfc_nodelist *ndlp)
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *mbox;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
4503 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
4504 mbox->vport = vport;
4505 mbox->context2 = lpfc_nlp_get(ndlp);
4506 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
4507 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4508 == MBX_NOT_FINISHED) {
			/* mailbox command could not be issued; drop the ndlp
			 * reference count taken for this command
			 */
			lpfc_nlp_put(ndlp);
4513 mempool_free(mbox, phba->mbox_mem_pool);
4515 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4516 "0253 Register VPI: Can't send mbox\n");
4520 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
4521 "0254 Register VPI: no memory\n");
4527 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4528 spin_lock_irq(shost->host_lock);
4529 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	return;
}
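/*
 * lpfc_cmpl_els_fdisc - Completion handler for an NPIV FDISC. Failures are
 * retried or fail the vport; on success the newly assigned N_Port ID is
 * recorded and either REG_VPI or NameServer login is started next.
 */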
4535 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4536 struct lpfc_iocbq *rspiocb)
4538 struct lpfc_vport *vport = cmdiocb->vport;
4539 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4540 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4541 struct lpfc_nodelist *np;
4542 struct lpfc_nodelist *next_np;
4543 IOCB_t *irsp = &rspiocb->iocb;
4544 struct lpfc_iocbq *piocb;
4546 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4547 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
			 irsp->ulpStatus, irsp->un.ulpWord[4],
			 vport->fc_prevDID);
4550 /* Since all FDISCs are being single threaded, we
4551 * must reset the discovery timer for ALL vports
	 * waiting to send FDISC when one completes.
	 */
4554 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
		lpfc_set_disctmo(piocb->vport);
	}
4558 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4559 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
4560 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4562 if (irsp->ulpStatus) {
4563 /* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;
4567 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4568 "0124 FDISC failed. (%d/%d)\n",
4569 irsp->ulpStatus, irsp->un.ulpWord[4]);
4570 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4571 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4573 /* giving up on FDISC. Cancel discovery timer */
		lpfc_can_disctmo(vport);
	} else {
4576 spin_lock_irq(shost->host_lock);
4577 vport->fc_flag |= FC_FABRIC;
4578 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
4579 vport->fc_flag |= FC_PUBLIC_LOOP;
4580 spin_unlock_irq(shost->host_lock);
4582 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
4583 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
4584 if ((vport->fc_prevDID != vport->fc_myDID) &&
4585 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
			/* If our NportID changed, we need to ensure all
			 * remaining NPORTs get unreg_login'ed so we can
			 * issue an unreg_vpi afterwards.
			 */
4590 list_for_each_entry_safe(np, next_np,
4591 &vport->fc_nodes, nlp_listp) {
				if (!NLP_CHK_NODE_ACT(np) ||
4593 (np->nlp_state != NLP_STE_NPR_NODE) ||
				    !(np->nlp_flag & NLP_NPR_ADISC))
					continue;
4596 spin_lock_irq(shost->host_lock);
4597 np->nlp_flag &= ~NLP_NPR_ADISC;
4598 spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vport, np);
			}
4601 lpfc_mbx_unreg_vpi(vport);
4602 spin_lock_irq(shost->host_lock);
4603 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			spin_unlock_irq(shost->host_lock);
		}
4607 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
4610 lpfc_do_scr_ns_plogi(phba, vport);
		/* Unconditionally kick off releasing fabric node for vports */
		lpfc_nlp_put(ndlp);
	}

out:
	lpfc_els_free_iocb(phba, cmdiocb);
}
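/*
 * lpfc_issue_els_fdisc - Build and transmit an FDISC for an NPIV vport.
 * The payload carries the physical port's service parameters together with
 * this vport's port and node names. Returns 0 on success, 1 on failure.
 */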
lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
4624 struct lpfc_hba *phba = vport->phba;
4626 struct lpfc_iocbq *elsiocb;
4627 struct serv_parm *sp;
4630 int did = ndlp->nlp_DID;
4633 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
				     ELS_CMD_FDISC);
	if (!elsiocb) {
4637 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4638 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4639 "0255 Issue FDISC: no IOCB\n");
4643 icmd = &elsiocb->iocb;
4644 icmd->un.elsreq64.myID = 0;
4645 icmd->un.elsreq64.fl = 1;
4647 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
4651 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4652 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
4653 pcmd += sizeof(uint32_t); /* CSP Word 1 */
4654 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
4655 sp = (struct serv_parm *) pcmd;
4656 /* Setup CSPs accordingly for Fabric */
4657 sp->cmn.e_d_tov = 0;
4658 sp->cmn.w2.r_a_tov = 0;
4659 sp->cls1.classValid = 0;
4660 sp->cls2.seqDelivery = 1;
4661 sp->cls3.seqDelivery = 1;
4663 pcmd += sizeof(uint32_t); /* CSP Word 2 */
4664 pcmd += sizeof(uint32_t); /* CSP Word 3 */
4665 pcmd += sizeof(uint32_t); /* CSP Word 4 */
4666 pcmd += sizeof(uint32_t); /* Port Name */
4667 memcpy(pcmd, &vport->fc_portname, 8);
4668 pcmd += sizeof(uint32_t); /* Node Name */
4669 pcmd += sizeof(uint32_t); /* Node Name */
4670 memcpy(pcmd, &vport->fc_nodename, 8);
4672 lpfc_set_disctmo(vport);
4674 phba->fc_stat.elsXmitFDISC++;
4675 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
4677 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4678 "Issue FDISC: did:x%x",
4681 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
4682 if (rc == IOCB_ERROR) {
4683 lpfc_els_free_iocb(phba, elsiocb);
4684 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4685 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4686 "0256 Issue FDISC: Cannot send IOCB\n");
4689 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
	vport->port_state = LPFC_FDISC;
	return 0;
}
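/*
 * lpfc_cmpl_els_npiv_logo - Completion handler for a LOGO issued for an
 * NPIV vport: free the command IOCB, record the unreg_vpi completion
 * status and drop the ndlp reference taken when the LOGO was issued.
 */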
4695 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4696 struct lpfc_iocbq *rspiocb)
4698 struct lpfc_vport *vport = cmdiocb->vport;
4700 struct lpfc_nodelist *ndlp;
4701 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
4703 irsp = &rspiocb->iocb;
4704 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4705 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
4706 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
4708 lpfc_els_free_iocb(phba, cmdiocb);
4709 vport->unreg_vpi_cmpl = VPORT_ERROR;
	/* Trigger the release of the ndlp after logo */
	lpfc_nlp_put(ndlp);
}
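/*
 * lpfc_issue_els_npiv_logo - Build and transmit a LOGO carrying the vport's
 * N_Port ID and port name, typically as part of taking an NPIV vport down.
 * Returns 0 on success, 1 on failure.
 */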
4716 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4718 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4719 struct lpfc_hba *phba = vport->phba;
4720 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4722 struct lpfc_iocbq *elsiocb;
4726 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
				     ELS_CMD_LOGO);
	if (!elsiocb)
		return 1;
4732 icmd = &elsiocb->iocb;
4733 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4734 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
4735 pcmd += sizeof(uint32_t);
4737 /* Fill in LOGO payload */
4738 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
4739 pcmd += sizeof(uint32_t);
4740 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
4742 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4743 "Issue LOGO npiv did:x%x flg:x%x",
4744 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4746 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
4747 spin_lock_irq(shost->host_lock);
4748 ndlp->nlp_flag |= NLP_LOGO_SND;
4749 spin_unlock_irq(shost->host_lock);
4750 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
4751 spin_lock_irq(shost->host_lock);
4752 ndlp->nlp_flag &= ~NLP_LOGO_SND;
4753 spin_unlock_irq(shost->host_lock);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
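/*
 * lpfc_fabric_block_timeout - Timer handler that fires when the fabric
 * iocb block period expires; it posts WORKER_FABRIC_BLOCK_TMO to the
 * worker thread so that blocked fabric iocbs can be resumed.
 */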
4761 lpfc_fabric_block_timeout(unsigned long ptr)
4763 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4764 unsigned long iflags;
4765 uint32_t tmo_posted;
4767 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
	if (!tmo_posted)
4770 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
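/*
 * lpfc_resume_fabric_iocbs - If no fabric iocb is currently outstanding,
 * remove the next one from fabric_iocb_list, hook in the fabric completion
 * wrapper and issue it on the ELS ring.
 */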
4779 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4781 struct lpfc_iocbq *iocb;
4782 unsigned long iflags;
4784 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4789 spin_lock_irqsave(&phba->hbalock, iflags);
4790 /* Post any pending iocb to the SLI layer */
4791 if (atomic_read(&phba->fabric_iocb_count) == 0) {
		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
				 list);
		if (iocb)
4795 /* Increment fabric iocb count to hold the position */
			atomic_inc(&phba->fabric_iocb_count);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (iocb) {
4800 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4801 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4802 iocb->iocb_flag |= LPFC_IO_FABRIC;
4804 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4805 "Fabric sched1: ste:x%x",
4806 iocb->vport->port_state, 0, 0);
4808 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4810 if (ret == IOCB_ERROR) {
4811 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4812 iocb->fabric_iocb_cmpl = NULL;
4813 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4815 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4816 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4817 iocb->iocb_cmpl(phba, iocb, iocb);
			atomic_dec(&phba->fabric_iocb_count);
		}
	}
}
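/*
 * lpfc_unblock_fabric_iocbs - Clear the FABRIC_COMANDS_BLOCKED flag and
 * resume issuing any fabric iocbs queued while the block was in effect.
 */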
4828 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4830 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	lpfc_resume_fabric_iocbs(phba);
	return;
}
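/*
 * lpfc_block_fabric_iocbs - Block further fabric iocbs from being issued
 * and, if the block was not already in place, arm a timer to lift it
 * after 100ms.
 */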
4837 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4841 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	/* Start a timer to unblock fabric iocbs after 100ms */
	if (!blocked)
		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);

	return;
}
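/*
 * lpfc_cmpl_fabric_iocb - Completion wrapper for fabric iocbs. Busy or
 * resource-related rejections from the fabric temporarily block further
 * fabric iocbs; the original completion handler is then restored and
 * invoked, and any queued fabric iocbs are resumed once unblocked.
 */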
4850 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4851 struct lpfc_iocbq *rspiocb)
4855 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
4858 switch (rspiocb->iocb.ulpStatus) {
4859 case IOSTAT_NPORT_RJT:
4860 case IOSTAT_FABRIC_RJT:
4861 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
			lpfc_block_fabric_iocbs(phba);
		}
		break;
4866 case IOSTAT_NPORT_BSY:
4867 case IOSTAT_FABRIC_BSY:
		lpfc_block_fabric_iocbs(phba);
		break;

	case IOSTAT_LS_RJT:
4872 stat.un.lsRjtError =
4873 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
4874 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
4875 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
			lpfc_block_fabric_iocbs(phba);
		break;
	}
4880 if (atomic_read(&phba->fabric_iocb_count) == 0)
4883 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
4884 cmdiocb->fabric_iocb_cmpl = NULL;
4885 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
4886 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
4888 atomic_dec(&phba->fabric_iocb_count);
4889 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
4890 /* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}
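/*
 * lpfc_issue_fabric_iocb - Issue a fabric iocb (e.g. FDISC) while keeping
 * at most one outstanding at a time. If another fabric iocb is in flight
 * or fabric commands are blocked, the iocb is queued on fabric_iocb_list
 * to be issued later by lpfc_resume_fabric_iocbs().
 */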
4896 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4898 unsigned long iflags;
4899 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4903 if (atomic_read(&phba->fabric_iocb_count) > 1)
4906 spin_lock_irqsave(&phba->hbalock, iflags);
4907 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	if (ready) {
4911 /* Increment fabric iocb count to hold the position */
4912 atomic_inc(&phba->fabric_iocb_count);
4913 spin_unlock_irqrestore(&phba->hbalock, iflags);
4915 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4916 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4917 iocb->iocb_flag |= LPFC_IO_FABRIC;
4919 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4920 "Fabric sched2: ste:x%x",
4921 iocb->vport->port_state, 0, 0);
4923 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4925 if (ret == IOCB_ERROR) {
4926 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4927 iocb->fabric_iocb_cmpl = NULL;
4928 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
		}
	} else {
4932 spin_lock_irqsave(&phba->hbalock, iflags);
4933 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		ret = IOCB_SUCCESS;
	}
	return ret;
}
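/*
 * lpfc_fabric_abort_vport - Remove all fabric iocbs queued for the given
 * vport from fabric_iocb_list and complete them locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 */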
4941 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
4943 LIST_HEAD(completions);
4944 struct lpfc_hba *phba = vport->phba;
4945 struct lpfc_iocbq *tmp_iocb, *piocb;
4948 spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
		if (piocb->vport != vport)
			continue;
		list_move_tail(&piocb->list, &completions);
	}
4957 spin_unlock_irq(&phba->hbalock);
4959 while (!list_empty(&completions)) {
4960 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4961 list_del_init(&piocb->list);
4964 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4965 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
		(piocb->iocb_cmpl) (phba, piocb, piocb);
	}
}
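/*
 * lpfc_fabric_abort_nport - Remove all queued fabric iocbs destined for
 * the given node and complete them locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 */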
4970 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
4972 LIST_HEAD(completions);
4973 struct lpfc_hba *phba = ndlp->vport->phba;
4974 struct lpfc_iocbq *tmp_iocb, *piocb;
4975 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4978 spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
4981 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
			list_move_tail(&piocb->list, &completions);
		}
	}
4986 spin_unlock_irq(&phba->hbalock);
4988 while (!list_empty(&completions)) {
4989 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4990 list_del_init(&piocb->list);
4993 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4994 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
		(piocb->iocb_cmpl) (phba, piocb, piocb);
	}
}
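/*
 * lpfc_fabric_abort_hba - Flush the entire fabric_iocb_list for the HBA,
 * completing every queued fabric iocb locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 */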
4999 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
5001 LIST_HEAD(completions);
5002 struct lpfc_iocbq *piocb;
5005 spin_lock_irq(&phba->hbalock);
5006 list_splice_init(&phba->fabric_iocb_list, &completions);
5007 spin_unlock_irq(&phba->hbalock);
5009 while (!list_empty(&completions)) {
5010 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
5011 list_del_init(&piocb->list);
5014 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5015 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5016 (piocb->iocb_cmpl) (phba, piocb, piocb);