1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
39 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
41 static int lpfc_max_els_tries = 3;
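/* Check whether a link attention event is pending while discovery is
 * in progress.  If so, mark discovery for abort and issue CLEAR_LA so
 * the deferred LATT is processed through the normal link-down path.
 */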
44 lpfc_els_chk_latt(struct lpfc_hba * phba)
46 struct lpfc_sli *psli;
53 if ((phba->hba_state >= LPFC_HBA_READY) ||
54 (phba->hba_state == LPFC_LINK_DOWN))
57 /* Read the HBA Host Attention Register */
58 spin_lock_irq(phba->host->host_lock);
59 ha_copy = readl(phba->HAregaddr);
60 spin_unlock_irq(phba->host->host_lock);
62 if (!(ha_copy & HA_LATT))
65 /* Pending Link Event during Discovery */
66 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
67 "%d:0237 Pending Link Event during "
68 "Discovery: State x%x\n",
69 phba->brd_no, phba->hba_state);
	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event.  The
	 * LATT processing should call lpfc_linkdown() which
	 * will clean up any left over in-progress discovery
	 * events.
	 */
77 spin_lock_irq(phba->host->host_lock);
78 phba->fc_flag |= FC_ABORT_DISCOVERY;
79 spin_unlock_irq(phba->host->host_lock);
81 if (phba->hba_state != LPFC_CLEAR_LA) {
82 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
83 phba->hba_state = LPFC_CLEAR_LA;
84 lpfc_clear_la(phba, mbox);
85 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
86 rc = lpfc_sli_issue_mbox (phba, mbox,
87 (MBX_NOWAIT | MBX_STOP_IOCB));
88 if (rc == MBX_NOT_FINISHED) {
89 mempool_free(mbox, phba->mbox_mem_pool);
90 phba->hba_state = LPFC_HBA_ERROR;
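/* Allocate and initialize an iocb for an ELS command or response.
 * Sets up DMA buffers for the command payload, the response payload
 * (when expectRsp is set) and the buffer pointer list (BPL), and fills
 * in the IOCB fields common to all ELS traffic.
 */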
99 static struct lpfc_iocbq *
100 lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
101 uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist * ndlp,
102 uint32_t did, uint32_t elscmd)
104 struct lpfc_sli_ring *pring;
105 struct lpfc_iocbq *elsiocb;
106 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
107 struct ulp_bde64 *bpl;
110 pring = &phba->sli.ring[LPFC_ELS_RING];
112 if (phba->hba_state < LPFC_LINK_UP)
115 /* Allocate buffer for command iocb */
116 spin_lock_irq(phba->host->host_lock);
117 elsiocb = lpfc_sli_get_iocbq(phba);
118 spin_unlock_irq(phba->host->host_lock);
122 icmd = &elsiocb->iocb;
124 /* fill in BDEs for command */
125 /* Allocate buffer for command payload */
126 if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
127 ((pcmd->virt = lpfc_mbuf_alloc(phba,
128 MEM_PRI, &(pcmd->phys))) == 0)) {
131 spin_lock_irq(phba->host->host_lock);
132 lpfc_sli_release_iocbq(phba, elsiocb);
133 spin_unlock_irq(phba->host->host_lock);
137 INIT_LIST_HEAD(&pcmd->list);
139 /* Allocate buffer for response payload */
141 prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
143 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
145 if (prsp == 0 || prsp->virt == 0) {
147 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
149 spin_lock_irq(phba->host->host_lock);
150 lpfc_sli_release_iocbq(phba, elsiocb);
151 spin_unlock_irq(phba->host->host_lock);
154 INIT_LIST_HEAD(&prsp->list);
159 /* Allocate buffer for Buffer ptr list */
160 pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
162 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
164 if (pbuflist == 0 || pbuflist->virt == 0) {
165 spin_lock_irq(phba->host->host_lock);
166 lpfc_sli_release_iocbq(phba, elsiocb);
167 spin_unlock_irq(phba->host->host_lock);
168 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
169 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
176 INIT_LIST_HEAD(&pbuflist->list);
178 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
179 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
180 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
182 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
183 icmd->un.elsreq64.remoteID = did; /* DID */
184 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
186 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
187 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
190 icmd->ulpBdeCount = 1;
192 icmd->ulpClass = CLASS3;
194 bpl = (struct ulp_bde64 *) pbuflist->virt;
195 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
196 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
197 bpl->tus.f.bdeSize = cmdSize;
198 bpl->tus.f.bdeFlags = 0;
199 bpl->tus.w = le32_to_cpu(bpl->tus.w);
203 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
204 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
205 bpl->tus.f.bdeSize = FCELSSIZE;
206 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
207 bpl->tus.w = le32_to_cpu(bpl->tus.w);
210 /* Save for completion so we can release these resources */
211 elsiocb->context1 = (uint8_t *) ndlp;
212 elsiocb->context2 = (uint8_t *) pcmd;
213 elsiocb->context3 = (uint8_t *) pbuflist;
214 elsiocb->retry = retry;
215 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
218 list_add(&prsp->list, &pcmd->list);
222 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
223 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
224 "%d:0116 Xmit ELS command x%x to remote "
225 "NPORT x%x Data: x%x x%x\n",
226 phba->brd_no, elscmd,
227 did, icmd->ulpIoTag, phba->hba_state);
229 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
230 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
231 "%d:0117 Xmit ELS response x%x to remote "
232 "NPORT x%x Data: x%x x%x\n",
233 phba->brd_no, elscmd,
234 ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
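/* FLOGI completed against an F_Port: record the fabric timeout values
 * and service parameters, save our assigned N_Port ID, then kick off
 * the CONFIG_LINK and fabric REG_LOGIN mailbox commands.
 */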
242 lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
243 struct serv_parm *sp, IOCB_t *irsp)
248 spin_lock_irq(phba->host->host_lock);
249 phba->fc_flag |= FC_FABRIC;
250 spin_unlock_irq(phba->host->host_lock);
252 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
253 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
254 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
256 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
258 if (phba->fc_topology == TOPOLOGY_LOOP) {
259 spin_lock_irq(phba->host->host_lock);
260 phba->fc_flag |= FC_PUBLIC_LOOP;
261 spin_unlock_irq(phba->host->host_lock);
		/* If we are an N_Port connected to a Fabric, fix up sparams so
		 * logins to devices on remote loops work.
		 */
267 phba->fc_sparam.cmn.altBbCredit = 1;
270 phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
271 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
272 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
273 ndlp->nlp_class_sup = 0;
274 if (sp->cls1.classValid)
275 ndlp->nlp_class_sup |= FC_COS_CLASS1;
276 if (sp->cls2.classValid)
277 ndlp->nlp_class_sup |= FC_COS_CLASS2;
278 if (sp->cls3.classValid)
279 ndlp->nlp_class_sup |= FC_COS_CLASS3;
280 if (sp->cls4.classValid)
281 ndlp->nlp_class_sup |= FC_COS_CLASS4;
282 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
283 sp->cmn.bbRcvSizeLsb;
284 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
286 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
290 phba->hba_state = LPFC_FABRIC_CFG_LINK;
291 lpfc_config_link(phba, mbox);
292 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
294 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
295 if (rc == MBX_NOT_FINISHED)
298 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
302 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
305 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
306 mbox->context2 = ndlp;
308 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
309 if (rc == MBX_NOT_FINISHED)
315 mempool_free(mbox, phba->mbox_mem_pool);
/*
 * We FLOGIed into an NPort, initiate pt2pt protocol
 */
324 lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
325 struct serv_parm *sp)
330 spin_lock_irq(phba->host->host_lock);
331 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
332 spin_unlock_irq(phba->host->host_lock);
334 phba->fc_edtov = FF_DEF_EDTOV;
335 phba->fc_ratov = FF_DEF_RATOV;
336 rc = memcmp(&phba->fc_portname, &sp->portName,
337 sizeof(struct lpfc_name));
339 /* This side will initiate the PLOGI */
340 spin_lock_irq(phba->host->host_lock);
341 phba->fc_flag |= FC_PT2PT_PLOGI;
342 spin_unlock_irq(phba->host->host_lock);
		/*
		 * N_Port ID cannot be 0; set ours to the LocalID, the
		 * other side will be the RemoteID.
		 */
351 phba->fc_myDID = PT2PT_LocalID;
353 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
357 lpfc_config_link(phba, mbox);
359 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
360 rc = lpfc_sli_issue_mbox(phba, mbox,
361 MBX_NOWAIT | MBX_STOP_IOCB);
362 if (rc == MBX_NOT_FINISHED) {
363 mempool_free(mbox, phba->mbox_mem_pool);
366 mempool_free(ndlp, phba->nlp_mem_pool);
368 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, PT2PT_RemoteID);
		/*
		 * Cannot find existing Fabric ndlp, so allocate a
		 * new one.
		 */
374 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
378 lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
381 memcpy(&ndlp->nlp_portname, &sp->portName,
382 sizeof(struct lpfc_name));
383 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
384 sizeof(struct lpfc_name));
385 ndlp->nlp_state = NLP_STE_NPR_NODE;
386 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
387 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
389 /* This side will wait for the PLOGI */
390 mempool_free( ndlp, phba->nlp_mem_pool);
393 spin_lock_irq(phba->host->host_lock);
394 phba->fc_flag |= FC_PT2PT;
395 spin_unlock_irq(phba->host->host_lock);
397 /* Start discovery - this should just do CLEAR_LA */
398 lpfc_disc_start(phba);
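/* Completion handler for FLOGI.  Retries on error, otherwise decides
 * from the common service parameters whether we are attached to a
 * fabric (F_Port) or directly to another N_Port (point to point) and
 * continues discovery accordingly.
 */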
405 lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
406 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
408 IOCB_t *irsp = &rspiocb->iocb;
409 struct lpfc_nodelist *ndlp = cmdiocb->context1;
410 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
411 struct serv_parm *sp;
414 /* Check to see if link went down during discovery */
415 if (lpfc_els_chk_latt(phba)) {
416 lpfc_nlp_remove(phba, ndlp);
420 if (irsp->ulpStatus) {
421 /* Check for retry */
422 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
423 /* ELS command is being retried */
426 /* FLOGI failed, so there is no fabric */
427 spin_lock_irq(phba->host->host_lock);
428 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
429 spin_unlock_irq(phba->host->host_lock);
		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32).  Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
435 if (phba->alpa_map[0] == 0) {
436 phba->cfg_discovery_threads =
437 LPFC_MAX_DISC_THREADS;
441 lpfc_printf_log(phba,
444 "%d:0100 FLOGI failure Data: x%x x%x x%x\n",
446 irsp->ulpStatus, irsp->un.ulpWord[4],
	/*
	 * The FLOGI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
455 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
457 sp = prsp->virt + sizeof(uint32_t);
459 /* FLOGI completes successfully */
460 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0101 FLOGI completes successfully "
462 "Data: x%x x%x x%x x%x\n",
464 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
465 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
467 if (phba->hba_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate the remote port is
		 * an N_Port, we are point to point; if it is an F_Port, we
		 * are attached to a fabric.
		 */
473 rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
475 rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);
482 lpfc_nlp_remove(phba, ndlp);
484 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
485 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
486 irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
487 /* FLOGI failed, so just use loop map to make discovery list */
488 lpfc_disc_list_loopmap(phba);
490 /* Start discovery */
491 lpfc_disc_start(phba);
495 lpfc_els_free_iocb(phba, cmdiocb);
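/* Build and transmit a FLOGI to the fabric controller, advertising our
 * service parameters adjusted for fabric login.
 */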
499 lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
502 struct serv_parm *sp;
504 struct lpfc_iocbq *elsiocb;
505 struct lpfc_sli_ring *pring;
511 pring = &phba->sli.ring[LPFC_ELS_RING];
513 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
514 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
515 ndlp->nlp_DID, ELS_CMD_FLOGI);
519 icmd = &elsiocb->iocb;
520 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
522 /* For FLOGI request, remainder of payload is service parameters */
523 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
524 pcmd += sizeof (uint32_t);
525 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
526 sp = (struct serv_parm *) pcmd;
528 /* Setup CSPs accordingly for Fabric */
530 sp->cmn.w2.r_a_tov = 0;
531 sp->cls1.classValid = 0;
532 sp->cls2.seqDelivery = 1;
533 sp->cls3.seqDelivery = 1;
534 if (sp->cmn.fcphLow < FC_PH3)
535 sp->cmn.fcphLow = FC_PH3;
536 if (sp->cmn.fcphHigh < FC_PH3)
537 sp->cmn.fcphHigh = FC_PH3;
539 tmo = phba->fc_ratov;
540 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
541 lpfc_set_disctmo(phba);
542 phba->fc_ratov = tmo;
544 phba->fc_stat.elsXmitFLOGI++;
545 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
546 spin_lock_irq(phba->host->host_lock);
547 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
548 spin_unlock_irq(phba->host->host_lock);
549 if (rc == IOCB_ERROR) {
550 lpfc_els_free_iocb(phba, elsiocb);
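/* Abort any FLOGI that is still outstanding on the ELS ring by walking
 * the txcmplq and completing matching iocbs with a local-reject status.
 */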
557 lpfc_els_abort_flogi(struct lpfc_hba * phba)
559 struct lpfc_sli_ring *pring;
560 struct lpfc_iocbq *iocb, *next_iocb;
561 struct lpfc_nodelist *ndlp;
564 /* Abort outstanding I/O on NPort <nlp_DID> */
565 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
566 "%d:0201 Abort outstanding I/O on NPort x%x\n",
567 phba->brd_no, Fabric_DID);
569 pring = &phba->sli.ring[LPFC_ELS_RING];
	/*
	 * Check the txcmplq for an iocb that matches the nport the driver
	 * is searching for.
	 */
575 spin_lock_irq(phba->host->host_lock);
576 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
578 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
579 ndlp = (struct lpfc_nodelist *)(iocb->context1);
580 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
581 list_del(&iocb->list);
582 pring->txcmplq_cnt--;
584 if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
585 lpfc_sli_issue_abort_iotag32
588 if (iocb->iocb_cmpl) {
589 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
590 icmd->un.ulpWord[4] =
592 spin_unlock_irq(phba->host->host_lock);
593 (iocb->iocb_cmpl) (phba, iocb, iocb);
594 spin_lock_irq(phba->host->host_lock);
596 lpfc_sli_release_iocbq(phba, iocb);
600 spin_unlock_irq(phba->host->host_lock);
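/* Start fabric login: find or allocate the Fabric_DID node and issue
 * the initial FLOGI.
 */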
606 lpfc_initial_flogi(struct lpfc_hba * phba)
608 struct lpfc_nodelist *ndlp;
610 /* First look for the Fabric ndlp */
611 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, Fabric_DID);
613 /* Cannot find existing Fabric ndlp, so allocate a new one */
614 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
617 lpfc_nlp_init(phba, ndlp, Fabric_DID);
619 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
621 if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
622 mempool_free( ndlp, phba->nlp_mem_pool);
628 lpfc_more_plogi(struct lpfc_hba * phba)
632 if (phba->num_disc_nodes)
633 phba->num_disc_nodes--;
635 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
636 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
637 "%d:0232 Continue discovery with %d PLOGIs to go "
638 "Data: x%x x%x x%x\n",
639 phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
640 phba->fc_flag, phba->hba_state);
642 /* Check to see if there are more PLOGIs to be sent */
643 if (phba->fc_flag & FC_NLP_MORE) {
644 /* go thru NPR list and issue any remaining ELS PLOGIs */
645 sentplogi = lpfc_els_disc_plogi(phba);
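/* After a successful PLOGI, confirm that the WWPN in the login response
 * matches the ndlp we logged into.  If a different node already owns
 * that WWPN, transfer the DID and state to it and move the stale entry
 * back to the NPR list.
 */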
650 static struct lpfc_nodelist *
651 lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
652 struct lpfc_nodelist *ndlp)
654 struct lpfc_nodelist *new_ndlp;
656 struct serv_parm *sp;
657 uint8_t name[sizeof (struct lpfc_name)];
660 lp = (uint32_t *) prsp->virt;
661 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
662 memset(name, 0, sizeof (struct lpfc_name));
	/* Now we find out if the NPort we are logging into matches the WWPN
	 * we have for that ndlp.  If not, we have some work to do.
	 */
667 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName);
669 if (new_ndlp == ndlp)
674 memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
677 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
681 lpfc_nlp_init(phba, new_ndlp, ndlp->nlp_DID);
684 lpfc_unreg_rpi(phba, new_ndlp);
685 new_ndlp->nlp_DID = ndlp->nlp_DID;
686 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
687 new_ndlp->nlp_state = ndlp->nlp_state;
688 lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK);
690 /* Move this back to NPR list */
691 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
692 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
695 lpfc_unreg_rpi(phba, ndlp);
696 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
697 ndlp->nlp_state = NLP_STE_NPR_NODE;
698 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
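/* Completion handler for PLOGI.  Retries on error, feeds the result to
 * the discovery state machine, and continues issuing PLOGIs while
 * discovery or RSCN processing still has nodes outstanding.
 */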
704 lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
705 struct lpfc_iocbq * rspiocb)
708 struct lpfc_nodelist *ndlp;
709 struct lpfc_dmabuf *prsp;
710 int disc, rc, did, type;
713 /* we pass cmdiocb to state machine which needs rspiocb as well */
714 cmdiocb->context_un.rsp_iocb = rspiocb;
716 irsp = &rspiocb->iocb;
717 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL,
718 irsp->un.elsreq64.remoteID);
	/* Since ndlp can be freed in the disc state machine, note if this
	 * node is being used during discovery.
	 */
725 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
726 spin_lock_irq(phba->host->host_lock);
727 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
728 spin_unlock_irq(phba->host->host_lock);
731 /* PLOGI completes to NPort <nlp_DID> */
732 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
733 "%d:0102 PLOGI completes to NPort x%x "
734 "Data: x%x x%x x%x x%x x%x\n",
735 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
736 irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
737 phba->num_disc_nodes);
739 /* Check to see if link went down during discovery */
740 if (lpfc_els_chk_latt(phba)) {
741 spin_lock_irq(phba->host->host_lock);
742 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
743 spin_unlock_irq(phba->host->host_lock);
747 /* ndlp could be freed in DSM, save these values now */
748 type = ndlp->nlp_type;
751 if (irsp->ulpStatus) {
752 /* Check for retry */
753 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
754 /* ELS command is being retried */
756 spin_lock_irq(phba->host->host_lock);
757 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
758 spin_unlock_irq(phba->host->host_lock);
764 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
765 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
766 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
767 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
768 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
769 rc = NLP_STE_FREED_NODE;
771 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
775 /* Good status, call state machine */
776 prsp = list_entry(((struct lpfc_dmabuf *)
777 cmdiocb->context2)->list.next,
778 struct lpfc_dmabuf, list);
779 ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
780 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
784 if (disc && phba->num_disc_nodes) {
785 /* Check to see if there are more PLOGIs to be sent */
786 lpfc_more_plogi(phba);
788 if (phba->num_disc_nodes == 0) {
789 spin_lock_irq(phba->host->host_lock);
790 phba->fc_flag &= ~FC_NDISC_ACTIVE;
791 spin_unlock_irq(phba->host->host_lock);
793 lpfc_can_disctmo(phba);
794 if (phba->fc_flag & FC_RSCN_MODE) {
				/* Check to see if more RSCNs came in while
				 * we were processing this one.
				 */
799 if ((phba->fc_rscn_id_cnt == 0) &&
800 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
801 spin_lock_irq(phba->host->host_lock);
802 phba->fc_flag &= ~FC_RSCN_MODE;
803 spin_unlock_irq(phba->host->host_lock);
805 lpfc_els_handle_rscn(phba);
812 lpfc_els_free_iocb(phba, cmdiocb);
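/* Build and transmit a PLOGI to the given DID using our service
 * parameters.
 */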
817 lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
819 struct serv_parm *sp;
821 struct lpfc_iocbq *elsiocb;
822 struct lpfc_sli_ring *pring;
823 struct lpfc_sli *psli;
828 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
830 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
831 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did,
836 icmd = &elsiocb->iocb;
837 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
839 /* For PLOGI request, remainder of payload is service parameters */
840 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
841 pcmd += sizeof (uint32_t);
842 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
843 sp = (struct serv_parm *) pcmd;
845 if (sp->cmn.fcphLow < FC_PH_4_3)
846 sp->cmn.fcphLow = FC_PH_4_3;
848 if (sp->cmn.fcphHigh < FC_PH3)
849 sp->cmn.fcphHigh = FC_PH3;
851 phba->fc_stat.elsXmitPLOGI++;
852 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
853 spin_lock_irq(phba->host->host_lock);
854 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
855 spin_unlock_irq(phba->host->host_lock);
856 lpfc_els_free_iocb(phba, elsiocb);
859 spin_unlock_irq(phba->host->host_lock);
864 lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
865 struct lpfc_iocbq * rspiocb)
868 struct lpfc_sli *psli;
869 struct lpfc_nodelist *ndlp;
872 /* we pass cmdiocb to state machine which needs rspiocb as well */
873 cmdiocb->context_un.rsp_iocb = rspiocb;
875 irsp = &(rspiocb->iocb);
876 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
877 spin_lock_irq(phba->host->host_lock);
878 ndlp->nlp_flag &= ~NLP_PRLI_SND;
879 spin_unlock_irq(phba->host->host_lock);
881 /* PRLI completes to NPort <nlp_DID> */
882 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
883 "%d:0103 PRLI completes to NPort x%x "
884 "Data: x%x x%x x%x x%x\n",
885 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
886 irsp->un.ulpWord[4], irsp->ulpTimeout,
887 phba->num_disc_nodes);
889 phba->fc_prli_sent--;
890 /* Check to see if link went down during discovery */
891 if (lpfc_els_chk_latt(phba))
894 if (irsp->ulpStatus) {
895 /* Check for retry */
896 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
897 /* ELS command is being retried */
901 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
902 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
903 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
904 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
905 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
908 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
912 /* Good status, call state machine */
913 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
917 lpfc_els_free_iocb(phba, cmdiocb);
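/* Build and transmit a PRLI to establish an FCP image pair with the
 * remote port, enabling the FC-TAPE bits when the firmware supports
 * them.
 */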
922 lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
927 struct lpfc_iocbq *elsiocb;
928 struct lpfc_sli_ring *pring;
929 struct lpfc_sli *psli;
934 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
936 cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
937 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
938 ndlp->nlp_DID, ELS_CMD_PRLI);
942 icmd = &elsiocb->iocb;
943 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
945 /* For PRLI request, remainder of payload is service parameters */
946 memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
947 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
948 pcmd += sizeof (uint32_t);
950 /* For PRLI, remainder of payload is PRLI parameter page */
	/*
	 * If our firmware version is 3.20 or later,
	 * set the following bits for FC-TAPE support.
	 */
956 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
957 npr->ConfmComplAllowed = 1;
959 npr->TaskRetryIdReq = 1;
961 npr->estabImagePair = 1;
962 npr->readXferRdyDis = 1;
964 /* For FCP support */
965 npr->prliType = PRLI_FCP_TYPE;
966 npr->initiatorFunc = 1;
968 phba->fc_stat.elsXmitPRLI++;
969 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
970 spin_lock_irq(phba->host->host_lock);
971 ndlp->nlp_flag |= NLP_PRLI_SND;
972 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
973 ndlp->nlp_flag &= ~NLP_PRLI_SND;
974 spin_unlock_irq(phba->host->host_lock);
975 lpfc_els_free_iocb(phba, elsiocb);
978 spin_unlock_irq(phba->host->host_lock);
979 phba->fc_prli_sent++;
984 lpfc_more_adisc(struct lpfc_hba * phba)
988 if (phba->num_disc_nodes)
989 phba->num_disc_nodes--;
991 /* Continue discovery with <num_disc_nodes> ADISCs to go */
992 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
993 "%d:0210 Continue discovery with %d ADISCs to go "
994 "Data: x%x x%x x%x\n",
995 phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
996 phba->fc_flag, phba->hba_state);
998 /* Check to see if there are more ADISCs to be sent */
999 if (phba->fc_flag & FC_NLP_MORE) {
1000 lpfc_set_disctmo(phba);
1002 /* go thru NPR list and issue any remaining ELS ADISCs */
1003 sentadisc = lpfc_els_disc_adisc(phba);
1009 lpfc_rscn_disc(struct lpfc_hba * phba)
1011 /* RSCN discovery */
1012 /* go thru NPR list and issue ELS PLOGIs */
1013 if (phba->fc_npr_cnt) {
1014 if (lpfc_els_disc_plogi(phba))
1017 if (phba->fc_flag & FC_RSCN_MODE) {
		/* Check to see if more RSCNs came in while we were
		 * processing this one.
		 */
1021 if ((phba->fc_rscn_id_cnt == 0) &&
1022 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
1023 spin_lock_irq(phba->host->host_lock);
1024 phba->fc_flag &= ~FC_RSCN_MODE;
1025 spin_unlock_irq(phba->host->host_lock);
1027 lpfc_els_handle_rscn(phba);
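/* Completion handler for ADISC.  Retries on error, runs the discovery
 * state machine, and once all ADISCs are done either issues CLEAR_LA
 * to finish link-up discovery or falls back to RSCN discovery.
 */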
1033 lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1034 struct lpfc_iocbq * rspiocb)
1037 struct lpfc_sli *psli;
1038 struct lpfc_nodelist *ndlp;
1044 /* we pass cmdiocb to state machine which needs rspiocb as well */
1045 cmdiocb->context_un.rsp_iocb = rspiocb;
1047 irsp = &(rspiocb->iocb);
1048 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	/* Since ndlp can be freed in the disc state machine, note if this
	 * node is being used during discovery.
	 */
1053 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1054 spin_lock_irq(phba->host->host_lock);
1055 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1056 spin_unlock_irq(phba->host->host_lock);
1058 /* ADISC completes to NPort <nlp_DID> */
1059 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1060 "%d:0104 ADISC completes to NPort x%x "
1061 "Data: x%x x%x x%x x%x x%x\n",
1062 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1063 irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
1064 phba->num_disc_nodes);
1066 /* Check to see if link went down during discovery */
1067 if (lpfc_els_chk_latt(phba)) {
1068 spin_lock_irq(phba->host->host_lock);
1069 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1070 spin_unlock_irq(phba->host->host_lock);
1074 if (irsp->ulpStatus) {
1075 /* Check for retry */
1076 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1077 /* ELS command is being retried */
1079 spin_lock_irq(phba->host->host_lock);
1080 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1081 spin_unlock_irq(phba->host->host_lock);
1082 lpfc_set_disctmo(phba);
1087 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1088 if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1089 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
1090 (irsp->un.ulpWord[4] != IOERR_LINK_DOWN) &&
1091 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
1092 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1093 NLP_EVT_CMPL_ADISC);
1096 /* Good status, call state machine */
1097 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1098 NLP_EVT_CMPL_ADISC);
1101 if (disc && phba->num_disc_nodes) {
1102 /* Check to see if there are more ADISCs to be sent */
1103 lpfc_more_adisc(phba);
1105 /* Check to see if we are done with ADISC authentication */
1106 if (phba->num_disc_nodes == 0) {
1107 lpfc_can_disctmo(phba);
1108 /* If we get here, there is nothing left to wait for */
1109 if ((phba->hba_state < LPFC_HBA_READY) &&
1110 (phba->hba_state != LPFC_CLEAR_LA)) {
1111 /* Link up discovery */
1112 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
1114 phba->hba_state = LPFC_CLEAR_LA;
1115 lpfc_clear_la(phba, mbox);
1117 lpfc_mbx_cmpl_clear_la;
1118 rc = lpfc_sli_issue_mbox
1120 (MBX_NOWAIT | MBX_STOP_IOCB));
1121 if (rc == MBX_NOT_FINISHED) {
1123 phba->mbox_mem_pool);
1124 lpfc_disc_flush_list(phba);
1125 psli->ring[(psli->ip_ring)].
1127 ~LPFC_STOP_IOCB_EVENT;
1128 psli->ring[(psli->fcp_ring)].
1130 ~LPFC_STOP_IOCB_EVENT;
1131 psli->ring[(psli->next_ring)].
1133 ~LPFC_STOP_IOCB_EVENT;
1139 lpfc_rscn_disc(phba);
1144 lpfc_els_free_iocb(phba, cmdiocb);
1149 lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1154 struct lpfc_iocbq *elsiocb;
1155 struct lpfc_sli_ring *pring;
1156 struct lpfc_sli *psli;
1161 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1163 cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
1164 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1165 ndlp->nlp_DID, ELS_CMD_ADISC);
1169 icmd = &elsiocb->iocb;
1170 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1172 /* For ADISC request, remainder of payload is service parameters */
1173 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1174 pcmd += sizeof (uint32_t);
1176 /* Fill in ADISC payload */
1177 ap = (ADISC *) pcmd;
1178 ap->hardAL_PA = phba->fc_pref_ALPA;
1179 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
1180 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1181 ap->DID = be32_to_cpu(phba->fc_myDID);
1183 phba->fc_stat.elsXmitADISC++;
1184 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1185 spin_lock_irq(phba->host->host_lock);
1186 ndlp->nlp_flag |= NLP_ADISC_SND;
1187 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1188 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1189 spin_unlock_irq(phba->host->host_lock);
1190 lpfc_els_free_iocb(phba, elsiocb);
1193 spin_unlock_irq(phba->host->host_lock);
1198 lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1199 struct lpfc_iocbq * rspiocb)
1202 struct lpfc_sli *psli;
1203 struct lpfc_nodelist *ndlp;
1206 /* we pass cmdiocb to state machine which needs rspiocb as well */
1207 cmdiocb->context_un.rsp_iocb = rspiocb;
1209 irsp = &(rspiocb->iocb);
1210 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1211 spin_lock_irq(phba->host->host_lock);
1212 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1213 spin_unlock_irq(phba->host->host_lock);
1215 /* LOGO completes to NPort <nlp_DID> */
1216 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1217 "%d:0105 LOGO completes to NPort x%x "
1218 "Data: x%x x%x x%x x%x\n",
1219 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1220 irsp->un.ulpWord[4], irsp->ulpTimeout,
1221 phba->num_disc_nodes);
1223 /* Check to see if link went down during discovery */
1224 if (lpfc_els_chk_latt(phba))
1227 if (irsp->ulpStatus) {
1228 /* Check for retry */
1229 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1230 /* ELS command is being retried */
1234 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1235 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1236 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1237 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1238 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1241 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
	/* Good status, call state machine.
	 * This will unregister the rpi if needed.
	 */
1248 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
1252 lpfc_els_free_iocb(phba, cmdiocb);
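/* Build and transmit a LOGO to the remote port; the payload carries our
 * N_Port ID and port name.
 */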
1257 lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1261 struct lpfc_iocbq *elsiocb;
1262 struct lpfc_sli_ring *pring;
1263 struct lpfc_sli *psli;
1268 pring = &psli->ring[LPFC_ELS_RING];
1270 cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
1271 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1272 ndlp->nlp_DID, ELS_CMD_LOGO);
1276 icmd = &elsiocb->iocb;
1277 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1278 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1279 pcmd += sizeof (uint32_t);
1281 /* Fill in LOGO payload */
1282 *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
1283 pcmd += sizeof (uint32_t);
1284 memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
1286 phba->fc_stat.elsXmitLOGO++;
1287 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1288 spin_lock_irq(phba->host->host_lock);
1289 ndlp->nlp_flag |= NLP_LOGO_SND;
1290 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1291 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1292 spin_unlock_irq(phba->host->host_lock);
1293 lpfc_els_free_iocb(phba, elsiocb);
1296 spin_unlock_irq(phba->host->host_lock);
1301 lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1302 struct lpfc_iocbq * rspiocb)
1306 irsp = &rspiocb->iocb;
1308 /* ELS cmd tag <ulpIoTag> completes */
1309 lpfc_printf_log(phba,
1312 "%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
1314 irsp->ulpIoTag, irsp->ulpStatus,
1315 irsp->un.ulpWord[4], irsp->ulpTimeout);
1317 /* Check to see if link went down during discovery */
1318 lpfc_els_chk_latt(phba);
1319 lpfc_els_free_iocb(phba, cmdiocb);
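/* Register with the fabric controller for full state change
 * notifications (SCR).  A temporary ndlp is allocated just to address
 * the request and is freed once the iocb is queued.
 */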
1324 lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1327 struct lpfc_iocbq *elsiocb;
1328 struct lpfc_sli_ring *pring;
1329 struct lpfc_sli *psli;
1332 struct lpfc_nodelist *ndlp;
1335 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1336 cmdsize = (sizeof (uint32_t) + sizeof (SCR));
1337 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1341 lpfc_nlp_init(phba, ndlp, nportid);
1343 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1344 ndlp->nlp_DID, ELS_CMD_SCR);
1346 mempool_free( ndlp, phba->nlp_mem_pool);
1350 icmd = &elsiocb->iocb;
1351 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1353 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1354 pcmd += sizeof (uint32_t);
1356 /* For SCR, remainder of payload is SCR parameter page */
1357 memset(pcmd, 0, sizeof (SCR));
1358 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1360 phba->fc_stat.elsXmitSCR++;
1361 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1362 spin_lock_irq(phba->host->host_lock);
1363 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1364 spin_unlock_irq(phba->host->host_lock);
1365 mempool_free( ndlp, phba->nlp_mem_pool);
1366 lpfc_els_free_iocb(phba, elsiocb);
1369 spin_unlock_irq(phba->host->host_lock);
1370 mempool_free( ndlp, phba->nlp_mem_pool);
1375 lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1378 struct lpfc_iocbq *elsiocb;
1379 struct lpfc_sli_ring *pring;
1380 struct lpfc_sli *psli;
1385 struct lpfc_nodelist *ondlp;
1386 struct lpfc_nodelist *ndlp;
1389 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1390 cmdsize = (sizeof (uint32_t) + sizeof (FARP));
1391 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1394 lpfc_nlp_init(phba, ndlp, nportid);
1396 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1397 ndlp->nlp_DID, ELS_CMD_RNID);
1399 mempool_free( ndlp, phba->nlp_mem_pool);
1403 icmd = &elsiocb->iocb;
1404 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1406 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1407 pcmd += sizeof (uint32_t);
1409 /* Fill in FARPR payload */
1410 fp = (FARP *) (pcmd);
1411 memset(fp, 0, sizeof (FARP));
1412 lp = (uint32_t *) pcmd;
1413 *lp++ = be32_to_cpu(nportid);
1414 *lp++ = be32_to_cpu(phba->fc_myDID);
1416 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1418 memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
1419 memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1420 if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
1421 memcpy(&fp->OportName, &ondlp->nlp_portname,
1422 sizeof (struct lpfc_name));
1423 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1424 sizeof (struct lpfc_name));
1427 phba->fc_stat.elsXmitFARPR++;
1428 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1429 spin_lock_irq(phba->host->host_lock);
1430 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1431 spin_unlock_irq(phba->host->host_lock);
1432 mempool_free( ndlp, phba->nlp_mem_pool);
1433 lpfc_els_free_iocb(phba, elsiocb);
1436 spin_unlock_irq(phba->host->host_lock);
1437 mempool_free( ndlp, phba->nlp_mem_pool);
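/* Cancel a pending delayed-retry timer for a node and, if the node was
 * part of discovery, account for it and continue or finish discovery.
 */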
1442 lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
1444 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1445 del_timer_sync(&nlp->nlp_delayfunc);
1446 nlp->nlp_last_elscmd = 0;
1448 if (!list_empty(&nlp->els_retry_evt.evt_listp))
1449 list_del_init(&nlp->els_retry_evt.evt_listp);
1451 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1452 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1453 if (phba->num_disc_nodes) {
			/* Check to see if there are more
			 * PLOGIs to be sent.
			 */
1457 lpfc_more_plogi(phba);
1459 if (phba->num_disc_nodes == 0) {
1460 phba->fc_flag &= ~FC_NDISC_ACTIVE;
1461 lpfc_can_disctmo(phba);
1462 if (phba->fc_flag & FC_RSCN_MODE) {
				/*
				 * Check to see if more RSCNs
				 * came in while we were
				 * processing this one.
				 */
				if ((phba->fc_rscn_id_cnt == 0) &&
				    !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
1470 phba->fc_flag &= ~FC_RSCN_MODE;
1473 lpfc_els_handle_rscn(phba);
1483 lpfc_els_retry_delay(unsigned long ptr)
1485 struct lpfc_nodelist *ndlp;
1486 struct lpfc_hba *phba;
1487 unsigned long iflag;
1488 struct lpfc_work_evt *evtp;
1490 ndlp = (struct lpfc_nodelist *)ptr;
1491 phba = ndlp->nlp_phba;
1492 evtp = &ndlp->els_retry_evt;
1494 spin_lock_irqsave(phba->host->host_lock, iflag);
1495 if (!list_empty(&evtp->evt_listp)) {
1496 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1500 evtp->evt_arg1 = ndlp;
1501 evtp->evt = LPFC_EVT_ELS_RETRY;
1502 list_add_tail(&evtp->evt_listp, &phba->work_list);
1503 if (phba->work_wait)
1504 wake_up(phba->work_wait);
1506 spin_unlock_irqrestore(phba->host->host_lock, iflag);
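/* Worker-thread side of the delayed ELS retry: reissue the last ELS
 * command recorded for the node and move it to the matching state and
 * list.
 */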
1511 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1513 struct lpfc_hba *phba;
1518 phba = ndlp->nlp_phba;
1519 spin_lock_irq(phba->host->host_lock);
1520 did = ndlp->nlp_DID;
1521 cmd = ndlp->nlp_last_elscmd;
1522 ndlp->nlp_last_elscmd = 0;
1524 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1525 spin_unlock_irq(phba->host->host_lock);
1529 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1530 spin_unlock_irq(phba->host->host_lock);
	/*
	 * If a discovery event re-added nlp_delayfunc after the timer
	 * fired and before processing the timer, cancel the
	 * nlp_delayfunc.
	 */
1536 del_timer_sync(&ndlp->nlp_delayfunc);
1537 retry = ndlp->nlp_retry;
1541 lpfc_issue_els_flogi(phba, ndlp, retry);
1544 if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) {
1545 ndlp->nlp_prev_state = ndlp->nlp_state;
1546 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1547 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1551 if (!lpfc_issue_els_adisc(phba, ndlp, retry)) {
1552 ndlp->nlp_prev_state = ndlp->nlp_state;
1553 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1554 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1558 if (!lpfc_issue_els_prli(phba, ndlp, retry)) {
1559 ndlp->nlp_prev_state = ndlp->nlp_state;
1560 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1561 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1565 if (!lpfc_issue_els_logo(phba, ndlp, retry)) {
1566 ndlp->nlp_prev_state = ndlp->nlp_state;
1567 ndlp->nlp_state = NLP_STE_NPR_NODE;
1568 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
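/* Decide whether a failed ELS command should be retried.  Examines the
 * iocb status, local-reject reason and LS_RJT codes, then either
 * reissues the command (possibly after a one second delay) or gives up.
 * Returns nonzero when a retry has been scheduled.
 */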
1576 lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1577 struct lpfc_iocbq * rspiocb)
1580 struct lpfc_dmabuf *pcmd;
1581 struct lpfc_nodelist *ndlp;
1584 int retry, maxretry;
1591 maxretry = lpfc_max_els_tries;
1592 irsp = &rspiocb->iocb;
1593 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1594 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	/* Note: context2 may be 0 for an internal driver abort
	 * of a delayed ELS command.
	 */
1601 if (pcmd && pcmd->virt) {
1602 elscmd = (uint32_t *) (pcmd->virt);
1607 did = ndlp->nlp_DID;
1609 /* We should only hit this case for retrying PLOGI */
1610 did = irsp->un.elsreq64.remoteID;
1611 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
1612 if (!ndlp && (cmd != ELS_CMD_PLOGI))
1616 switch (irsp->ulpStatus) {
1617 case IOSTAT_FCP_RSP_ERROR:
1618 case IOSTAT_REMOTE_STOP:
1621 case IOSTAT_LOCAL_REJECT:
1622 switch ((irsp->un.ulpWord[4] & 0xff)) {
1623 case IOERR_LOOP_OPEN_FAILURE:
1624 if (cmd == ELS_CMD_PLOGI) {
1625 if (cmdiocb->retry == 0) {
1632 case IOERR_SEQUENCE_TIMEOUT:
1636 case IOERR_NO_RESOURCES:
1637 if (cmd == ELS_CMD_PLOGI) {
1643 case IOERR_INVALID_RPI:
1649 case IOSTAT_NPORT_RJT:
1650 case IOSTAT_FABRIC_RJT:
1651 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
1657 case IOSTAT_NPORT_BSY:
1658 case IOSTAT_FABRIC_BSY:
1663 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
		/* Added for Vendor specific support
		 * Just keep retrying for these Rsn / Exp codes
		 */
1667 switch (stat.un.b.lsRjtRsnCode) {
1668 case LSRJT_UNABLE_TPC:
1669 if (stat.un.b.lsRjtRsnCodeExp ==
1670 LSEXP_CMD_IN_PROGRESS) {
1671 if (cmd == ELS_CMD_PLOGI) {
1678 if (cmd == ELS_CMD_PLOGI) {
1680 maxretry = lpfc_max_els_tries + 1;
1686 case LSRJT_LOGICAL_BSY:
1687 if (cmd == ELS_CMD_PLOGI) {
1696 case IOSTAT_INTERMED_RSP:
1704 if (did == FDMI_DID)
1707 if ((++cmdiocb->retry) >= maxretry) {
1708 phba->fc_stat.elsRetryExceeded++;
1714 /* Retry ELS command <elsCmd> to remote NPORT <did> */
1715 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1716 "%d:0107 Retry ELS command x%x to remote "
1717 "NPORT x%x Data: x%x x%x\n",
1719 cmd, did, cmdiocb->retry, delay);
1721 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
1722 /* If discovery / RSCN timer is running, reset it */
1723 if (timer_pending(&phba->fc_disctmo) ||
1724 (phba->fc_flag & FC_RSCN_MODE)) {
1725 lpfc_set_disctmo(phba);
1729 phba->fc_stat.elsXmitRetry++;
1730 if (ndlp && delay) {
1731 phba->fc_stat.elsDelayRetry++;
1732 ndlp->nlp_retry = cmdiocb->retry;
1734 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1735 ndlp->nlp_flag |= NLP_DELAY_TMO;
1737 ndlp->nlp_prev_state = ndlp->nlp_state;
1738 ndlp->nlp_state = NLP_STE_NPR_NODE;
1739 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1740 ndlp->nlp_last_elscmd = cmd;
1746 lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
1750 ndlp->nlp_prev_state = ndlp->nlp_state;
1751 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1752 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1754 lpfc_issue_els_plogi(phba, did, cmdiocb->retry);
1757 ndlp->nlp_prev_state = ndlp->nlp_state;
1758 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1759 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1760 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
1763 ndlp->nlp_prev_state = ndlp->nlp_state;
1764 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1765 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1766 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
1769 ndlp->nlp_prev_state = ndlp->nlp_state;
1770 ndlp->nlp_state = NLP_STE_NPR_NODE;
1771 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1772 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
1777 /* No retry ELS command <elsCmd> to remote NPORT <did> */
1778 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1779 "%d:0108 No retry ELS command x%x to remote NPORT x%x "
1782 cmd, did, cmdiocb->retry);
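/* Release the DMA buffers (command, response and BPL) attached to an
 * ELS iocb and return the iocb to the pool.
 */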
1788 lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
1790 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
1792 /* context2 = cmd, context2->next = rsp, context3 = bpl */
1793 if (elsiocb->context2) {
1794 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
1795 /* Free the response before processing the command. */
1796 if (!list_empty(&buf_ptr1->list)) {
1797 list_remove_head(&buf_ptr1->list, buf_ptr,
1800 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1803 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
1807 if (elsiocb->context3) {
1808 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
1809 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1812 spin_lock_irq(phba->host->host_lock);
1813 lpfc_sli_release_iocbq(phba, elsiocb);
1814 spin_unlock_irq(phba->host->host_lock);
1819 lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1820 struct lpfc_iocbq * rspiocb)
1822 struct lpfc_nodelist *ndlp;
1824 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1826 /* ACC to LOGO completes to NPort <nlp_DID> */
1827 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1828 "%d:0109 ACC to LOGO completes to NPort x%x "
1829 "Data: x%x x%x x%x\n",
1830 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1831 ndlp->nlp_state, ndlp->nlp_rpi);
1833 switch (ndlp->nlp_state) {
1834 case NLP_STE_UNUSED_NODE: /* node is just allocated */
1835 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1837 case NLP_STE_NPR_NODE: /* NPort Recovery mode */
1838 lpfc_unreg_rpi(phba, ndlp);
1843 lpfc_els_free_iocb(phba, cmdiocb);
1848 lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1849 struct lpfc_iocbq * rspiocb)
1852 struct lpfc_nodelist *ndlp;
1853 LPFC_MBOXQ_t *mbox = NULL;
1855 irsp = &rspiocb->iocb;
1857 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1858 if (cmdiocb->context_un.mbox)
1859 mbox = cmdiocb->context_un.mbox;
1862 /* Check to see if link went down during discovery */
1863 if ((lpfc_els_chk_latt(phba)) || !ndlp) {
1865 mempool_free( mbox, phba->mbox_mem_pool);
1870 /* ELS response tag <ulpIoTag> completes */
1871 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1872 "%d:0110 ELS response tag x%x completes "
1873 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
1875 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
1876 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
1877 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
1881 if ((rspiocb->iocb.ulpStatus == 0)
1882 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1883 lpfc_unreg_rpi(phba, ndlp);
1884 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1885 mbox->context2 = ndlp;
1886 ndlp->nlp_prev_state = ndlp->nlp_state;
1887 ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
1888 lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
1889 if (lpfc_sli_issue_mbox(phba, mbox,
1890 (MBX_NOWAIT | MBX_STOP_IOCB))
1891 != MBX_NOT_FINISHED) {
			/* NOTE: we should have messages for unsuccessful
			 * reglogin.
			 */
1896 mempool_free( mbox, phba->mbox_mem_pool);
1898 mempool_free( mbox, phba->mbox_mem_pool);
1899 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
1900 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1901 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1902 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1903 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
1904 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1905 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1913 spin_lock_irq(phba->host->host_lock);
1914 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
1915 spin_unlock_irq(phba->host->host_lock);
1917 lpfc_els_free_iocb(phba, cmdiocb);
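/* Send an ELS ACC response.  The flag selects the payload: a bare ACC,
 * an ACC carrying our service parameters (PLOGI accept), or a PRLO
 * accept.  An optional REG_LOGIN mailbox can be hooked to the
 * completion.
 */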
1922 lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1923 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
1924 LPFC_MBOXQ_t * mbox, uint8_t newnode)
1928 struct lpfc_iocbq *elsiocb;
1929 struct lpfc_sli_ring *pring;
1930 struct lpfc_sli *psli;
1934 ELS_PKT *els_pkt_ptr;
1937 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1938 oldcmd = &oldiocb->iocb;
1942 cmdsize = sizeof (uint32_t);
1943 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1944 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1946 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
1949 icmd = &elsiocb->iocb;
1950 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1951 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1952 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1953 pcmd += sizeof (uint32_t);
1956 cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
1957 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1958 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1962 icmd = &elsiocb->iocb;
1963 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1964 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1967 elsiocb->context_un.mbox = mbox;
1969 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1970 pcmd += sizeof (uint32_t);
1971 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
1974 cmdsize = sizeof (uint32_t) + sizeof (PRLO);
1975 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1976 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
1980 icmd = &elsiocb->iocb;
1981 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1982 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1984 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
1985 sizeof (uint32_t) + sizeof (PRLO));
1986 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
1987 els_pkt_ptr = (ELS_PKT *) pcmd;
1988 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
1995 elsiocb->context1 = NULL;
1997 /* Xmit ELS ACC response tag <ulpIoTag> */
1998 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1999 "%d:0128 Xmit ELS ACC response tag x%x "
2000 "Data: x%x x%x x%x x%x x%x\n",
2002 elsiocb->iocb.ulpIoTag,
2003 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2004 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2006 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2007 spin_lock_irq(phba->host->host_lock);
2008 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2009 spin_unlock_irq(phba->host->host_lock);
2010 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
2012 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2015 phba->fc_stat.elsXmitACC++;
2016 spin_lock_irq(phba->host->host_lock);
2017 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2018 spin_unlock_irq(phba->host->host_lock);
2019 if (rc == IOCB_ERROR) {
2020 lpfc_els_free_iocb(phba, elsiocb);
2027 lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2028 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2032 struct lpfc_iocbq *elsiocb;
2033 struct lpfc_sli_ring *pring;
2034 struct lpfc_sli *psli;
2040 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2042 cmdsize = 2 * sizeof (uint32_t);
2043 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2044 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
2048 icmd = &elsiocb->iocb;
2049 oldcmd = &oldiocb->iocb;
2050 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2051 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2053 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
2054 pcmd += sizeof (uint32_t);
2055 *((uint32_t *) (pcmd)) = rejectError;
2057 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2058 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2059 "%d:0129 Xmit ELS RJT x%x response tag x%x "
2060 "Data: x%x x%x x%x x%x x%x\n",
2062 rejectError, elsiocb->iocb.ulpIoTag,
2063 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2064 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2066 phba->fc_stat.elsXmitLSRJT++;
2067 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2068 spin_lock_irq(phba->host->host_lock);
2069 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2070 spin_unlock_irq(phba->host->host_lock);
2071 if (rc == IOCB_ERROR) {
2072 lpfc_els_free_iocb(phba, elsiocb);
2079 lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2080 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2085 struct lpfc_iocbq *elsiocb;
2086 struct lpfc_sli_ring *pring;
2087 struct lpfc_sli *psli;
2093 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2095 cmdsize = sizeof (uint32_t) + sizeof (ADISC);
2096 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2097 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2101 /* Xmit ADISC ACC response tag <ulpIoTag> */
2102 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2103 "%d:0130 Xmit ADISC ACC response tag x%x "
2104 "Data: x%x x%x x%x x%x x%x\n",
2106 elsiocb->iocb.ulpIoTag,
2107 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2108 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2110 icmd = &elsiocb->iocb;
2111 oldcmd = &oldiocb->iocb;
2112 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2113 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2115 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2116 pcmd += sizeof (uint32_t);
2118 ap = (ADISC *) (pcmd);
2119 ap->hardAL_PA = phba->fc_pref_ALPA;
2120 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
2121 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
2122 ap->DID = be32_to_cpu(phba->fc_myDID);
2124 phba->fc_stat.elsXmitACC++;
2125 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2126 spin_lock_irq(phba->host->host_lock);
2127 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2128 spin_unlock_irq(phba->host->host_lock);
2129 if (rc == IOCB_ERROR) {
2130 lpfc_els_free_iocb(phba, elsiocb);
2137 lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
2138 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2144 struct lpfc_iocbq *elsiocb;
2145 struct lpfc_sli_ring *pring;
2146 struct lpfc_sli *psli;
2152 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2154 cmdsize = sizeof (uint32_t) + sizeof (PRLI);
2155 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, ndlp,
2156 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2160 /* Xmit PRLI ACC response tag <ulpIoTag> */
2161 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2162 "%d:0131 Xmit PRLI ACC response tag x%x "
2163 "Data: x%x x%x x%x x%x x%x\n",
2165 elsiocb->iocb.ulpIoTag,
2166 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2167 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2169 icmd = &elsiocb->iocb;
2170 oldcmd = &oldiocb->iocb;
2171 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2172 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2174 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2175 pcmd += sizeof (uint32_t);
2177 /* For PRLI, remainder of payload is PRLI parameter page */
2178 memset(pcmd, 0, sizeof (PRLI));
2180 npr = (PRLI *) pcmd;
	/*
	 * If our firmware version is 3.20 or later,
	 * set the following bits for FC-TAPE support.
	 */
2186 if (vpd->rev.feaLevelHigh >= 0x02) {
2187 npr->ConfmComplAllowed = 1;
2189 npr->TaskRetryIdReq = 1;
2192 npr->acceptRspCode = PRLI_REQ_EXECUTED;
2193 npr->estabImagePair = 1;
2194 npr->readXferRdyDis = 1;
2195 npr->ConfmComplAllowed = 1;
2197 npr->prliType = PRLI_FCP_TYPE;
2198 npr->initiatorFunc = 1;
2200 phba->fc_stat.elsXmitACC++;
2201 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2203 spin_lock_irq(phba->host->host_lock);
2204 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2205 spin_unlock_irq(phba->host->host_lock);
2206 if (rc == IOCB_ERROR) {
2207 lpfc_els_free_iocb(phba, elsiocb);
2214 lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
2216 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2221 struct lpfc_iocbq *elsiocb;
2222 struct lpfc_sli_ring *pring;
2223 struct lpfc_sli *psli;
2229 pring = &psli->ring[LPFC_ELS_RING];
2231 cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
2232 + (2 * sizeof (struct lpfc_name));
2234 cmdsize += sizeof (RNID_TOP_DISC);
2236 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2237 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2241 /* Xmit RNID ACC response tag <ulpIoTag> */
2242 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2243 "%d:0132 Xmit RNID ACC response tag x%x "
2246 elsiocb->iocb.ulpIoTag,
2247 elsiocb->iocb.ulpContext);
2249 icmd = &elsiocb->iocb;
2250 oldcmd = &oldiocb->iocb;
2251 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2252 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2254 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2255 pcmd += sizeof (uint32_t);
2257 memset(pcmd, 0, sizeof (RNID));
2258 rn = (RNID *) (pcmd);
2259 rn->Format = format;
2260 rn->CommonLen = (2 * sizeof (struct lpfc_name));
2261 memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
2262 memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
2265 rn->SpecificLen = 0;
2267 case RNID_TOPOLOGY_DISC:
2268 rn->SpecificLen = sizeof (RNID_TOP_DISC);
2269 memcpy(&rn->un.topologyDisc.portName,
2270 &phba->fc_portname, sizeof (struct lpfc_name));
2271 rn->un.topologyDisc.unitType = RNID_HBA;
2272 rn->un.topologyDisc.physPort = 0;
2273 rn->un.topologyDisc.attachedNodes = 0;
2277 rn->SpecificLen = 0;
2281 phba->fc_stat.elsXmitACC++;
2282 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2283 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2284 * it could be freed */
2286 spin_lock_irq(phba->host->host_lock);
2287 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2288 spin_unlock_irq(phba->host->host_lock);
2289 if (rc == IOCB_ERROR) {
2290 lpfc_els_free_iocb(phba, elsiocb);
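/* Walk the NPR list and issue ADISCs to nodes marked for discovery,
 * stopping at cfg_discovery_threads outstanding and setting FC_NLP_MORE
 * when more remain.
 */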
int
lpfc_els_disc_adisc(struct lpfc_hba * phba)
{
	int sentadisc;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	sentadisc = 0;
	/* go thru NPR list and issue any remaining ELS ADISCs */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
			nlp_listp) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if (ndlp->nlp_flag & NLP_NPR_ADISC) {
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				ndlp->nlp_prev_state = ndlp->nlp_state;
				ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
				lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
				lpfc_issue_els_adisc(phba, ndlp, 0);
				sentadisc++;
				phba->num_disc_nodes++;
				if (phba->num_disc_nodes >=
				    phba->cfg_discovery_threads) {
					spin_lock_irq(phba->host->host_lock);
					phba->fc_flag |= FC_NLP_MORE;
					spin_unlock_irq(phba->host->host_lock);
					break;
				}
			}
		}
	}
	if (sentadisc == 0) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(phba->host->host_lock);
	}
	return sentadisc;
}
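/*
 * Walk the NPR list and issue a PLOGI to every node that still needs
 * discovery but cannot be authenticated with ADISC, throttled by
 * cfg_discovery_threads just like the ADISC pass above.
 */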
int
lpfc_els_disc_plogi(struct lpfc_hba * phba)
{
	int sentplogi;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	sentplogi = 0;
	/* go thru NPR list and issue any remaining ELS PLOGIs */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
			nlp_listp) {
		if ((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		   (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
			if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				ndlp->nlp_prev_state = ndlp->nlp_state;
				ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
				lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
				sentplogi++;
				phba->num_disc_nodes++;
				if (phba->num_disc_nodes >=
				    phba->cfg_discovery_threads) {
					spin_lock_irq(phba->host->host_lock);
					phba->fc_flag |= FC_NLP_MORE;
					spin_unlock_irq(phba->host->host_lock);
					break;
				}
			}
		}
	}
	if (sentplogi == 0) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(phba->host->host_lock);
	}
	return sentplogi;
}
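/*
 * Release all deferred RSCN payload buffers and take the port out of
 * RSCN processing mode.
 */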
void
lpfc_els_flush_rscn(struct lpfc_hba * phba)
{
	struct lpfc_dmabuf *mp;
	int i;

	for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
		mp = phba->fc_rscn_id_list[i];
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		phba->fc_rscn_id_list[i] = NULL;
	}
	phba->fc_rscn_id_cnt = 0;
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_can_disctmo(phba);
	return;
}
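/*
 * Check a Destination ID against every pending RSCN payload.  The
 * address-format bits of each RSCN entry select how much of the ID
 * must match: the full N_Port ID, its area, its domain, or the whole
 * fabric.  Returns the DID on a match, 0 otherwise.
 */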
static int
lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
{
	D_ID ns_did;
	D_ID rscn_did;
	struct lpfc_dmabuf *mp;
	uint32_t *lp;
	uint32_t payload_len, cmd, i, match;

	ns_did.un.word = did;
	match = 0;

	/* Never match fabric nodes for RSCNs */
	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
		return 0;

	/* If we are doing a FULL RSCN rediscovery, match everything */
	if (phba->fc_flag & FC_RSCN_DISCOVERY) {
		return did;
	}

	for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
		mp = phba->fc_rscn_id_list[i];
		lp = (uint32_t *) mp->virt;
		cmd = *lp++;
		payload_len = be32_to_cpu(cmd) & 0xffff;	/* payload length */
		payload_len -= sizeof (uint32_t);	/* take off word 0 */
		while (payload_len) {
			rscn_did.un.word = *lp++;
			rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
			payload_len -= sizeof (uint32_t);
			switch (rscn_did.un.b.resv) {
			case 0:	/* Single N_Port ID effected */
				if (ns_did.un.word == rscn_did.un.word) {
					match = did;
				}
				break;
			case 1:	/* Whole N_Port Area effected */
				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
				    && (ns_did.un.b.area == rscn_did.un.b.area)) {
					match = did;
				}
				break;
			case 2:	/* Whole N_Port Domain effected */
				if (ns_did.un.b.domain == rscn_did.un.b.domain) {
					match = did;
				}
				break;
			case 3:	/* Whole Fabric effected */
				match = did;
				break;
			default:
				/* Unknown Identifier in RSCN list */
				lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
						"%d:0217 Unknown Identifier in "
						"RSCN payload Data: x%x\n",
						phba->brd_no, rscn_did.un.word);
				break;
			}
			if (match) {
				break;
			}
		}
	}
	return match;
}
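/*
 * Send a DEVICE_RECOVERY event to every node affected by the pending
 * RSCNs so the discovery state machine can move it back to the NPR
 * list before rediscovery starts.
 */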
static int
lpfc_rscn_recovery_check(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
	struct list_head *listp;
	struct list_head *node_list[7];
	int i;

	/* Look at all nodes effected by pending RSCNs and move
	 * them to NPR list.
	 */
	node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			if (!(lpfc_rscn_payload_check(phba, ndlp->nlp_DID)))
				continue;

			lpfc_disc_state_machine(phba, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

			/* Make sure NLP_DELAY_TMO is NOT running
			 * after a device recovery event.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(phba, ndlp);
		}
	}
	return 0;
}
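/*
 * Handle a received RSCN.  Depending on the discovery state the RSCN
 * is either satisfied by the discovery already in progress, deferred
 * (its payload is queued on fc_rscn_id_list), or processed immediately
 * via lpfc_els_handle_rscn().  In every case the RSCN is ACCed.
 */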
static int
lpfc_els_rcv_rscn(struct lpfc_hba * phba,
		  struct lpfc_iocbq * cmdiocb,
		  struct lpfc_nodelist * ndlp, uint8_t newnode)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	uint32_t payload_len, cmd;
	int i;
2511 icmd = &cmdiocb->iocb;
2512 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
2516 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2517 payload_len -= sizeof (uint32_t); /* take off word 0 */
2518 cmd &= ELS_CMD_MASK;
2521 lpfc_printf_log(phba,
2524 "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
2526 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
2528 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2529 fc_host_post_event(phba->host, fc_get_event_number(),
2530 FCH_EVT_RSCN, lp[i]);
2532 /* If we are about to begin discovery, just ACC the RSCN.
	 * Discovery processing will satisfy it.
	 */
	if (phba->hba_state <= LPFC_NS_QRY) {
		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
								newnode);
		return 0;
	}
2541 /* If we are already processing an RSCN, save the received
2542 * RSCN payload buffer, cmdiocb->context2 to process later.
2544 if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
2545 if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
2546 !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
2547 spin_lock_irq(phba->host->host_lock);
2548 phba->fc_flag |= FC_RSCN_MODE;
2549 spin_unlock_irq(phba->host->host_lock);
2550 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2552 /* If we zero, cmdiocb->context2, the calling
2553 * routine will not try to free it.
2555 cmdiocb->context2 = NULL;
2558 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2559 "%d:0235 Deferred RSCN "
2560 "Data: x%x x%x x%x\n",
2561 phba->brd_no, phba->fc_rscn_id_cnt,
					phba->fc_flag, phba->hba_state);
		} else {
2564 spin_lock_irq(phba->host->host_lock);
2565 phba->fc_flag |= FC_RSCN_DISCOVERY;
2566 spin_unlock_irq(phba->host->host_lock);
2567 /* ReDiscovery RSCN */
2568 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2569 "%d:0234 ReDiscovery RSCN "
2570 "Data: x%x x%x x%x\n",
2571 phba->brd_no, phba->fc_rscn_id_cnt,
					phba->fc_flag, phba->hba_state);
		}

		/* Send back ACC */
		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
								newnode);

		/* send RECOVERY event for ALL nodes that match RSCN payload */
		lpfc_rscn_recovery_check(phba);
		return 0;
	}
2583 phba->fc_flag |= FC_RSCN_MODE;
2584 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2586 * If we zero, cmdiocb->context2, the calling routine will
2587 * not try to free it.
2589 cmdiocb->context2 = NULL;
2591 lpfc_set_disctmo(phba);
2594 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
2596 /* send RECOVERY event for ALL nodes that match RSCN payload */
2597 lpfc_rscn_recovery_check(phba);
	return lpfc_els_handle_rscn(phba);
}
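/*
 * Start servicing the queued RSCN work: kick the discovery timer and
 * query (or first log into) the NameServer so the affected N_Ports can
 * be rediscovered.
 */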
int
lpfc_els_handle_rscn(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp;

	/* Start timer for RSCN processing */
	lpfc_set_disctmo(phba);

	/* RSCN processed */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			phba->fc_flag, 0, phba->fc_rscn_id_cnt,
			phba->hba_state);

	/* To process RSCN, first compare RSCN data with NameServer */
	phba->fc_ns_retry = 0;
	ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED, NameServer_DID);
	if (ndlp) {
		/* Good ndlp, issue CT Request to NameServer */
		if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
			/* Wait for NameServer query cmpl before we can
			   continue */
			return 1;
		}
	} else {
		/* If login to NameServer does not exist, issue one */
		/* Good status, issue PLOGI to NameServer */
		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (ndlp) {
			/* Wait for NameServer login cmpl before we can
			   continue */
			return 1;
		}
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp) {
			lpfc_els_flush_rscn(phba);
			return 0;
		} else {
			lpfc_nlp_init(phba, ndlp, NameServer_DID);
			ndlp->nlp_type |= NLP_FABRIC;
			ndlp->nlp_prev_state = ndlp->nlp_state;
			ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
			lpfc_issue_els_plogi(phba, NameServer_DID, 0);
			/* Wait for NameServer login cmpl before we can
			   continue */
			return 1;
		}
	}

	lpfc_els_flush_rscn(phba);
	return 0;
}
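/*
 * Handle a FLOGI received from another N_Port.  This only happens on a
 * point-to-point link: the two port names are compared and the port
 * with the higher WWPN is the one that will originate the PLOGI.
 */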
static int
lpfc_els_rcv_flogi(struct lpfc_hba * phba,
		   struct lpfc_iocbq * cmdiocb,
		   struct lpfc_nodelist * ndlp, uint8_t newnode)
{
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	IOCB_t *icmd = &cmdiocb->iocb;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	uint32_t cmd, did;
	int rc;

	cmd = *lp++;
2673 sp = (struct serv_parm *) lp;
2675 /* FLOGI received */
2677 lpfc_set_disctmo(phba);
2679 if (phba->fc_topology == TOPOLOGY_LOOP) {
2680 /* We should never receive a FLOGI in loop mode, ignore it */
2681 did = icmd->un.elsreq64.remoteID;
2683 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
2685 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
2686 "%d:0113 An FLOGI ELS command x%x was received "
2687 "from DID x%x in Loop Mode\n",
				phba->brd_no, cmd, did);
		return 1;
	}

	did = Fabric_DID;
2694 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
2695 /* For a FLOGI we accept, then if our portname is greater
		 * then the remote portname we initiate Nport login.
		 */

		rc = memcmp(&phba->fc_portname, &sp->portName,
			    sizeof (struct lpfc_name));

		if (!rc) {
			if ((mbox = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL)) == 0) {
				return 1;
			}
2707 lpfc_linkdown(phba);
2708 lpfc_init_link(phba, mbox,
2710 phba->cfg_link_speed);
2711 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2712 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2713 rc = lpfc_sli_issue_mbox
2714 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
2715 if (rc == MBX_NOT_FINISHED) {
				mempool_free( mbox, phba->mbox_mem_pool);
			}
			return 1;
2719 } else if (rc > 0) { /* greater than */
2720 spin_lock_irq(phba->host->host_lock);
2721 phba->fc_flag |= FC_PT2PT_PLOGI;
			spin_unlock_irq(phba->host->host_lock);
		}
		phba->fc_flag |= FC_PT2PT;
		phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	} else {
2727 /* Reject this request because invalid parameters */
2728 stat.un.b.lsRjtRsvd0 = 0;
2729 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2730 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
2731 stat.un.b.vendorUnique = 0;
		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
		return 1;
	}

	/* Send back ACC */
	lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);

	return 0;
}
static int
lpfc_els_rcv_rnid(struct lpfc_hba * phba,
		  struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	RNID *rn;
	struct ls_rjt stat;
	uint32_t cmd, did;

	icmd = &cmdiocb->iocb;
	did = icmd->un.elsreq64.remoteID;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	rn = (RNID *) lp;

	/* RNID received */

	switch (rn->Format) {
	case 0:
	case RNID_TOPOLOGY_DISC:
		/* Send back ACC */
		lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
		break;
	default:
		/* Reject this request because format not supported */
		stat.un.b.lsRjtRsvd0 = 0;
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
		stat.un.b.vendorUnique = 0;
		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
	}
	return 0;
}
static int
lpfc_els_rcv_lirr(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
		  struct lpfc_nodelist * ndlp)
{
	struct ls_rjt stat;

	/* For now, unconditionally reject this command */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
	return 0;
}
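/*
 * Mailbox completion handler for the READ_LNK_STAT command issued by
 * lpfc_els_rcv_rps().  It packages the link error counters into an RPS
 * ACC payload and sends it on the exchange saved in pmb->context1.
 */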
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb;
	IOCB_t *icmd;
	RPS_RSP *rps_rsp;
	uint8_t *pcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_nodelist *ndlp;
	uint16_t xri, status;
	uint32_t cmdsize;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];
	mb = &pmb->mb;
2813 ndlp = (struct lpfc_nodelist *) pmb->context2;
2814 xri = (uint16_t) ((unsigned long)(pmb->context1));
2815 pmb->context1 = NULL;
2816 pmb->context2 = NULL;
2818 if (mb->mbxStatus) {
		mempool_free( pmb, phba->mbox_mem_pool);
		return;
	}
2823 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
2824 mempool_free( pmb, phba->mbox_mem_pool);
2825 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp,
2826 ndlp->nlp_DID, ELS_CMD_ACC);
2830 icmd = &elsiocb->iocb;
2831 icmd->ulpContext = xri;
2833 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2834 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2835 pcmd += sizeof (uint32_t); /* Skip past command */
2836 rps_rsp = (RPS_RSP *)pcmd;
2838 if (phba->fc_topology != TOPOLOGY_LOOP)
2842 if (phba->fc_flag & FC_FABRIC)
2846 rps_rsp->portStatus = be16_to_cpu(status);
2847 rps_rsp->linkFailureCnt = be32_to_cpu(mb->un.varRdLnk.linkFailureCnt);
2848 rps_rsp->lossSyncCnt = be32_to_cpu(mb->un.varRdLnk.lossSyncCnt);
2849 rps_rsp->lossSignalCnt = be32_to_cpu(mb->un.varRdLnk.lossSignalCnt);
2850 rps_rsp->primSeqErrCnt = be32_to_cpu(mb->un.varRdLnk.primSeqErrCnt);
2851 rps_rsp->invalidXmitWord = be32_to_cpu(mb->un.varRdLnk.invalidXmitWord);
2852 rps_rsp->crcCnt = be32_to_cpu(mb->un.varRdLnk.crcCnt);
2854 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
2855 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2856 "%d:0118 Xmit ELS RPS ACC response tag x%x "
2857 "Data: x%x x%x x%x x%x x%x\n",
2859 elsiocb->iocb.ulpIoTag,
2860 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2861 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2863 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2864 phba->fc_stat.elsXmitACC++;
2865 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
	}
	return;
}
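/*
 * Handle a received RPS (Read Port Status).  The request is only
 * honoured for logged-in nodes; a READ_LNK_STAT mailbox command is
 * issued and the ACC is sent from its completion handler.
 */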
static int
lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
		 struct lpfc_nodelist * ndlp)
{
	uint32_t *lp;
	uint8_t flag;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *pcmd;
	RPS *rps;
	struct ls_rjt stat;
2882 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2883 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
2884 stat.un.b.lsRjtRsvd0 = 0;
2885 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2886 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2887 stat.un.b.vendorUnique = 0;
		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
		return 0;
	}
2891 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2892 lp = (uint32_t *) pcmd->virt;
	flag = (be32_to_cpu(*lp++) & 0xf);
	rps = (RPS *) lp;

	if ((flag == 0) ||
	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
	    ((flag == 2) && (memcmp(&rps->un.portName, &phba->fc_portname,
				    sizeof (struct lpfc_name)) == 0))) {
2900 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
2901 lpfc_read_lnk_stat(phba, mbox);
			mbox->context1 =
			    (void *)((unsigned long)cmdiocb->iocb.ulpContext);
2904 mbox->context2 = ndlp;
2905 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
2906 if (lpfc_sli_issue_mbox (phba, mbox,
2907 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) {
				/* Mbox completion will send ELS Response */
				return 0;
			}
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}
2914 stat.un.b.lsRjtRsvd0 = 0;
2915 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2916 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2917 stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
	return 0;
}
static void
lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
		     struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
{
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	RPL_RSP rpl_rsp;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	uint8_t *pcmd;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
2937 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2938 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2942 icmd = &elsiocb->iocb;
2943 oldcmd = &oldiocb->iocb;
2944 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2946 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2947 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2948 pcmd += sizeof (uint16_t);
2949 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
2950 pcmd += sizeof(uint16_t);
2952 /* Setup the RPL ACC payload */
2953 rpl_rsp.listLen = be32_to_cpu(1);
2955 rpl_rsp.port_num_blk.portNum = 0;
2956 rpl_rsp.port_num_blk.portID = be32_to_cpu(phba->fc_myDID);
2957 memcpy(&rpl_rsp.port_num_blk.portName, &phba->fc_portname,
2958 sizeof(struct lpfc_name));
2960 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
2963 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
2964 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2965 "%d:0120 Xmit ELS RPL ACC response tag x%x "
2966 "Data: x%x x%x x%x x%x x%x\n",
2968 elsiocb->iocb.ulpIoTag,
2969 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2970 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2972 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2974 phba->fc_stat.elsXmitACC++;
2975 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
	}
	return;
}
static int
lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
		 struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	uint32_t maxsize;
	uint16_t cmdsize;
	RPL *rpl;
	struct ls_rjt stat;
2993 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2994 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
2995 stat.un.b.lsRjtRsvd0 = 0;
2996 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2997 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2998 stat.un.b.vendorUnique = 0;
2999 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
3002 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3003 lp = (uint32_t *) pcmd->virt;
3004 rpl = (RPL *) (lp + 1);
3006 maxsize = be32_to_cpu(rpl->maxsize);
	/* We support only one port */
	if ((rpl->index == 0) &&
	    ((maxsize == 0) ||
	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
	} else {
		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
	}
	lpfc_els_rsp_rpl_acc(phba, cmdsize, cmdiocb, ndlp);

	return 0;
}
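/*
 * Handle a received FARP request.  If the request matches our port or
 * node name, optionally PLOGI back to the requester and/or send a FARP
 * response, as directed by the request flags.
 */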
static int
lpfc_els_rcv_farp(struct lpfc_hba * phba,
		  struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	FARP *fp;
	uint32_t cmd, cnt, did;
3031 icmd = &cmdiocb->iocb;
3032 did = icmd->un.elsreq64.remoteID;
3033 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	fp = (FARP *) lp;

	/* FARP-REQ received from DID <did> */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_ELS,
			"%d:0601 FARP-REQ received from DID x%x\n",
			phba->brd_no, did);

	/* We will only support match on WWPN or WWNN */
	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
		return 0;
	}

	cnt = 0;
3052 /* If this FARP command is searching for my portname */
3053 if (fp->Mflags & FARP_MATCH_PORT) {
3054 if (memcmp(&fp->RportName, &phba->fc_portname,
			   sizeof (struct lpfc_name)) == 0)
			cnt = 1;
	}
3059 /* If this FARP command is searching for my nodename */
3060 if (fp->Mflags & FARP_MATCH_NODE) {
3061 if (memcmp(&fp->RnodeName, &phba->fc_nodename,
			   sizeof (struct lpfc_name)) == 0)
			cnt = 1;
	}

	if (cnt) {
3067 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
3068 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
3069 /* Log back into the node before sending the FARP. */
3070 if (fp->Rflags & FARP_REQUEST_PLOGI) {
3071 ndlp->nlp_prev_state = ndlp->nlp_state;
3072 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
3073 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
				lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
			}

			/* Send a FARP response to that node */
			if (fp->Rflags & FARP_REQUEST_FARPR) {
				lpfc_issue_els_farpr(phba, did, 0);
			}
		}
	}
	return 0;
}
static int
lpfc_els_rcv_farpr(struct lpfc_hba * phba,
		   struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	uint32_t cmd, did;
3095 icmd = &cmdiocb->iocb;
3096 did = icmd->un.elsreq64.remoteID;
3097 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3098 lp = (uint32_t *) pcmd->virt;
3101 /* FARP-RSP received from DID <did> */
3102 lpfc_printf_log(phba,
3105 "%d:0600 FARP-RSP received from DID x%x\n",
3108 /* ACCEPT the Farp resp request */
	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);

	return 0;
}
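/*
 * Handle a Fabric Address Notification.  If the fabric identity has
 * changed the old logins are cleaned up and a new FLOGI is started;
 * otherwise the nodes are moved back to their previous state and
 * discovery is resumed.
 */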
static int
lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
		 struct lpfc_nodelist * fan_ndlp)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	uint32_t cmd, did;
	FAN *fp;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	/* FAN received */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
			phba->brd_no);
3129 icmd = &cmdiocb->iocb;
3130 did = icmd->un.elsreq64.remoteID;
3131 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3132 lp = (uint32_t *)pcmd->virt;
3137 /* FAN received; Fan does not have a reply sequence */
3139 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
3140 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3141 sizeof(struct lpfc_name)) != 0) ||
3142 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
3143 sizeof(struct lpfc_name)) != 0)) {
			/*
			 * This node has switched fabrics. FLOGI is required
			 * Clean up the old rpi's
			 */
3149 list_for_each_entry_safe(ndlp, next_ndlp,
3150 &phba->fc_npr_list, nlp_listp) {
3152 if (ndlp->nlp_type & NLP_FABRIC) {
3154 * Clean up old Fabric, Nameserver and
3155 * other NLP_FABRIC logins
3157 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3158 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3159 /* Fail outstanding I/O now since this
					 * device is marked for PLOGI
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
3166 phba->hba_state = LPFC_FLOGI;
3167 lpfc_set_disctmo(phba);
			lpfc_initial_flogi(phba);
			return 0;
		}
3171 /* Discovery not needed,
		 * move the nodes to their original state.
		 */
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
					 nlp_listp) {
3177 switch (ndlp->nlp_prev_state) {
3178 case NLP_STE_UNMAPPED_NODE:
3179 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3180 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
				lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
				break;

3184 case NLP_STE_MAPPED_NODE:
3185 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3186 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
				lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
				break;
			}
		}

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(phba);
	}
	return 0;
}
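/*
 * ELS timeout routine (timer callback).  It only flags the event for
 * the worker thread; the real work is done in
 * lpfc_els_timeout_handler().
 */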
void
lpfc_els_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
3210 spin_lock_irqsave(phba->host->host_lock, iflag);
3211 if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
3212 phba->work_hba_events |= WORKER_ELS_TMO;
3213 if (phba->work_wait)
3214 wake_up(phba->work_wait);
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return;
}
void
lpfc_els_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	IOCB_t *cmd = NULL;
	struct lpfc_dmabuf *pcmd;
	struct list_head *dlp;
	uint32_t *elscmd;
	uint32_t els_command;
	uint32_t timeout;
	uint32_t remote_ID;
3235 spin_lock_irq(phba->host->host_lock);
3236 /* If the timer is already canceled do nothing */
3237 if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}
3241 timeout = (uint32_t)(phba->fc_ratov << 1);
3243 pring = &phba->sli.ring[LPFC_ELS_RING];
3244 dlp = &pring->txcmplq;
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
		cmd = &piocb->iocb;

		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
			continue;
		}
3252 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3253 elscmd = (uint32_t *) (pcmd->virt);
3254 els_command = *elscmd;
3256 if ((els_command == ELS_CMD_FARP)
3257 || (els_command == ELS_CMD_FARPR)) {
3261 if (piocb->drvrTimeout > 0) {
3262 if (piocb->drvrTimeout >= timeout) {
				piocb->drvrTimeout -= timeout;
			} else {
				piocb->drvrTimeout = 0;
			}
			continue;
		}
3270 list_del(&piocb->list);
3271 pring->txcmplq_cnt--;
3273 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
3274 struct lpfc_nodelist *ndlp;
3275 spin_unlock_irq(phba->host->host_lock);
3276 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
3277 spin_lock_irq(phba->host->host_lock);
3278 remote_ID = ndlp->nlp_DID;
3279 if (cmd->un.elsreq64.bdl.ulpIoTag32) {
3280 lpfc_sli_issue_abort_iotag32(phba,
3284 remote_ID = cmd->un.elsreq64.remoteID;
3287 lpfc_printf_log(phba,
3290 "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
3291 phba->brd_no, els_command,
3292 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
		/*
		 * The iocb has timed out; abort it.
		 */
3297 if (piocb->iocb_cmpl) {
3298 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3299 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3300 spin_unlock_irq(phba->host->host_lock);
3301 (piocb->iocb_cmpl) (phba, piocb, piocb);
			spin_lock_irq(phba->host->host_lock);
		} else
			lpfc_sli_release_iocbq(phba, piocb);
	}
3306 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3307 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
	spin_unlock_irq(phba->host->host_lock);
}
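/*
 * Abort every ELS command still sitting on the txq or txcmplq of the
 * ELS ring, completing each one locally with IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_ABORTED.
 */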
void
lpfc_els_flush_cmd(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	IOCB_t *cmd = NULL;
	struct lpfc_dmabuf *pcmd;
	uint32_t *elscmd;
	uint32_t els_command;
3322 pring = &phba->sli.ring[LPFC_ELS_RING];
3323 spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
		cmd = &piocb->iocb;

		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
			continue;
		}
3331 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
3332 if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
3333 (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
3334 (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
3335 (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
3339 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3340 elscmd = (uint32_t *) (pcmd->virt);
3341 els_command = *elscmd;
3343 list_del(&piocb->list);
		pring->txq_cnt--;
3346 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3347 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3349 if (piocb->iocb_cmpl) {
3350 spin_unlock_irq(phba->host->host_lock);
3351 (piocb->iocb_cmpl) (phba, piocb, piocb);
			spin_lock_irq(phba->host->host_lock);
		} else
			lpfc_sli_release_iocbq(phba, piocb);
	}
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
		cmd = &piocb->iocb;

		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
			continue;
		}
3363 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3364 elscmd = (uint32_t *) (pcmd->virt);
3365 els_command = *elscmd;
3367 list_del(&piocb->list);
3368 pring->txcmplq_cnt--;
3370 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3371 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3373 if (piocb->iocb_cmpl) {
3374 spin_unlock_irq(phba->host->host_lock);
3375 (piocb->iocb_cmpl) (phba, piocb, piocb);
			spin_lock_irq(phba->host->host_lock);
		} else
			lpfc_sli_release_iocbq(phba, piocb);
	}
	spin_unlock_irq(phba->host->host_lock);
	return;
}
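/*
 * Unsolicited ELS receive handler.  The ELS command code is taken from
 * the first word of the payload and dispatched to the matching
 * lpfc_els_rcv_* routine; unknown commands are rejected with LS_RJT.
 */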
void
lpfc_els_unsol_event(struct lpfc_hba * phba,
		     struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	uint32_t *lp;
	IOCB_t *icmd;
	struct ls_rjt stat;
	uint32_t cmd;
	uint32_t did;
	uint32_t newnode;
	uint32_t drop_cmd = 0;	/* by default do NOT drop received cmd */
	uint32_t rjt_err = 0;

	psli = &phba->sli;
	icmd = &elsiocb->iocb;
3403 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3404 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
3405 /* Not enough posted buffers; Try posting more buffers */
3406 phba->fc_stat.NoRcvBuf++;
3407 lpfc_post_buffer(phba, pring, 0, 1);
3411 /* If there are no BDEs associated with this IOCB,
3412 * there is nothing to do.
3414 if (icmd->ulpBdeCount == 0)
3417 /* type of ELS cmd is first 32bit word in packet */
	mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
							    cont64[0].
							    addrHigh,
							    icmd->un.
							    cont64[0].addrLow));
	if (mp == 0) {
		drop_cmd = 1;
		goto dropit;
	}

	newnode = 0;
	lp = (uint32_t *) mp->virt;
	cmd = *lp++;
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
3433 if (icmd->ulpStatus) {
3434 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3440 /* Check to see if link went down during discovery */
3441 if (lpfc_els_chk_latt(phba)) {
3442 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3448 did = icmd->un.rcvels.remoteID;
	ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			drop_cmd = 1;
			goto dropit;
		}

		lpfc_nlp_init(phba, ndlp, did);
		newnode = 1;
		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
			ndlp->nlp_type |= NLP_FABRIC;
		}
		ndlp->nlp_state = NLP_STE_UNUSED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
	}
3469 phba->fc_stat.elsRcvFrame++;
3470 elsiocb->context1 = ndlp;
3471 elsiocb->context2 = mp;
3473 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
3474 cmd &= ELS_CMD_MASK;
3476 /* ELS command <elsCmd> received from NPORT <did> */
3477 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3478 "%d:0112 ELS command x%x received from NPORT x%x "
3479 "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
	switch (cmd) {
	case ELS_CMD_PLOGI:
		phba->fc_stat.elsRcvPLOGI++;
		if (phba->hba_state < LPFC_DISC_AUTH) {
			rjt_err = 1;
			break;
		}
		ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
		break;
	case ELS_CMD_FLOGI:
		phba->fc_stat.elsRcvFLOGI++;
		lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
		if (newnode) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
		break;
	case ELS_CMD_LOGO:
		phba->fc_stat.elsRcvLOGO++;
		if (phba->hba_state < LPFC_DISC_AUTH) {
			rjt_err = 1;
			break;
		}
		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
		break;
	case ELS_CMD_PRLO:
		phba->fc_stat.elsRcvPRLO++;
		if (phba->hba_state < LPFC_DISC_AUTH) {
			rjt_err = 1;
			break;
		}
		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
		break;
	case ELS_CMD_RSCN:
		phba->fc_stat.elsRcvRSCN++;
		lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
		if (newnode) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
		break;
	case ELS_CMD_ADISC:
		phba->fc_stat.elsRcvADISC++;
		if (phba->hba_state < LPFC_DISC_AUTH) {
			rjt_err = 1;
			break;
		}
		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
		break;
	case ELS_CMD_PDISC:
		phba->fc_stat.elsRcvPDISC++;
		if (phba->hba_state < LPFC_DISC_AUTH) {
			rjt_err = 1;
			break;
		}
		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
		break;
	case ELS_CMD_FARPR:
		phba->fc_stat.elsRcvFARPR++;
		lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
		break;
	case ELS_CMD_FARP:
		phba->fc_stat.elsRcvFARP++;
		lpfc_els_rcv_farp(phba, elsiocb, ndlp);
		break;
	case ELS_CMD_FAN:
		phba->fc_stat.elsRcvFAN++;
		lpfc_els_rcv_fan(phba, elsiocb, ndlp);
		break;
	case ELS_CMD_PRLI:
		phba->fc_stat.elsRcvPRLI++;
		if (phba->hba_state < LPFC_DISC_AUTH) {
			rjt_err = 1;
			break;
		}
		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
		break;
	case ELS_CMD_LIRR:
		phba->fc_stat.elsRcvLIRR++;
		lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
		if (newnode) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
		break;
	case ELS_CMD_RPS:
		phba->fc_stat.elsRcvRPS++;
		lpfc_els_rcv_rps(phba, elsiocb, ndlp);
		if (newnode) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
		break;
	case ELS_CMD_RPL:
		phba->fc_stat.elsRcvRPL++;
		lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
		if (newnode) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
		break;
	case ELS_CMD_RNID:
		phba->fc_stat.elsRcvRNID++;
		lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
		if (newnode) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
		break;
	default:
		/* Unsupported ELS command, reject */
		rjt_err = 1;

		/* Unknown ELS command <elsCmd> received from NPORT <did> */
		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
				"%d:0115 Unknown ELS command x%x received from "
				"NPORT x%x\n", phba->brd_no, cmd, did);
		if (newnode) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
		break;
	}

	/* check if need to LS_RJT received ELS cmd */
	if (rjt_err) {
		stat.un.b.lsRjtRsvd0 = 0;
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
		stat.un.b.vendorUnique = 0;
		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
	}

	if (elsiocb->context2) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
dropit:
	/* check if need to drop received ELS cmd */
	if (drop_cmd == 1) {
		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
				"%d:0111 Dropping received ELS cmd "
				"Data: x%x x%x x%x\n", phba->brd_no,
				icmd->ulpStatus, icmd->un.ulpWord[4],
				icmd->ulpTimeout);
		phba->fc_stat.elsRcvDrop++;